file_path
stringlengths 3
280
| file_language
stringclasses 66
values | content
stringlengths 1
1.04M
| repo_name
stringlengths 5
92
| repo_stars
int64 0
154k
| repo_description
stringlengths 0
402
| repo_primary_language
stringclasses 108
values | developer_username
stringlengths 1
25
| developer_name
stringlengths 0
30
| developer_company
stringlengths 0
82
|
|---|---|---|---|---|---|---|---|---|---|
src/ray/common/id_def.h
|
C/C++ Header
|
// This header file is used to avoid code duplication.
// It can be included multiple times in id.h, and each inclusion
// could use a different definition of the DEFINE_UNIQUE_ID macro.
// Macro definition format: DEFINE_UNIQUE_ID(id_type).
// NOTE: This file should NOT be included in any file other than id.h.
// One DEFINE_UNIQUE_ID invocation per unique ID type; the actual class
// generated for each name is determined by the macro definition active at
// the point of inclusion (see id.h).
DEFINE_UNIQUE_ID(FunctionID)
DEFINE_UNIQUE_ID(ActorClassID)
DEFINE_UNIQUE_ID(ActorCheckpointID)
DEFINE_UNIQUE_ID(WorkerID)
DEFINE_UNIQUE_ID(ConfigID)
DEFINE_UNIQUE_ID(ClientID)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/id_test.cc
|
C++
|
#include "gtest/gtest.h"
#include "ray/common/common_protocol.h"
#include "ray/common/task/task_spec.h"
namespace ray {
/// Round trip test for computing the object ID for a task's return value,
/// then computing the task ID that created the object.
///
/// \param task_id The task that produced the return object.
/// \param return_index Index of the return value within the task.
/// \param transport_type Transport type encoded into the object ID.
void TestReturnObjectId(const TaskID &task_id, int64_t return_index,
                        uint8_t transport_type) {
  ObjectID return_id = ObjectID::ForTaskReturn(task_id, return_index, transport_type);
  ASSERT_TRUE(return_id.CreatedByTask());
  ASSERT_TRUE(return_id.IsReturnObject());
  ASSERT_FALSE(return_id.IsPutObject());
  ASSERT_EQ(return_id.TaskId(), task_id);
  // Use ASSERT_EQ (not ASSERT_TRUE on ==) so failures print both values,
  // consistent with the other assertions in this function.
  ASSERT_EQ(return_id.GetTransportType(), transport_type);
  ASSERT_EQ(return_id.ObjectIndex(), return_index);
}
/// Round trip test for computing the object ID for a task's put value, then
/// computing the task ID that created the object.
///
/// \param task_id The task that created the put object.
/// \param put_index Index of the put within the task.
void TestPutObjectId(const TaskID &task_id, int64_t put_index) {
  ObjectID put_id = ObjectID::ForPut(task_id, put_index, 1);
  ASSERT_TRUE(put_id.CreatedByTask());
  ASSERT_FALSE(put_id.IsReturnObject());
  ASSERT_TRUE(put_id.IsPutObject());
  ASSERT_EQ(put_id.TaskId(), task_id);
  // Use ASSERT_EQ (not ASSERT_TRUE on ==) so failures print both values,
  // consistent with the other assertions in this function.
  ASSERT_EQ(put_id.GetTransportType(), 1);
  ASSERT_EQ(put_id.ObjectIndex(), put_index);
}
void TestRandomObjectId() {
// Round trip test for computing the object ID from random.
const ObjectID random_object_id = ObjectID::FromRandom();
ASSERT_FALSE(random_object_id.CreatedByTask());
}
// Fixed job ID and its driver-task ID, shared by all test cases below.
const static JobID kDefaultJobId = JobID::FromInt(199);
const static TaskID kDefaultDriverTaskId = TaskID::ForDriverTask(kDefaultJobId);
TEST(ActorIDTest, TestActorID) {
  {
    // Binary round trip: serializing an actor ID and parsing the bytes back
    // must yield an equal ID.
    const ActorID original = ActorID::Of(kDefaultJobId, kDefaultDriverTaskId, 1);
    const ActorID parsed = ActorID::FromBinary(original.Binary());
    ASSERT_EQ(original, parsed);
  }
  {
    // The job ID embedded in an actor ID must match the job it was created
    // for.
    const ActorID actor_id = ActorID::Of(kDefaultJobId, kDefaultDriverTaskId, 1);
    ASSERT_EQ(kDefaultJobId, actor_id.JobId());
  }
}
TEST(TaskIDTest, TestTaskID) {
  // Round trip test for task ID: an actor task must report the actor ID it
  // was built from.
  const ActorID actor_id = ActorID::Of(kDefaultJobId, kDefaultDriverTaskId, 1);
  const TaskID actor_task_id =
      TaskID::ForActorTask(kDefaultJobId, kDefaultDriverTaskId, 1, actor_id);
  ASSERT_EQ(actor_id, actor_task_id.ActorId());
}
TEST(ObjectIDTest, TestObjectID) {
  const static ActorID default_actor_id =
      ActorID::Of(kDefaultJobId, kDefaultDriverTaskId, 1);
  const static TaskID default_task_id =
      TaskID::ForActorTask(kDefaultJobId, kDefaultDriverTaskId, 1, default_actor_id);
  // Put objects: small indices and the largest allowed index.
  TestPutObjectId(default_task_id, 1);
  TestPutObjectId(default_task_id, 2);
  TestPutObjectId(default_task_id, ObjectID::kMaxObjectIndex);
  // Return objects with varying transport types, up to the largest index.
  TestReturnObjectId(default_task_id, 1, 2);
  TestReturnObjectId(default_task_id, 2, 3);
  TestReturnObjectId(default_task_id, ObjectID::kMaxObjectIndex, 4);
  // Randomly generated object IDs.
  TestRandomObjectId();
}
TEST(NilTest, TestIsNil) {
  // Both a default-constructed ID and the explicit Nil() value must report
  // nil.
  const TaskID default_task_id;
  const ObjectID default_object_id;
  ASSERT_TRUE(default_task_id.IsNil());
  ASSERT_TRUE(TaskID::Nil().IsNil());
  ASSERT_TRUE(default_object_id.IsNil());
  ASSERT_TRUE(ObjectID::Nil().IsNil());
}
} // namespace ray
// Test entry point: initializes googletest and runs every registered test.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/ray_config.h
|
C/C++ Header
|
#ifndef RAY_CONFIG_H
#define RAY_CONFIG_H
#include <sstream>
#include <unordered_map>
#include "ray/util/logging.h"
class RayConfig {
/// -----------Include ray_config_def.h to define config items.----------------
/// A helper macro that defines a config item.
/// In particular, this generates a private field called `name_` and a public getter
/// method called `name()` for a given config item.
///
/// \param type Type of the config item.
/// \param name Name of the config item.
/// \param default_value Default value of the config item.
#define RAY_CONFIG(type, name, default_value) \
private: \
type name##_ = default_value; \
\
public: \
inline type name() { return name##_; }
#include "ray_config_def.h"
/// -------------------------------------------------------------------------
#undef RAY_CONFIG
public:
/// Global singleton accessor. The instance is created on first use
/// (function-local statics are initialized thread-safely since C++11).
static RayConfig &instance() {
static RayConfig config;
return config;
}
// clang-format off
/// -----------Include ray_config_def.h to set config items.-------------------
/// A helper macro that helps to set a value to a config item.
/// Each expansion compares the map key against one config name and, on a
/// match, parses the string value via operator>> into the backing field.
#define RAY_CONFIG(type, name, default_value) \
if (pair.first == #name) { \
std::istringstream stream(pair.second); \
stream >> name##_; \
continue; \
}
/// Overwrite config items from a string-to-string map. A key that matches
/// no known config name is a fatal error.
/// NOTE(review): values are parsed with istringstream operator>>, so bool
/// fields presumably expect "0"/"1" rather than "true"/"false" -- confirm
/// with callers.
void initialize(const std::unordered_map<std::string, std::string> &config_map) {
for (auto const &pair : config_map) {
// We use a big chain of if else statements because C++ doesn't allow
// switch statements on strings.
#include "ray_config_def.h"
RAY_LOG(FATAL) << "Received unexpected config parameter " << pair.first;
}
}
/// ---------------------------------------------------------------------
#undef RAY_CONFIG
};
// clang-format on
#endif // RAY_CONFIG_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/ray_config_def.h
|
C/C++ Header
|
// This header file is used to avoid code duplication.
// It can be included multiple times in ray_config.h, and each inclusion
// could use a different definition of the RAY_CONFIG macro.
// Macro definition format: RAY_CONFIG(type, name, default_value).
// NOTE: This file should NOT be included in any file other than ray_config.h.
// IF YOU MODIFY THIS FILE and add a configuration parameter, you must change
// at least two additional things:
// 1. You must update the file "ray/python/ray/includes/ray_config.pxd".
// 2. You must update the file "ray/python/ray/includes/ray_config.pxi".
/// In theory, this is used to detect Ray cookie mismatches.
/// This magic number (hex for "RAY") is used instead of zero, rationale is
/// that it could still be possible that some random program sends an int64_t
/// which is zero, but it's much less likely that a program sends this
/// particular magic number.
RAY_CONFIG(int64_t, ray_cookie, 0x5241590000000000)
/// The duration that a single handler on the event loop can take before a
/// warning is logged that the handler is taking too long.
RAY_CONFIG(int64_t, handler_warning_timeout_ms, 100)
/// The duration between heartbeats sent by the raylets.
RAY_CONFIG(int64_t, raylet_heartbeat_timeout_milliseconds, 100)
/// If a component has not sent a heartbeat in the last num_heartbeats_timeout
/// heartbeat intervals, the raylet monitor process will report
/// it as dead to the db_client table.
RAY_CONFIG(int64_t, num_heartbeats_timeout, 300)
/// For a raylet, if the last heartbeat was sent more than this many
/// heartbeat periods ago, then a warning will be logged that the heartbeat
/// handler is drifting.
RAY_CONFIG(uint64_t, num_heartbeats_warning, 5)
/// The duration between dumping debug info to logs, or -1 to disable.
RAY_CONFIG(int64_t, debug_dump_period_milliseconds, 10000)
/// Whether to enable fair queueing between task classes in raylet. When
/// fair queueing is enabled, the raylet will try to balance the number
/// of running tasks by class (i.e., function name). This prevents one
/// type of task from starving other types (see issue #3664).
RAY_CONFIG(bool, fair_queueing_enabled, true)
/// Whether to enable object pinning for plasma objects. When this is
/// enabled, objects in scope in the cluster will not be LRU evicted.
RAY_CONFIG(bool, object_pinning_enabled, true)
/// Whether to enable the new scheduler. The new scheduler is designed
/// only to work with direct calls. Once direct calls are becoming
/// the default, this scheduler will also become the default.
RAY_CONFIG(bool, new_scheduler_enabled, false)
// The max allowed size in bytes of a return object from direct actor calls.
// Objects larger than this size will be spilled/promoted to plasma.
RAY_CONFIG(int64_t, max_direct_call_object_size, 100 * 1024)
// The min number of retries for direct actor creation tasks. The actual number
// of creation retries will be MAX(actor_creation_min_retries, max_reconstructions).
RAY_CONFIG(uint64_t, actor_creation_min_retries, 3)
/// The initial period for a task execution lease. The lease will expire this
/// many milliseconds after the first acquisition of the lease. Nodes that
/// require an object will not try to reconstruct the task until at least
/// this many milliseconds.
RAY_CONFIG(int64_t, initial_reconstruction_timeout_milliseconds, 10000)
/// The maximum duration that workers can hold on to another worker's lease
/// for direct task submission until it must be returned to the raylet.
RAY_CONFIG(int64_t, worker_lease_timeout_milliseconds, 500)
/// The duration between heartbeats sent from the workers to the raylet.
/// If set to a negative value, the heartbeats will not be sent.
/// These are used to report active object IDs for garbage collection and
/// to ensure that workers go down when the raylet dies unexpectedly.
RAY_CONFIG(int64_t, worker_heartbeat_timeout_milliseconds, 1000)
/// These are used by the worker to set timeouts and to batch requests when
/// getting objects.
RAY_CONFIG(int64_t, get_timeout_milliseconds, 1000)
RAY_CONFIG(int64_t, worker_get_request_size, 10000)
RAY_CONFIG(int64_t, worker_fetch_request_size, 10000)
/// This is used to bound the size of the Raylet's lineage cache. This is
/// the maximum uncommitted lineage size that any remote task in the cache
/// can have before eviction will be attempted.
RAY_CONFIG(uint64_t, max_lineage_size, 100)
/// This is a temporary constant used by actors to determine how many dummy
/// objects to store.
RAY_CONFIG(int64_t, actor_max_dummy_objects, 1000)
/// Number of times we try connecting to a socket.
RAY_CONFIG(int64_t, num_connect_attempts, 5)
RAY_CONFIG(int64_t, connect_timeout_milliseconds, 500)
/// The duration that the raylet will wait before reinitiating a
/// fetch request for a missing task dependency. This time may adapt based on
/// the number of missing task dependencies.
RAY_CONFIG(int64_t, raylet_fetch_timeout_milliseconds, 1000)
/// The duration that the raylet will wait between initiating
/// reconstruction calls for missing task dependencies. If there are many
/// missing task dependencies, we will only initiate reconstruction calls for
/// some of them each time.
RAY_CONFIG(int64_t, raylet_reconstruction_timeout_milliseconds, 1000)
/// The maximum number of objects that the raylet will issue
/// reconstruct calls for in a single pass through the reconstruct object
/// timeout handler.
RAY_CONFIG(int64_t, max_num_to_reconstruct, 10000)
/// The maximum number of objects to include in a single fetch request in the
/// regular raylet fetch timeout handler.
RAY_CONFIG(int64_t, raylet_fetch_request_size, 10000)
/// The maximum number of active object IDs to report in a heartbeat.
/// NOTE: currently disabled by default (a value of 0).
RAY_CONFIG(size_t, raylet_max_active_object_ids, 0)
/// The duration that we wait after sending a worker SIGTERM before sending
/// the worker SIGKILL.
RAY_CONFIG(int64_t, kill_worker_timeout_milliseconds, 100)
/// This is a timeout used to cause failures in the plasma manager and raylet
/// when certain event loop handlers take too long.
RAY_CONFIG(int64_t, max_time_for_handler_milliseconds, 1000)
/// This is used by the Python extension when serializing objects as part of
/// a task spec.
RAY_CONFIG(int64_t, size_limit, 10000)
RAY_CONFIG(int64_t, num_elements_limit, 10000)
/// This is used to cause failures when a certain loop in redis.cc which
/// synchronously looks up object manager addresses in redis is slow.
RAY_CONFIG(int64_t, max_time_for_loop, 1000)
/// Allow up to 5 seconds for connecting to Redis.
RAY_CONFIG(int64_t, redis_db_connect_retries, 50)
RAY_CONFIG(int64_t, redis_db_connect_wait_milliseconds, 100)
/// TODO(rkn): These constants are currently unused.
RAY_CONFIG(int64_t, plasma_default_release_delay, 64)
RAY_CONFIG(int64_t, L3_cache_size_bytes, 100000000)
/// Constants for the spillback scheduling policy.
RAY_CONFIG(int64_t, max_tasks_to_spillback, 10)
/// Every time an actor creation task has been spilled back a number of times
/// that is a multiple of this quantity, a warning will be pushed to the
/// corresponding driver. Since spillback currently occurs on a 100ms timer,
/// a value of 100 corresponds to a warning every 10 seconds.
RAY_CONFIG(int64_t, actor_creation_num_spillbacks_warning, 100)
/// If a node manager attempts to forward a task to another node manager and
/// the forward fails, then it will resubmit the task after this duration.
RAY_CONFIG(int64_t, node_manager_forward_task_retry_timeout_milliseconds, 1000)
/// Timeout, in milliseconds, to wait before retrying a failed pull in the
/// ObjectManager.
RAY_CONFIG(int, object_manager_pull_timeout_ms, 10000)
/// Timeout, in milliseconds, to wait until the Push request fails.
/// Special value:
/// Negative: waiting infinitely.
/// 0: giving up retrying immediately.
RAY_CONFIG(int, object_manager_push_timeout_ms, 10000)
/// The period of time that an object manager will wait before pushing the
/// same object again to a specific object manager.
RAY_CONFIG(int, object_manager_repeated_push_delay_ms, 60000)
/// Default chunk size for multi-chunk transfers to use in the object manager.
/// In the object manager, no single thread is permitted to transfer more
/// data than what is specified by the chunk size unless the number of object
/// chunks exceeds the number of available sending threads.
RAY_CONFIG(uint64_t, object_manager_default_chunk_size, 1000000)
/// Number of workers per Python worker process
RAY_CONFIG(int, num_workers_per_process_python, 1)
/// Number of workers per Java worker process
RAY_CONFIG(int, num_workers_per_process_java, 10)
/// Maximum timeout in milliseconds within which a task lease must be renewed.
RAY_CONFIG(int64_t, max_task_lease_timeout_ms, 60000)
/// Maximum number of checkpoints to keep in GCS for an actor.
/// Note: this number should be set to at least 2. Because saving an application
/// checkpoint isn't atomic with saving the backend checkpoint, and it will break
/// if this number is set to 1 and users save application checkpoints in place.
RAY_CONFIG(int32_t, num_actor_checkpoints_to_keep, 20)
/// Maximum number of ids in one batch to send to GCS to delete keys.
RAY_CONFIG(uint32_t, maximum_gcs_deletion_batch_size, 1000)
/// When getting objects from object store, print a warning every this number of attempts.
RAY_CONFIG(uint32_t, object_store_get_warn_per_num_attempts, 50)
/// When getting objects from object store, max number of ids to print in the warning
/// message.
RAY_CONFIG(uint32_t, object_store_get_max_ids_to_print_in_warning, 20)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/ray_object.cc
|
C++
|
#include "ray/common/ray_object.h"
namespace ray {
/// Build a metadata buffer that encodes an error type as its decimal
/// integer string. The buffer copies the bytes, so the temporary string may
/// safely die at scope exit.
std::shared_ptr<LocalMemoryBuffer> MakeErrorMetadataBuffer(rpc::ErrorType error_type) {
  const std::string encoded = std::to_string(static_cast<int>(error_type));
  uint8_t *bytes = reinterpret_cast<uint8_t *>(const_cast<char *>(encoded.data()));
  return std::make_shared<LocalMemoryBuffer>(bytes, encoded.size(), /*copy_data=*/true);
}
// Construct an error-marker object: no data; the metadata holds the
// stringified error type (see MakeErrorMetadataBuffer above).
RayObject::RayObject(rpc::ErrorType error_type)
    : RayObject(nullptr, MakeErrorMetadataBuffer(error_type)) {}
/// An object is an exception iff its metadata equals the decimal string form
/// of one of the known rpc::ErrorType enum values.
///
/// \param[out] error_type If non-null, receives the matched error type.
/// \return true if the metadata encodes a known error type.
bool RayObject::IsException(rpc::ErrorType *error_type) const {
  if (metadata_ == nullptr) {
    return false;
  }
  // TODO (kfstorm): metadata should be structured.
  const std::string metadata(reinterpret_cast<const char *>(metadata_->Data()),
                             metadata_->Size());
  const auto *descriptor = ray::rpc::ErrorType_descriptor();
  for (int i = 0; i < descriptor->value_count(); i++) {
    const auto number = descriptor->value(i)->number();
    if (metadata != std::to_string(number)) {
      continue;
    }
    if (error_type != nullptr) {
      *error_type = rpc::ErrorType(number);
    }
    return true;
  }
  return false;
}
/// True iff the metadata encodes the OBJECT_IN_PLASMA error marker, i.e.
/// the value was promoted to the plasma store.
bool RayObject::IsInPlasmaError() const {
  if (metadata_ == nullptr) {
    return false;
  }
  // Compare the raw metadata bytes against the stringified enum value.
  const std::string expected = std::to_string(ray::rpc::ErrorType::OBJECT_IN_PLASMA);
  const std::string actual(reinterpret_cast<const char *>(metadata_->Data()),
                           metadata_->Size());
  return actual == expected;
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/ray_object.h
|
C/C++ Header
|
#ifndef RAY_COMMON_RAY_OBJECT_H
#define RAY_COMMON_RAY_OBJECT_H
#include "ray/common/buffer.h"
#include "ray/protobuf/gcs.pb.h"
#include "ray/util/logging.h"
namespace ray {
/// Binary representation of a ray object, consisting of buffer pointers to data and
/// metadata. A ray object may have both data and metadata, or only one of them.
class RayObject {
 public:
  /// Create a ray object instance.
  ///
  /// Set `copy_data` to `false` is fine for most cases - for example when putting
  /// an object into store with a temporary RayObject, and we don't want to do an extra
  /// copy. But in some cases we do want to always hold a valid data - for example, memory
  /// store uses RayObject to represent objects, in this case we actually want the object
  /// data to remain valid after user puts it into store.
  ///
  /// \param[in] data Data of the ray object.
  /// \param[in] metadata Metadata of the ray object.
  /// \param[in] copy_data Whether this class should hold a copy of data.
  RayObject(const std::shared_ptr<Buffer> &data, const std::shared_ptr<Buffer> &metadata,
            bool copy_data = false)
      : data_(data), metadata_(metadata), has_data_copy_(copy_data) {
    if (has_data_copy_) {
      // If this object is required to hold a copy of the data,
      // make a copy if the passed in buffers don't already have a copy.
      if (data_ && !data_->OwnsData()) {
        data_ = std::make_shared<LocalMemoryBuffer>(data_->Data(), data_->Size(),
                                                    /*copy_data=*/true);
      }
      if (metadata_ && !metadata_->OwnsData()) {
        metadata_ = std::make_shared<LocalMemoryBuffer>(
            metadata_->Data(), metadata_->Size(), /*copy_data=*/true);
      }
    }
    RAY_CHECK(data_ || metadata_) << "Data and metadata cannot both be empty.";
  }

  /// Create an error object whose metadata encodes the error type
  /// (implemented in ray_object.cc).
  RayObject(rpc::ErrorType error_type);

  /// Return the data of the ray object.
  // Stray ';' after the inline bodies removed (harmless but non-idiomatic).
  const std::shared_ptr<Buffer> &GetData() const { return data_; }

  /// Return the metadata of the ray object.
  const std::shared_ptr<Buffer> &GetMetadata() const { return metadata_; }

  /// Return the total size in bytes (data plus metadata; absent parts count
  /// as zero).
  uint64_t GetSize() const {
    uint64_t size = 0;
    size += (data_ != nullptr) ? data_->Size() : 0;
    size += (metadata_ != nullptr) ? metadata_->Size() : 0;
    return size;
  }

  /// Whether this object has data.
  bool HasData() const { return data_ != nullptr; }

  /// Whether this object has metadata.
  bool HasMetadata() const { return metadata_ != nullptr; }

  /// Whether the object represents an exception.
  bool IsException(rpc::ErrorType *error_type = nullptr) const;

  /// Whether the object has been promoted to plasma (i.e., since it was too
  /// large to return directly as part of a gRPC response).
  bool IsInPlasmaError() const;

 private:
  std::shared_ptr<Buffer> data_;
  std::shared_ptr<Buffer> metadata_;
  /// Whether this class holds a data copy.
  bool has_data_copy_;
};
} // namespace ray
#endif // RAY_COMMON_RAY_OBJECT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/scheduling/cluster_resource_scheduler.cc
|
C++
|
#include "cluster_resource_scheduler.h"
/// Human-readable dump of a node's resources: "(total:available)" pairs for
/// predefined resources, then "id:(total:available)" for custom resources.
std::string NodeResources::DebugString() {
  std::stringstream out;
  out << " node predefined resources {";
  for (size_t idx = 0; idx < this->capacities.size(); idx++) {
    const auto &cap = this->capacities[idx];
    out << "(" << cap.total << ":" << cap.available << ") ";
  }
  out << "}" << std::endl;
  out << " node custom resources {";
  for (const auto &entry : this->custom_resources) {
    out << entry.first << ":(" << entry.second.total << ":" << entry.second.available
        << ") ";
  }
  out << "}" << std::endl;
  return out.str();
}
/// Human-readable dump of a task request: "(demand:soft)" pairs for the
/// predefined resources, then "id:(demand:soft)" for custom resources.
std::string TaskRequest::DebugString() {
  std::stringstream out;
  out << std::endl << " request predefined resources {";
  for (size_t idx = 0; idx < this->predefined_resources.size(); idx++) {
    const auto &req = this->predefined_resources[idx];
    out << "(" << req.demand << ":" << req.soft << ") ";
  }
  out << "}" << std::endl;
  out << " request custom resources {";
  for (size_t idx = 0; idx < this->custom_resources.size(); idx++) {
    const auto &entry = this->custom_resources[idx];
    out << entry.id << ":"
        << "(" << entry.req.demand << ":" << entry.req.soft << ") ";
  }
  out << "}" << std::endl;
  return out.str();
}
/// Equality: every predefined capacity matches and the custom-resource maps
/// contain exactly the same (id -> capacity) entries.
bool NodeResources::operator==(const NodeResources &other) {
  for (size_t i = 0; i < PredefinedResources_MAX; i++) {
    if (this->capacities[i].total != other.capacities[i].total) {
      return false;
    }
    if (this->capacities[i].available != other.capacities[i].available) {
      return false;
    }
  }
  // BUG FIX: a custom-resource count mismatch means the nodes are NOT equal;
  // the original returned true here, making unequal nodes compare equal.
  if (this->custom_resources.size() != other.custom_resources.size()) {
    return false;
  }
  for (auto it1 = this->custom_resources.begin(); it1 != this->custom_resources.end();
       ++it1) {
    auto it2 = other.custom_resources.find(it1->first);
    if (it2 == other.custom_resources.end()) {
      return false;
    }
    if (it1->second.total != it2->second.total) {
      return false;
    }
    if (it1->second.available != it2->second.available) {
      return false;
    }
  }
  return true;
}
/// Construct a scheduler whose local node is identified by an integer ID.
/// The local node is immediately registered with its resources.
ClusterResourceScheduler::ClusterResourceScheduler(
    int64_t local_node_id, const NodeResources &local_node_resources)
    : local_node_id_(local_node_id) {
  AddOrUpdateNode(local_node_id_, local_node_resources);
}
/// Construct a scheduler keyed by a string node ID; the string is interned
/// into an integer via string_to_int_map_. The single resource map is used
/// as both the total and the available capacities of the local node.
ClusterResourceScheduler::ClusterResourceScheduler(
    const std::string &local_node_id,
    const std::unordered_map<std::string, double> &local_node_resources) {
  local_node_id_ = string_to_int_map_.Insert(local_node_id);
  AddOrUpdateNode(local_node_id, local_node_resources, local_node_resources);
}
/// Add or update a node given string-keyed resource maps: converts the maps
/// into a NodeResources struct and interns the node ID before delegating to
/// the integer-ID overload.
void ClusterResourceScheduler::AddOrUpdateNode(
    const std::string &node_id,
    const std::unordered_map<std::string, double> &resources_total,
    const std::unordered_map<std::string, double> &resources_available) {
  NodeResources node_resources;
  ResourceMapToNodeResources(resources_total, resources_available, &node_resources);
  AddOrUpdateNode(string_to_int_map_.Insert(node_id), node_resources);
}
/// Copy the total and available capacity of every predefined resource slot
/// from `new_resources` into `old_resources`.
void ClusterResourceScheduler::SetPredefinedResources(const NodeResources &new_resources,
                                                      NodeResources *old_resources) {
  for (size_t idx = 0; idx < PredefinedResources_MAX; idx++) {
    const auto &src = new_resources.capacities[idx];
    auto &dst = old_resources->capacities[idx];
    dst.total = src.total;
    dst.available = src.available;
  }
}
/// Replace the destination custom-resource map's contents with a copy of the
/// source map's entries.
void ClusterResourceScheduler::SetCustomResources(
    const absl::flat_hash_map<int64_t, ResourceCapacity> &new_custom_resources,
    absl::flat_hash_map<int64_t, ResourceCapacity> *old_custom_resources) {
  old_custom_resources->clear();
  old_custom_resources->insert(new_custom_resources.begin(),
                               new_custom_resources.end());
}
void ClusterResourceScheduler::AddOrUpdateNode(int64_t node_id,
const NodeResources &node_resources) {
auto it = nodes_.find(node_id);
if (it == nodes_.end()) {
// This node is new, so add it to the map.
nodes_.emplace(node_id, node_resources);
} else {
// This node exists, so update its resources.
NodeResources &resources = it->second;
SetPredefinedResources(node_resources, &resources);
SetCustomResources(node_resources.custom_resources, &resources.custom_resources);
}
}
/// Remove a node and its interned string ID from the scheduler.
///
/// \return false if the node was not known, true on successful removal.
bool ClusterResourceScheduler::RemoveNode(int64_t node_id) {
  auto it = nodes_.find(node_id);
  if (it == nodes_.end()) {
    // Nothing to remove.
    return false;
  }
  it->second.custom_resources.clear();
  nodes_.erase(it);
  string_to_int_map_.Remove(node_id);
  return true;
}
/// Check whether `task_req` can run on node `node_id` given `resources`.
///
/// \return -1 if any hard constraint is violated (the task cannot be
/// scheduled on this node); otherwise the number of soft-constraint
/// violations, where 0 means a perfect fit.
int64_t ClusterResourceScheduler::IsSchedulable(const TaskRequest &task_req,
                                                int64_t node_id,
                                                const NodeResources &resources) {
  int violations = 0;
  // First, check predefined resources.
  for (size_t i = 0; i < PredefinedResources_MAX; i++) {
    if (task_req.predefined_resources[i].demand > resources.capacities[i].available) {
      if (task_req.predefined_resources[i].soft) {
        // A soft constraint has been violated.
        violations++;
      } else {
        // A hard constraint has been violated.
        return -1;
      }
    }
  }
  // Next, check custom resources.
  for (size_t i = 0; i < task_req.custom_resources.size(); i++) {
    auto it = resources.custom_resources.find(task_req.custom_resources[i].id);
    if (it == resources.custom_resources.end()) {
      // Requested resource doesn't exist at this node.
      if (task_req.custom_resources[i].req.soft) {
        violations++;
      } else {
        return -1;
      }
    } else {
      if (task_req.custom_resources[i].req.demand > it->second.available) {
        // Resource constraint is violated.
        if (task_req.custom_resources[i].req.soft) {
          violations++;
        } else {
          return -1;
        }
      }
    }
  }
  // Finally, placement hints act as soft constraints only: not being one of
  // the hinted nodes adds a single violation.
  if (task_req.placement_hints.size() > 0) {
    auto it_p = task_req.placement_hints.find(node_id);
    if (it_p == task_req.placement_hints.end()) {
      // Node not found in the placement_hints list, so
      // record this as a soft constraint violation.
      violations++;
    }
  }
  return violations;
}
/// Find the node with the fewest soft-constraint violations that can host
/// `task_req`. Preference order: the local node if it fits perfectly, then
/// any placement-hint node that fits perfectly, then the node with the
/// minimum number of violations.
///
/// \param[out] total_violations Soft violations at the chosen node (0 when a
/// perfect fit is found).
/// \return The chosen node ID, or -1 if no node satisfies the hard
/// constraints.
int64_t ClusterResourceScheduler::GetBestSchedulableNode(const TaskRequest &task_req,
                                                         int64_t *total_violations) {
  // Min number of violations across all nodes that can schedule the request.
  int64_t min_violations = INT_MAX;
  // Node associated to min_violations.
  int64_t best_node = -1;
  *total_violations = 0;
  // Check whether local node is schedulable. We return immediately
  // the local node only if there are zero violations.
  auto it = nodes_.find(local_node_id_);
  if (it != nodes_.end()) {
    if (IsSchedulable(task_req, it->first, it->second) == 0) {
      return local_node_id_;
    }
  }
  // Check whether any node in the request placement_hints, satisfies
  // all resource constraints of the request.
  for (auto it_p = task_req.placement_hints.begin();
       it_p != task_req.placement_hints.end(); ++it_p) {
    auto it = nodes_.find(*it_p);
    if (it != nodes_.end()) {
      if (IsSchedulable(task_req, it->first, it->second) == 0) {
        return it->first;
      }
    }
  }
  // Otherwise scan every node, tracking the fewest soft violations.
  for (auto it = nodes_.begin(); it != nodes_.end(); ++it) {
    // Return -1 if node not schedulable. otherwise return the number
    // of soft constraint violations.
    int64_t violations;
    if ((violations = IsSchedulable(task_req, it->first, it->second)) == -1) {
      continue;
    }
    // Update the node with the smallest number of soft constraints violated.
    if (min_violations > violations) {
      min_violations = violations;
      best_node = it->first;
    }
    if (violations == 0) {
      *total_violations = 0;
      return best_node;
    }
  }
  // NOTE(review): when no node is schedulable, best_node is -1 and
  // *total_violations is left at INT_MAX -- callers appear to check the node
  // ID, not the violation count; confirm before relying on it.
  *total_violations = min_violations;
  return best_node;
}
/// String-keyed convenience wrapper: converts the resource map into a
/// TaskRequest, runs the integer-ID scheduler, and maps the result back to
/// the node-name string.
///
/// \param[out] total_violations Soft violations at the chosen node.
/// \return The chosen node's string ID, or "" when no node can host the
/// request.
std::string ClusterResourceScheduler::GetBestSchedulableNode(
    const std::unordered_map<std::string, double> &task_resources,
    int64_t *total_violations) {
  TaskRequest task_request;
  ResourceMapToTaskRequest(task_resources, &task_request);
  // Unused local `id_string` removed.
  int64_t node_id = GetBestSchedulableNode(task_request, total_violations);
  if (node_id == -1) {
    return "";
  }
  return string_to_int_map_.Get(node_id);
}
/// Subtract the task's resource demands from `node_id`'s available
/// resources, clamping each value at zero.
///
/// \return false if the node is unknown or can no longer schedule the
/// request; true on success.
bool ClusterResourceScheduler::SubtractNodeAvailableResources(
    int64_t node_id, const TaskRequest &task_req) {
  auto it = nodes_.find(node_id);
  if (it == nodes_.end()) {
    return false;
  }
  NodeResources &resources = it->second;
  // Just double check this node can still schedule the task request.
  // BUG FIX: check schedulability against `node_id` (the node we subtract
  // from), not `local_node_id_` -- the node id feeds the placement-hint
  // check inside IsSchedulable, so the wrong id could mis-count violations
  // for remote nodes.
  if (IsSchedulable(task_req, node_id, resources) == -1) {
    return false;
  }
  for (size_t i = 0; i < PredefinedResources_MAX; i++) {
    resources.capacities[i].available =
        std::max(static_cast<int64_t>(0), resources.capacities[i].available -
                                              task_req.predefined_resources[i].demand);
  }
  for (size_t i = 0; i < task_req.custom_resources.size(); i++) {
    auto res_it = resources.custom_resources.find(task_req.custom_resources[i].id);
    if (res_it != resources.custom_resources.end()) {
      res_it->second.available =
          std::max(static_cast<int64_t>(0),
                   res_it->second.available - task_req.custom_resources[i].req.demand);
    }
  }
  return true;
}
/// String-keyed wrapper: converts the resource map into a TaskRequest and
/// subtracts it from the node resolved via string_to_int_map_.
bool ClusterResourceScheduler::SubtractNodeAvailableResources(
    const std::string &node_id,
    const std::unordered_map<std::string, double> &resource_map) {
  TaskRequest task_request;
  ResourceMapToTaskRequest(resource_map, &task_request);
  return SubtractNodeAvailableResources(string_to_int_map_.Get(node_id), task_request);
}
bool ClusterResourceScheduler::AddNodeAvailableResources(int64_t node_id,
const TaskRequest &task_req) {
auto it = nodes_.find(node_id);
if (it == nodes_.end()) {
return false;
}
NodeResources &resources = it->second;
for (size_t i = 0; i < PredefinedResources_MAX; i++) {
resources.capacities[i].available =
resources.capacities[i].available + task_req.predefined_resources[i].demand;
}
for (size_t i = 0; i < task_req.custom_resources.size(); i++) {
auto it = resources.custom_resources.find(task_req.custom_resources[i].id);
if (it != resources.custom_resources.end()) {
it->second.available =
it->second.available + task_req.custom_resources[i].req.demand;
}
}
return true;
}
/// String-keyed wrapper: converts the resource map into a TaskRequest and
/// adds it back to the node resolved via string_to_int_map_.
bool ClusterResourceScheduler::AddNodeAvailableResources(
    const std::string &node_id,
    const std::unordered_map<std::string, double> &resource_map) {
  TaskRequest task_request;
  ResourceMapToTaskRequest(resource_map, &task_request);
  return AddNodeAvailableResources(string_to_int_map_.Get(node_id), task_request);
}
/// Copy the resources of `node_id` into *ret_resources.
///
/// \return true if the node is known, false otherwise (output untouched).
bool ClusterResourceScheduler::GetNodeResources(int64_t node_id,
                                                NodeResources *ret_resources) {
  auto it = nodes_.find(node_id);
  if (it == nodes_.end()) {
    return false;
  }
  *ret_resources = it->second;
  return true;
}
/// Number of nodes currently tracked (including the local node).
int64_t ClusterResourceScheduler::NumNodes() { return nodes_.size(); }
/// Convert string-keyed total/available resource maps into a NodeResources
/// struct. Known names (CPU/GPU/TPU/memory) fill the predefined capacity
/// slots; any other name is interned and stored as a custom resource.
/// A resource present in the total map but absent from the available map
/// gets an available capacity of 0.
void ClusterResourceScheduler::ResourceMapToNodeResources(
    const std::unordered_map<std::string, double> &resource_map_total,
    const std::unordered_map<std::string, double> &resource_map_available,
    NodeResources *node_resources) {
  // Start every predefined slot at zero; only the slots named in the map
  // are overwritten below.
  node_resources->capacities.resize(PredefinedResources_MAX);
  for (size_t i = 0; i < PredefinedResources_MAX; i++) {
    node_resources->capacities[i].total = node_resources->capacities[i].available = 0;
  }
  for (auto it = resource_map_total.begin(); it != resource_map_total.end(); ++it) {
    ResourceCapacity resource_capacity;
    // NOTE(review): capacities are truncated from double to int64_t here;
    // fractional resource amounts are silently floored -- confirm intended.
    resource_capacity.total = (int64_t)it->second;
    auto it2 = resource_map_available.find(it->first);
    if (it2 == resource_map_available.end()) {
      resource_capacity.available = 0;
    } else {
      resource_capacity.available = (int64_t)it2->second;
    }
    if (it->first == ray::kCPU_ResourceLabel) {
      node_resources->capacities[CPU] = resource_capacity;
    } else if (it->first == ray::kGPU_ResourceLabel) {
      node_resources->capacities[GPU] = resource_capacity;
    } else if (it->first == ray::kTPU_ResourceLabel) {
      node_resources->capacities[TPU] = resource_capacity;
    } else if (it->first == ray::kMemory_ResourceLabel) {
      node_resources->capacities[MEM] = resource_capacity;
    } else {
      // This is a custom resource.
      node_resources->custom_resources.emplace(string_to_int_map_.Insert(it->first),
                                               resource_capacity);
    }
  }
}
/// Convert a string-keyed resource demand map into a TaskRequest. Known
/// names (CPU/GPU/TPU/memory) fill the predefined slots; any other name is
/// interned and appended as a custom resource. All demands are hard
/// constraints (soft == false).
void ClusterResourceScheduler::ResourceMapToTaskRequest(
    const std::unordered_map<std::string, double> &resource_map,
    TaskRequest *task_request) {
  size_t num_custom = 0;
  task_request->predefined_resources.resize(PredefinedResources_MAX);
  task_request->custom_resources.resize(resource_map.size());
  // BUG FIX: the original loop shadowed the outer counter and zeroed
  // predefined_resources[0] on every iteration (index literal `0` instead
  // of `i`), leaving slots 1..PredefinedResources_MAX-1 uninitialized.
  for (size_t i = 0; i < PredefinedResources_MAX; i++) {
    task_request->predefined_resources[i].demand = 0;
    task_request->predefined_resources[i].soft = false;
  }
  for (auto it = resource_map.begin(); it != resource_map.end(); ++it) {
    if (it->first == ray::kCPU_ResourceLabel) {
      task_request->predefined_resources[CPU].demand = it->second;
    } else if (it->first == ray::kGPU_ResourceLabel) {
      task_request->predefined_resources[GPU].demand = it->second;
    } else if (it->first == ray::kTPU_ResourceLabel) {
      task_request->predefined_resources[TPU].demand = it->second;
    } else if (it->first == ray::kMemory_ResourceLabel) {
      task_request->predefined_resources[MEM].demand = it->second;
    } else {
      task_request->custom_resources[num_custom].id =
          string_to_int_map_.Insert(it->first);
      task_request->custom_resources[num_custom].req.demand = it->second;
      task_request->custom_resources[num_custom].req.soft = false;
      num_custom++;
    }
  }
  // Shrink to the number of custom resources actually encountered.
  task_request->custom_resources.resize(num_custom);
}
void ClusterResourceScheduler::UpdateResourceCapacity(const std::string &client_id_string,
                                                      const std::string &resource_name,
                                                      int64_t resource_total) {
  /// Set the total capacity of `resource_name` on node `client_id_string`.
  /// Available capacity shifts by the same delta; both values are clamped
  /// at zero. Unknown custom resources are created with available == total.
  int64_t client_id = string_to_int_map_.Get(client_id_string);
  auto it = nodes_.find(client_id);
  if (it == nodes_.end()) {
    // Unknown node; nothing to update.
    return;
  }
  // Map the resource name to a predefined-resource index, if any.
  int idx = -1;
  if (resource_name == ray::kCPU_ResourceLabel) {
    idx = (int)CPU;
  } else if (resource_name == ray::kGPU_ResourceLabel) {
    idx = (int)GPU;
  } else if (resource_name == ray::kTPU_ResourceLabel) {
    idx = (int)TPU;
  } else if (resource_name == ray::kMemory_ResourceLabel) {
    idx = (int)MEM;
  }
  if (idx != -1) {
    // Predefined resource: apply the delta with clamping.
    int64_t diff_capacity = resource_total - it->second.capacities[idx].total;
    it->second.capacities[idx].total += diff_capacity;
    it->second.capacities[idx].available += diff_capacity;
    if (it->second.capacities[idx].available < 0) {
      it->second.capacities[idx].available = 0;
    }
    if (it->second.capacities[idx].total < 0) {
      it->second.capacities[idx].total = 0;
    }
  } else {
    // Custom resource.
    int64_t resource_id = string_to_int_map_.Insert(resource_name);
    auto itr = it->second.custom_resources.find(resource_id);
    if (itr != it->second.custom_resources.end()) {
      // Existing custom resource: apply the delta with clamping.
      int64_t diff_capacity = resource_total - itr->second.total;
      itr->second.total += diff_capacity;
      itr->second.available += diff_capacity;
      if (itr->second.available < 0) {
        itr->second.available = 0;
      }
      if (itr->second.total < 0) {
        itr->second.total = 0;
      }
    } else {
      // New custom resource. (The original code constructed and emplaced
      // unconditionally, silently relying on emplace being a no-op when
      // the key already exists.)
      ResourceCapacity resource_capacity;
      resource_capacity.total = resource_capacity.available = resource_total;
      it->second.custom_resources.emplace(resource_id, resource_capacity);
    }
  }
}
/// Delete resource `resource_name` from node `client_id_string`.
///
/// Predefined resources (CPU/GPU/TPU/MEM) occupy fixed vector slots, so they
/// cannot be erased; only their total capacity is zeroed.
/// NOTE(review): for predefined resources, `available` is left untouched
/// while `total` is zeroed — confirm a deleted resource reporting residual
/// availability is intended.
void ClusterResourceScheduler::DeleteResource(const std::string &client_id_string,
                                              const std::string &resource_name) {
  int64_t client_id = string_to_int_map_.Get(client_id_string);
  auto it = nodes_.find(client_id);
  if (it == nodes_.end()) {
    // Unknown node; nothing to delete.
    return;
  }
  // Map the resource name to a predefined-resource index, if any.
  int idx = -1;
  if (resource_name == ray::kCPU_ResourceLabel) {
    idx = (int)CPU;
  } else if (resource_name == ray::kGPU_ResourceLabel) {
    idx = (int)GPU;
  } else if (resource_name == ray::kTPU_ResourceLabel) {
    idx = (int)TPU;
  } else if (resource_name == ray::kMemory_ResourceLabel) {
    idx = (int)MEM;
  };
  if (idx != -1) {
    // Predefined resource: zero its total capacity.
    it->second.capacities[idx].total = 0;
  } else {
    // Custom resource: drop it from the node and release its string ID.
    int64_t resource_id = string_to_int_map_.Get(resource_name);
    auto itr = it->second.custom_resources.find(resource_id);
    if (itr != it->second.custom_resources.end()) {
      string_to_int_map_.Remove(resource_id);
      it->second.custom_resources.erase(itr);
    }
  }
}
std::string ClusterResourceScheduler::DebugString(void) {
std::stringstream buffer;
buffer << std::endl << "local node id: " << local_node_id_ << std::endl;
for (auto it = nodes_.begin(); it != nodes_.end(); ++it) {
buffer << "node id: " << it->first << std::endl;
buffer << it->second.DebugString();
}
return buffer.str();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/scheduling/cluster_resource_scheduler.h
|
C/C++ Header
|
#ifndef RAY_COMMON_SCHEDULING_SCHEDULING_H
#define RAY_COMMON_SCHEDULING_SCHEDULING_H
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "ray/common/scheduling/scheduling_ids.h"
#include "ray/common/task/scheduling_resources.h"
#include "ray/util/logging.h"
#include <iostream>
#include <sstream>
#include <vector>
/// List of predefined resources. PredefinedResources_MAX is the count and
/// must stay last.
enum PredefinedResources { CPU, MEM, GPU, TPU, PredefinedResources_MAX };
/// Total and currently-available capacity of one resource.
struct ResourceCapacity {
  // Total capacity of the resource.
  int64_t total;
  // Capacity still available for scheduling.
  int64_t available;
};
struct ResourceRequest {
  /// Amount of resource being requested.
  int64_t demand;
  /// Specify whether the request is soft or hard.
  /// If hard, the entire request is denied if the demand exceeds the resource
  /// availability. Otherwise, the request can be still be granted.
  /// Preferences are given to the nodes with the lowest number of violations.
  bool soft;
};
/// Resource request, including resource ID. This is used for custom resources.
struct ResourceRequestWithId {
  /// Resource ID.
  int64_t id;
  /// Resource request.
  ResourceRequest req;
};
struct NodeResources {
  /// Available and total capacities for predefined resources.
  std::vector<ResourceCapacity> capacities;
  /// Map containing custom resources. The key of each entry represents the
  /// custom resource ID.
  absl::flat_hash_map<int64_t, ResourceCapacity> custom_resources;
  /// Returns if this equals another node resources.
  bool operator==(const NodeResources &other);
  /// Returns human-readable string for these resources.
  std::string DebugString();
};
struct TaskRequest {
  /// List of predefined resources required by the task.
  std::vector<ResourceRequest> predefined_resources;
  /// List of custom resources required by the task.
  std::vector<ResourceRequestWithId> custom_resources;
  /// List of placement hints. A placement hint is a node on which
  /// we desire to run this task. This is a soft constraint in that
  /// the task will run on a different node in the cluster, if none of the
  /// nodes in this list can schedule this task.
  absl::flat_hash_set<int64_t> placement_hints;
  /// Returns human-readable string for this task request.
  std::string DebugString();
};
/// Class encapsulating the cluster resources and the logic to assign
/// tasks to nodes based on the task's constraints and the available
/// resources at those nodes.
class ClusterResourceScheduler {
  /// List of nodes in the clusters and their resources organized as a map.
  /// The key of the map is the node ID.
  absl::flat_hash_map<int64_t, NodeResources> nodes_;
  /// ID of local node.
  int64_t local_node_id_;
  /// Keep the mapping between node and resource IDs in string representation
  /// to integer representation. Used for improving map performance.
  StringIdMap string_to_int_map_;
  /// Set predefined resources.
  ///
  /// \param[in] new_resources: New predefined resources.
  /// \param[out] old_resources: Predefined resources to be updated.
  void SetPredefinedResources(const NodeResources &new_resources,
                              NodeResources *old_resources);
  /// Set custom resources.
  ///
  /// \param[in] new_resources: New custom resources.
  /// \param[out] old_resources: Custom resources to be updated.
  void SetCustomResources(
      const absl::flat_hash_map<int64_t, ResourceCapacity> &new_custom_resources,
      absl::flat_hash_map<int64_t, ResourceCapacity> *old_custom_resources);
  /// Returns human-readable string for this scheduler.
  std::string DebugString();

 public:
  ClusterResourceScheduler(void){};
  /// Constructor initializing the resources associated with the local node.
  ///
  /// \param local_node_id: ID of local node,
  /// \param local_node_resources: The total and the available resources associated
  /// with the local node.
  ClusterResourceScheduler(int64_t local_node_id,
                           const NodeResources &local_node_resources);
  /// Same as above, but with string node ID and string-keyed resource map.
  ClusterResourceScheduler(
      const std::string &local_node_id,
      const std::unordered_map<std::string, double> &local_node_resources);
  /// Add a new node or overwrite the resources of an existing node.
  ///
  /// \param node_id: Node ID.
  /// \param node_resources: Up to date total and available resources of the node.
  void AddOrUpdateNode(int64_t node_id, const NodeResources &node_resources);
  /// String-keyed overload of AddOrUpdateNode.
  void AddOrUpdateNode(
      const std::string &node_id,
      const std::unordered_map<std::string, double> &resource_map_total,
      const std::unordered_map<std::string, double> &resource_map_available);
  /// Remove node from the cluster data structure. This happens
  /// when a node fails or it is removed from the cluster.
  ///
  /// \param ID of the node to be removed.
  bool RemoveNode(int64_t node_id);
  /// Check whether a task request can be scheduled given a node.
  ///
  /// \param task_req: Task request to be scheduled.
  /// \param node_id: ID of the node.
  /// \param resources: Node's resources. (Note: Technically, this is
  ///                   redundant, as we can get the node's resources from nodes_
  ///                   using node_id. However, typically both node_id and resources
  ///                   are available when we call this function, and this way we avoid
  ///                   a map find call which could be expensive.)
  ///
  /// \return: -1, if the request cannot be scheduled. This happens when at
  ///          least a hard constraints is violated.
  ///          >= 0, the number of soft constraint violations. If 0, no
  ///          constraint is violated.
  int64_t IsSchedulable(const TaskRequest &task_req, int64_t node_id,
                        const NodeResources &resources);
  ///  Find a node in the cluster on which we can schedule a given task request.
  ///
  ///  First, this function checks whether the local node can schedule
  ///  the request without violating any constraints. If yes, it returns the
  ///  ID of the local node.
  ///
  ///  If not, this function checks whether there is another node in the cluster
  ///  that satisfies all request's constraints (both soft and hard).
  ///
  ///  If no such node exists, the function checks whether there are nodes
  ///  that satisfy all the request's hard constraints, but might violate some
  ///  soft constraints. Among these nodes, it returns a node which violates
  ///  the least number of soft constraints.
  ///
  ///  Finally, if no such node exists, return -1.
  ///
  ///  \param task_request: Task to be scheduled.
  ///  \param violations: The number of soft constraint violations associated
  ///                     with the node returned by this function (assuming
  ///                     a node that can schedule task_req is found).
  ///
  ///  \return -1, if no node can schedule the current request; otherwise,
  ///          return the ID of a node that can schedule the task request.
  int64_t GetBestSchedulableNode(const TaskRequest &task_request, int64_t *violations);
  /// Similar to
  ///    int64_t GetBestSchedulableNode(const TaskRequest &task_request, int64_t
  ///    *violations)
  /// but the return value is different:
  /// \return "", if no node can schedule the current request; otherwise,
  ///          return the ID in string format of a node that can schedule the
  ///          task request.
  std::string GetBestSchedulableNode(
      const std::unordered_map<std::string, double> &task_request, int64_t *violations);
  /// Decrease the available resources of a node when a task request is
  /// scheduled on the given node.
  ///
  /// \param node_id: ID of node on which request is being scheduled.
  /// \param task_req: task request being scheduled.
  ///
  /// \return true, if task_req can be indeed scheduled on the node,
  /// and false otherwise.
  bool SubtractNodeAvailableResources(int64_t node_id, const TaskRequest &task_request);
  /// String-keyed overload of SubtractNodeAvailableResources.
  bool SubtractNodeAvailableResources(
      const std::string &node_id,
      const std::unordered_map<std::string, double> &task_request);
  /// Increase available resources of a node when a worker has Finished
  /// a task.
  ///
  /// \param node_id: ID of node on which request is being scheduled.
  /// \param task_request: resource requests of the task finishing execution.
  ///
  /// \return true, if task_req can be indeed scheduled on the node,
  /// and false otherwise.
  bool AddNodeAvailableResources(int64_t node_id, const TaskRequest &task_request);
  /// String-keyed overload of AddNodeAvailableResources.
  bool AddNodeAvailableResources(
      const std::string &node_id,
      const std::unordered_map<std::string, double> &task_request);
  /// Return resources associated to the given node_id in ret_resources.
  /// If node_id not found, return false; otherwise return true.
  bool GetNodeResources(int64_t node_id, NodeResources *ret_resources);
  /// Get number of nodes in the cluster.
  int64_t NumNodes();
  /// Convert a map of resources to a TaskRequest data structure.
  void ResourceMapToTaskRequest(
      const std::unordered_map<std::string, double> &resource_map,
      TaskRequest *task_request);
  /// Convert total/available resource maps to a NodeResources data structure.
  void ResourceMapToNodeResources(
      const std::unordered_map<std::string, double> &resource_map_total,
      const std::unordered_map<std::string, double> &resource_map_available,
      NodeResources *node_resources);
  /// Update total capacity of resource resource_name at node client_id.
  void UpdateResourceCapacity(const std::string &client_id,
                              const std::string &resource_name, int64_t resource_total);
  /// Delete resource resource_name from node client_id_string.
  void DeleteResource(const std::string &client_id_string,
                      const std::string &resource_name);
  /// Check whether two node resources are identical.
  bool EqualNodeResources(const NodeResources &node_resources1,
                          const NodeResources &node_resources2);
};
#endif // RAY_COMMON_SCHEDULING_SCHEDULING_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/scheduling/scheduling_ids.cc
|
C++
|
#include "scheduling_ids.h"
int64_t StringIdMap::Get(const std::string &string_id) {
  // Return the integer ID associated with `string_id`, or -1 if unknown.
  const auto it = string_to_int_.find(string_id);
  return (it == string_to_int_.end()) ? -1 : it->second;
}
std::string StringIdMap::Get(uint64_t id) {
  // Return the string associated with `id`, or "-1" if the ID is unknown.
  const auto it = int_to_string_.find(id);
  if (it != int_to_string_.end()) {
    return it->second;
  }
  return "-1";
}
int64_t StringIdMap::Insert(const std::string &string_id, uint8_t max_id) {
  /// Insert `string_id` and return its integer ID; if it is already present,
  /// return the existing ID. When `max_id` != 0, IDs are confined to
  /// [0, max_id) to force hash collisions (used for testing).
  auto sit = string_to_int_.find(string_id);
  if (sit != string_to_int_.end()) {
    return sit->second;
  }
  int64_t id = hasher_(string_id);
  if (max_id != 0) {
    // Bug fix: the original reduced modulo the fixed MAX_ID_TEST constant
    // here instead of the caller-supplied max_id (masked because the test
    // happened to pass max_id == MAX_ID_TEST).
    id = id % max_id;
  }
  for (size_t i = 0; true; i++) {
    auto it = int_to_string_.find(id);
    if (it == int_to_string_.end()) {
      /// No hash collision, so associate string_id with id.
      string_to_int_.emplace(string_id, id);
      int_to_string_.emplace(id, string_id);
      break;
    }
    // Collision: rehash with an increasing salt until a free slot is found.
    id = hasher_(string_id + std::to_string(i));
    if (max_id != 0) {
      id = id % max_id;
    }
  }
  return id;
}
void StringIdMap::Remove(const std::string &string_id) {
  // Erase the mapping in both directions, if present.
  auto sit = string_to_int_.find(string_id);
  if (sit != string_to_int_.end()) {
    int_to_string_.erase(sit->second);
    string_to_int_.erase(sit);
  }
}
void StringIdMap::Remove(int64_t id) {
  // Erase the mapping in both directions, if present.
  auto it = int_to_string_.find(id);
  if (it != int_to_string_.end()) {
    string_to_int_.erase(it->second);
    int_to_string_.erase(it);
  }
}
int64_t StringIdMap::Count() {
  // Number of string<->integer mappings currently stored.
  return static_cast<int64_t>(string_to_int_.size());
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/scheduling/scheduling_ids.h
|
C/C++ Header
|
#ifndef RAY_COMMON_SCHEDULING_SCHEDULING_IDS_H
#define RAY_COMMON_SCHEDULING_SCHEDULING_IDS_H
#include "absl/container/flat_hash_map.h"
#include "ray/util/logging.h"
#include <string>
/// Limit the ID range to test for collisions.
#define MAX_ID_TEST 8
/// Class to map string IDs to unique integer IDs and back.
/// IDs are derived from std::hash of the string, with linear re-hashing on
/// collision.
class StringIdMap {
  // Forward map: string ID -> integer ID.
  absl::flat_hash_map<std::string, int64_t> string_to_int_;
  // Reverse map: integer ID -> string ID.
  absl::flat_hash_map<int64_t, std::string> int_to_string_;
  std::hash<std::string> hasher_;

 public:
  StringIdMap(){};
  ~StringIdMap(){};
  /// Get integer ID associated with an existing string ID.
  ///
  /// \param String ID.
  /// \return The integer ID associated with the given string ID, or -1 if
  /// the string ID is unknown.
  int64_t Get(const std::string &string_id);
  /// Get string ID associated with an existing integer ID.
  ///
  /// \param Integer ID.
  /// \return The string ID associated with the given integer ID, or "-1" if
  /// the integer ID is unknown.
  std::string Get(uint64_t id);
  /// Insert a string ID and get the associated integer ID.
  ///
  /// \param String ID to be inserted.
  /// \param max_id The number of unique possible ids. This is used
  ///               to force collisions for testing. If 0, it is not used.
  /// NOTE(review): the parameter is named `num_ids` here but `max_id` in the
  /// definition; the two should be unified.
  int64_t Insert(const std::string &string_id, uint8_t num_ids = 0);
  /// Delete an ID identified by its string format.
  ///
  /// \param ID to be deleted.
  void Remove(const std::string &string_id);
  /// Delete an ID identified by its integer format.
  ///
  /// \param ID to be deleted.
  void Remove(int64_t id);
  /// Get number of identifiers.
  int64_t Count();
};
#endif // RAY_COMMON_SCHEDULING_SCHEDULING_IDS_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/scheduling/scheduling_test.cc
|
C++
|
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <string>
#include "ray/common/scheduling/cluster_resource_scheduler.h"
#include "ray/common/scheduling/scheduling_ids.h"
#ifdef UNORDERED_VS_ABSL_MAPS_EVALUATION
#include <chrono>
#include "absl/container/flat_hash_map.h"
#endif // UNORDERED_VS_ABSL_MAPS_EVALUATION
using namespace std;
/// Used to pass empty vector arguments.
vector<int64_t> EmptyIntVector;
vector<bool> EmptyBoolVector;
void initTaskRequest(TaskRequest &tr, vector<int64_t> &pred_demands,
                     vector<bool> &pred_soft, vector<int64_t> &cust_ids,
                     vector<int64_t> &cust_demands, vector<bool> &cust_soft,
                     vector<int64_t> &placement_hints) {
  /// Populate a TaskRequest from parallel demand/softness vectors.
  // Requested predefined resources.
  for (size_t idx = 0; idx < pred_demands.size(); idx++) {
    ResourceRequest req;
    req.demand = pred_demands[idx];
    req.soft = pred_soft[idx];
    tr.predefined_resources.push_back(req);
  }
  // Zero-fill the remaining predefined slots.
  for (size_t idx = pred_demands.size(); idx < PredefinedResources_MAX; idx++) {
    ResourceRequest req;
    req.demand = 0;
    req.soft = false;
    tr.predefined_resources.push_back(req);
  }
  // Custom resources, keyed by integer ID.
  for (size_t idx = 0; idx < cust_ids.size(); idx++) {
    ResourceRequestWithId req;
    req.id = cust_ids[idx];
    req.req.demand = cust_demands[idx];
    req.req.soft = cust_soft[idx];
    tr.custom_resources.push_back(req);
  }
  // Desired placement nodes (a soft constraint).
  for (int64_t hint : placement_hints) {
    tr.placement_hints.insert(hint);
  }
}
void initNodeResources(NodeResources &node, vector<int64_t> &pred_capacities,
                       vector<int64_t> &cust_ids, vector<int64_t> &cust_capacities) {
  /// Populate a NodeResources from capacity vectors (available == total).
  for (int64_t capacity : pred_capacities) {
    ResourceCapacity rc;
    rc.total = rc.available = capacity;
    node.capacities.push_back(rc);
  }
  // Zero-pad so every predefined slot exists.
  for (size_t i = pred_capacities.size();
       i < static_cast<size_t>(PredefinedResources_MAX); i++) {
    ResourceCapacity rc;
    rc.total = rc.available = 0;
    node.capacities.push_back(rc);
  }
  // Custom resources keyed by integer ID.
  for (size_t i = 0; i < cust_capacities.size(); i++) {
    ResourceCapacity rc;
    rc.total = rc.available = cust_capacities[i];
    node.custom_resources.insert(pair<int64_t, ResourceCapacity>(cust_ids[i], rc));
  }
}
void initCluster(ClusterResourceScheduler &cluster_resources, int n) {
  /// Populate `cluster_resources` with `n` nodes having random capacities.
  for (int i = 0; i < n; i++) {
    // Use fresh vectors per node. The original declared them outside the
    // loop and never cleared them, so entries accumulated and every node
    // after the first received oversized capacity lists.
    vector<int64_t> pred_capacities;
    vector<int64_t> cust_ids;
    vector<int64_t> cust_capacities;
    for (int k = 0; k < PredefinedResources_MAX; k++) {
      if (rand() % 3 == 0) {
        pred_capacities.push_back(0);
      } else {
        pred_capacities.push_back(rand() % 10);
      }
    }
    // Random subset of custom resources with IDs in [0, n).
    int m = min(rand() % PredefinedResources_MAX, n);
    int start = rand() % n;
    for (int k = 0; k < m; k++) {
      cust_ids.push_back((start + k) % n);
      cust_capacities.push_back(rand() % 10);
    }
    NodeResources node_resources;
    initNodeResources(node_resources, pred_capacities, cust_ids, cust_capacities);
    cluster_resources.AddOrUpdateNode(i, node_resources);
  }
}
bool nodeResourcesEqual(const NodeResources &nr1, const NodeResources &nr2) {
  /// Deep equality check over predefined capacities and custom resources.
  if (nr1.capacities.size() != nr2.capacities.size()) {
    cout << nr1.capacities.size() << " " << nr2.capacities.size() << endl;
    return false;
  }
  for (size_t i = 0; i < nr1.capacities.size(); i++) {
    if (nr1.capacities[i].available != nr2.capacities[i].available) {
      return false;
    }
    if (nr1.capacities[i].total != nr2.capacities[i].total) {
      return false;
    }
  }
  if (nr1.custom_resources.size() != nr2.custom_resources.size()) {
    return false;
  }
  // Iterate by const reference; the original copied both hash maps just to
  // walk them.
  const auto &cr1 = nr1.custom_resources;
  const auto &cr2 = nr2.custom_resources;
  for (auto it1 = cr1.begin(); it1 != cr1.end(); ++it1) {
    auto it2 = cr2.find(it1->first);
    if (it2 == cr2.end()) {
      return false;
    }
    if (it1->second.total != it2->second.total) {
      return false;
    }
    if (it1->second.available != it2->second.available) {
      return false;
    }
  }
  return true;
}
namespace ray {
/// Shared (empty) gtest fixture for the scheduling tests.
class SchedulingTest : public ::testing::Test {
 public:
  void SetUp() {}
  void Shutdown() {}
};
/// Exercises StringIdMap insert/remove/lookup, including forced collisions.
TEST_F(SchedulingTest, SchedulingIdTest) {
  StringIdMap ids;
  hash<string> hasher;
  size_t num = 10;  // should be greater than 10.
  for (size_t i = 0; i < num; i++) {
    ids.Insert(to_string(i));
  }
  ASSERT_EQ(ids.Count(), num);
  // Remove by string form.
  ids.Remove(to_string(1));
  ASSERT_EQ(ids.Count(), num - 1);
  // Remove by integer (hash) form.
  ids.Remove(hasher(to_string(2)));
  ASSERT_EQ(ids.Count(), num - 2);
  // Without collisions, an ID is simply the hash of its string.
  ASSERT_TRUE(ids.Get(to_string(3)) == static_cast<int64_t>(hasher(to_string(3))));
  // Unknown strings map to -1.
  ASSERT_TRUE(ids.Get(to_string(100)) == -1);
  /// Test for handling collision.
  StringIdMap short_ids;
  uint8_t max_id = 8;
  for (size_t i = 0; i < max_id; i++) {
    // Confining IDs to [0, max_id) forces collisions to be resolved.
    int64_t id = short_ids.Insert(to_string(i), max_id);
    ASSERT_TRUE(id < max_id);
  }
  ASSERT_EQ(short_ids.Count(), max_id);
}
/// A freshly initialized cluster reports the requested number of nodes.
TEST_F(SchedulingTest, SchedulingInitClusterTest) {
  int num_nodes = 10;
  ClusterResourceScheduler cluster_resources;
  initCluster(cluster_resources, num_nodes);
  ASSERT_EQ(cluster_resources.NumNodes(), num_nodes);
}
/// Removing one node decrements the node count.
TEST_F(SchedulingTest, SchedulingDeleteClusterNodeTest) {
  int num_nodes = 4;
  int64_t remove_id = 2;
  ClusterResourceScheduler cluster_resources;
  initCluster(cluster_resources, num_nodes);
  cluster_resources.RemoveNode(remove_id);
  ASSERT_TRUE(num_nodes - 1 == cluster_resources.NumNodes());
}
/// Updating an existing node's resources must not change the node count.
TEST_F(SchedulingTest, SchedulingModifyClusterNodeTest) {
  int num_nodes = 4;
  int64_t update_id = 2;
  ClusterResourceScheduler cluster_resources;
  initCluster(cluster_resources, num_nodes);
  NodeResources node_resources;
  vector<int64_t> pred_capacities;
  vector<int64_t> cust_ids;
  vector<int64_t> cust_capacities;
  int k;
  for (k = 0; k < PredefinedResources_MAX; k++) {
    if (rand() % 3 == 0) {
      pred_capacities.push_back(0);
    } else {
      pred_capacities.push_back(rand() % 10);
    }
  }
  int m = min(rand() % PredefinedResources_MAX, num_nodes);
  int start = rand() % num_nodes;
  for (k = 0; k < m; k++) {
    cust_ids.push_back((start + k) % num_nodes);
    cust_capacities.push_back(rand() % 10);
  }
  // Build the node resources once and update the node once. The original
  // called initNodeResources/AddOrUpdateNode inside the loop above, which
  // appended duplicate capacity entries on every iteration and performed no
  // update at all when m == 0.
  initNodeResources(node_resources, pred_capacities, cust_ids, cust_capacities);
  cluster_resources.AddOrUpdateNode(update_id, node_resources);
  ASSERT_TRUE(num_nodes == cluster_resources.NumNodes());
}
/// Scheduling a task subtracts its demands from the node's available
/// resources, floored at zero.
TEST_F(SchedulingTest, SchedulingUpdateAvailableResourcesTest) {
  /// Create cluster resources.
  NodeResources node_resources;
  vector<int64_t> pred_capacities{10, 5, 3};
  vector<int64_t> cust_ids{1, 2};
  vector<int64_t> cust_capacities{5, 5};
  initNodeResources(node_resources, pred_capacities, cust_ids, cust_capacities);
  ClusterResourceScheduler cluster_resources(1, node_resources);
  {
    TaskRequest task_req;
#define PRED_CUSTOM_LEN 2
    vector<int64_t> pred_demands{7, 7};
    vector<bool> pred_soft{false, true};
    vector<int64_t> cust_ids{1, 2};
    vector<int64_t> cust_demands{3, 10};
    vector<bool> cust_soft{false, true};
    initTaskRequest(task_req, pred_demands, pred_soft, cust_ids, cust_demands, cust_soft,
                    EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    // Demands exceed capacity only on soft constraints, so the task is
    // schedulable but with violations > 0.
    ASSERT_TRUE(node_id != -1);
    ASSERT_TRUE(violations > 0);
    NodeResources nr1, nr2;
    ASSERT_TRUE(cluster_resources.GetNodeResources(node_id, &nr1));
    cluster_resources.SubtractNodeAvailableResources(node_id, task_req);
    ASSERT_TRUE(cluster_resources.GetNodeResources(node_id, &nr2));
    // Predefined resources: available drops by the demand, floored at 0.
    for (size_t i = 0; i < PRED_CUSTOM_LEN; i++) {
      int64_t t = nr1.capacities[i].available - task_req.predefined_resources[i].demand;
      if (t < 0) t = 0;
      ASSERT_EQ(nr2.capacities[i].available, t);
    }
    // Custom resources: same check, keyed by resource ID.
    for (size_t i = 0; i < PRED_CUSTOM_LEN; i++) {
      auto it1 = nr1.custom_resources.find(task_req.custom_resources[i].id);
      if (it1 != nr1.custom_resources.end()) {
        auto it2 = nr2.custom_resources.find(task_req.custom_resources[i].id);
        if (it2 != nr2.custom_resources.end()) {
          int64_t t = it1->second.available - task_req.custom_resources[i].req.demand;
          if (t < 0) t = 0;
          ASSERT_EQ(it2->second.available, t);
        }
      }
    }
  }
}
/// AddOrUpdateNode stores resources on first call and fully overwrites them
/// on subsequent calls for the same node ID.
TEST_F(SchedulingTest, SchedulingAddOrUpdateNodeTest) {
  ClusterResourceScheduler cluster_resources;
  NodeResources nr, nr_out;
  int64_t node_id = 1;
  /// Add node.
  {
    NodeResources node_resources;
    vector<int64_t> pred_capacities{10, 5, 3};
    vector<int64_t> cust_ids{1, 2};
    vector<int64_t> cust_capacities{5, 5};
    initNodeResources(node_resources, pred_capacities, cust_ids, cust_capacities);
    cluster_resources.AddOrUpdateNode(node_id, node_resources);
    nr = node_resources;
  }
  /// Check whether node resources were correctly added.
  if (cluster_resources.GetNodeResources(node_id, &nr_out)) {
    ASSERT_TRUE(nodeResourcesEqual(nr, nr_out));
  } else {
    ASSERT_TRUE(false);
  }
  /// Update node.
  {
    NodeResources node_resources;
    vector<int64_t> pred_capacities{10, 10};
    vector<int64_t> cust_ids{2, 3};
    vector<int64_t> cust_capacities{6, 6};
    initNodeResources(node_resources, pred_capacities, cust_ids, cust_capacities);
    cluster_resources.AddOrUpdateNode(node_id, node_resources);
    nr = node_resources;
  }
  // The stored resources must now match the updated values, not the old ones.
  if (cluster_resources.GetNodeResources(node_id, &nr_out)) {
    ASSERT_TRUE(nodeResourcesEqual(nr, nr_out));
  } else {
    ASSERT_TRUE(false);
  }
}
/// End-to-end GetBestSchedulableNode checks: hard violations reject the
/// task (-1), soft violations schedule it with violations > 0, and
/// satisfiable requests schedule with violations == 0. Covers predefined
/// resources, custom resources, missing custom resources, and placement
/// hints.
TEST_F(SchedulingTest, SchedulingTaskRequestTest) {
  /// Create cluster resources containing local node.
  NodeResources node_resources;
  vector<int64_t> pred_capacities{5, 5};
  vector<int64_t> cust_ids{1};
  vector<int64_t> cust_capacities{10};
  initNodeResources(node_resources, pred_capacities, cust_ids, cust_capacities);
  ClusterResourceScheduler cluster_resources(0, node_resources);
  // Add a second, remote node with different capacities.
  {
    NodeResources node_resources;
    vector<int64_t> pred_capacities{10, 2, 3};
    vector<int64_t> cust_ids{1, 2};
    vector<int64_t> cust_capacities{5, 5};
    initNodeResources(node_resources, pred_capacities, cust_ids, cust_capacities);
    cluster_resources.AddOrUpdateNode(1, node_resources);
  }
  /// Predefined resources, hard constraint violation
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands = {11};
    vector<bool> pred_soft = {false};
    initTaskRequest(task_req, pred_demands, pred_soft, EmptyIntVector, EmptyIntVector,
                    EmptyBoolVector, EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_EQ(node_id, -1);
  }
  /// Predefined resources, soft constraint violation
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands = {11};
    vector<bool> pred_soft = {true};
    initTaskRequest(task_req, pred_demands, pred_soft, EmptyIntVector, EmptyIntVector,
                    EmptyBoolVector, EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id != -1);
    ASSERT_TRUE(violations > 0);
  }
  /// Predefined resources, no constraint violation.
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands = {5};
    vector<bool> pred_soft = {false};
    initTaskRequest(task_req, pred_demands, pred_soft, EmptyIntVector, EmptyIntVector,
                    EmptyBoolVector, EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id != -1);
    ASSERT_TRUE(violations == 0);
  }
  /// Custom resources, hard constraint violation.
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands{5, 2};
    vector<bool> pred_soft{false, true};
    vector<int64_t> cust_ids{1};
    vector<int64_t> cust_demands{11};
    vector<bool> cust_soft{false};
    initTaskRequest(task_req, pred_demands, pred_soft, cust_ids, cust_demands, cust_soft,
                    EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id == -1);
  }
  /// Custom resources, soft constraint violation.
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands{5, 2};
    vector<bool> pred_soft{false, true};
    vector<int64_t> cust_ids{1};
    vector<int64_t> cust_demands{11};
    vector<bool> cust_soft{true};
    initTaskRequest(task_req, pred_demands, pred_soft, cust_ids, cust_demands, cust_soft,
                    EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id != -1);
    ASSERT_TRUE(violations > 0);
  }
  /// Custom resources, no constraint violation.
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands{5, 2};
    vector<bool> pred_soft{false, true};
    vector<int64_t> cust_ids{1};
    vector<int64_t> cust_demands{5};
    vector<bool> cust_soft{false};
    initTaskRequest(task_req, pred_demands, pred_soft, cust_ids, cust_demands, cust_soft,
                    EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id != -1);
    ASSERT_TRUE(violations == 0);
  }
  /// Custom resource missing, hard constraint violation.
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands{5, 2};
    vector<bool> pred_soft{false, true};
    vector<int64_t> cust_ids{100};
    vector<int64_t> cust_demands{5};
    vector<bool> cust_soft{false};
    initTaskRequest(task_req, pred_demands, pred_soft, cust_ids, cust_demands, cust_soft,
                    EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id == -1);
  }
  /// Custom resource missing, soft constraint violation.
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands{5, 2};
    vector<bool> pred_soft{false, true};
    vector<int64_t> cust_ids{100};
    vector<int64_t> cust_demands{5};
    vector<bool> cust_soft{true};
    initTaskRequest(task_req, pred_demands, pred_soft, cust_ids, cust_demands, cust_soft,
                    EmptyIntVector);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id != -1);
    ASSERT_TRUE(violations > 0);
  }
  /// Placement_hints, soft constraint violation.
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands{5, 2};
    vector<bool> pred_soft{false, true};
    vector<int64_t> cust_ids{1};
    vector<int64_t> cust_demands{5};
    vector<bool> cust_soft{true};
    vector<int64_t> placement_hints{2, 3};
    initTaskRequest(task_req, pred_demands, pred_soft, cust_ids, cust_demands, cust_soft,
                    placement_hints);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id != -1);
    ASSERT_TRUE(violations > 0);
  }
  /// Placement hints, no constraint violation.
  {
    TaskRequest task_req;
    vector<int64_t> pred_demands{5, 2};
    vector<bool> pred_soft{false, true};
    vector<int64_t> cust_ids{1};
    vector<int64_t> cust_demands{5};
    vector<bool> cust_soft{true};
    vector<int64_t> placement_hints{1, 2, 3};
    initTaskRequest(task_req, pred_demands, pred_soft, cust_ids, cust_demands, cust_soft,
                    placement_hints);
    int64_t violations;
    int64_t node_id = cluster_resources.GetBestSchedulableNode(task_req, &violations);
    ASSERT_TRUE(node_id != -1);
    ASSERT_TRUE(violations == 0);
  }
}
#ifdef UNORDERED_VS_ABSL_MAPS_EVALUATION
/// Micro-benchmark comparing lookup throughput of std::unordered_map vs.
/// absl::flat_hash_map with int64 and string keys. Prints timings; makes no
/// assertions. Compiled only under UNORDERED_VS_ABSL_MAPS_EVALUATION.
TEST_F(SchedulingTest, SchedulingMapPerformanceTest) {
  size_t map_len = 1000000;
  unordered_map<int64_t, int64_t> umap_int_key;
  unordered_map<string, int64_t> umap_string_key;
  absl::flat_hash_map<int64_t, int64_t> amap_int_key;
  absl::flat_hash_map<string, int64_t> amap_string_key;
  vector<string> search_key_strings;
  vector<int64_t> search_key_ints;
  // Populate all four maps identically and precompute random lookup keys.
  for (size_t i = 0; i < map_len; i++) {
    int id = rand() % map_len;
    search_key_strings.push_back(to_string(id));
    search_key_ints.push_back(id);
    umap_int_key.emplace(i, i);
    umap_string_key.emplace(to_string(i), i);
    amap_int_key.emplace(i, i);
    amap_string_key.emplace(to_string(i), i);
  }
  // Visual separator in the test output.
  for (size_t i = 0; i < 25; i++) {
    cout << "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" << endl;
  }
  // `sum` accumulates lookup results so the loops cannot be optimized away.
  int64_t sum;
  // 1) unordered_map, int64 keys.
  auto t_start = std::chrono::high_resolution_clock::now();
  sum = 0;
  for (size_t i = 0; i < map_len; i++) {
    auto it = umap_int_key.find(search_key_ints[i]);
    if (it != umap_int_key.end()) {
      sum += it->second;
    }
  }
  auto t_end = std::chrono::high_resolution_clock::now();
  double duration = std::chrono::duration<double, std::milli>(t_end - t_start).count();
  cout << "sum = " << sum << " in " << duration << endl;
  // 2) unordered_map, string keys.
  t_start = std::chrono::high_resolution_clock::now();
  sum = 0;
  for (size_t i = 0; i < map_len; i++) {
    auto it = umap_string_key.find(search_key_strings[i]);
    if (it != umap_string_key.end()) {
      sum += it->second;
    }
  }
  t_end = std::chrono::high_resolution_clock::now();
  duration = std::chrono::duration<double, std::milli>(t_end - t_start).count();
  cout << "sum = " << sum << " in " << duration << endl;
  // 3) absl::flat_hash_map, int64 keys.
  t_start = std::chrono::high_resolution_clock::now();
  sum = 0;
  for (size_t i = 0; i < map_len; i++) {
    auto it = amap_int_key.find(search_key_ints[i]);
    if (it != amap_int_key.end()) {
      sum += it->second;
    }
  }
  t_end = std::chrono::high_resolution_clock::now();
  duration = std::chrono::duration<double, std::milli>(t_end - t_start).count();
  cout << "sum = " << sum << " in " << duration << endl;
  // 4) absl::flat_hash_map, string keys.
  t_start = std::chrono::high_resolution_clock::now();
  sum = 0;
  for (size_t i = 0; i < map_len; i++) {
    auto it = amap_string_key.find(search_key_strings[i]);
    if (it != amap_string_key.end()) {
      sum += it->second;
    }
  }
  t_end = std::chrono::high_resolution_clock::now();
  duration = std::chrono::duration<double, std::milli>(t_end - t_start).count();
  cout << "sum = " << sum << " in " << duration << endl;
}
#endif // UNORDERED_VS_ABSL_MAPS_EVALUATION
} // namespace ray
// Standard Google Test entry point: register command-line flags and run
// every test compiled into this binary.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/status.cc
|
C++
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A Status encapsulates the result of an operation. It may indicate success,
// or it may indicate an error with an associated error message.
//
// Multiple threads can invoke const methods on a Status without
// external synchronization, but if any of the threads may call a
// non-const method, all threads accessing the same Status must use
// external synchronization.
// Adapted from Apache Arrow, Apache Kudu, TensorFlow
#include "ray/common/status.h"
#include <assert.h>
namespace ray {
/// Construct an error status with the given code and message.
/// An OK status is represented by a null state_, so this constructor is
/// only meaningful for error codes (asserted below).
Status::Status(StatusCode code, const std::string &msg) {
  assert(code != StatusCode::OK);
  // State is a plain aggregate {code, msg}; build it in one step.
  state_ = new State{code, msg};
}
/// Replace this status's state with a deep copy of `state`.
/// A null source leaves this status in the OK state (null state_).
void Status::CopyFrom(const State *state) {
  // Free whatever we currently hold before taking the copy.
  delete state_;
  state_ = (state == nullptr) ? nullptr : new State(*state);
}
/// Return a human-readable name for this status's code alone, without the
/// message text. Returns "OK" for a success status (null state_).
std::string Status::CodeAsString() const {
  if (state_ == NULL) {
    return "OK";
  }
  const char *type;
  switch (code()) {
  case StatusCode::OK:
    type = "OK";
    break;
  case StatusCode::OutOfMemory:
    type = "Out of memory";
    break;
  case StatusCode::KeyError:
    type = "Key error";
    break;
  case StatusCode::TypeError:
    type = "Type error";
    break;
  case StatusCode::Invalid:
    type = "Invalid";
    break;
  case StatusCode::IOError:
    type = "IOError";
    break;
  case StatusCode::ObjectExists:
    type = "ObjectExists";
    break;
  case StatusCode::ObjectStoreFull:
    type = "ObjectStoreFull";
    break;
  case StatusCode::UnknownError:
    type = "Unknown error";
    break;
  case StatusCode::NotImplemented:
    type = "NotImplemented";
    break;
  case StatusCode::RedisError:
    type = "RedisError";
    break;
  case StatusCode::TimedOut:
    type = "TimedOut";
    break;
  case StatusCode::Interrupted:
    type = "Interrupted";
    break;
  // These two codes exist in StatusCode (status.h) but previously fell
  // through to "Unknown", making system-exit statuses indistinguishable
  // from genuinely unknown codes in logs.
  case StatusCode::IntentionalSystemExit:
    type = "IntentionalSystemExit";
    break;
  case StatusCode::UnexpectedSystemExit:
    type = "UnexpectedSystemExit";
    break;
  default:
    type = "Unknown";
    break;
  }
  return std::string(type);
}
/// Full printable representation: "OK" on success, otherwise
/// "<code name>: <message>".
std::string Status::ToString() const {
  std::string result = CodeAsString();
  // An error status appends its message after the code name.
  if (state_ != NULL) {
    result += ": ";
    result += state_->msg;
  }
  return result;
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/status.h
|
C/C++ Header
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A Status encapsulates the result of an operation. It may indicate success,
// or it may indicate an error with an associated error message.
//
// Multiple threads can invoke const methods on a Status without
// external synchronization, but if any of the threads may call a
// non-const method, all threads accessing the same Status must use
// external synchronization.
// Adapted from Apache Arrow, Apache Kudu, TensorFlow
#ifndef RAY_STATUS_H_
#define RAY_STATUS_H_
#include <cstring>
#include <iosfwd>
#include <string>
#include "ray/util/logging.h"
#include "ray/util/macros.h"
#include "ray/util/visibility.h"
// Return the given status if it is not OK.
#define RAY_RETURN_NOT_OK(s) \
do { \
::ray::Status _s = (s); \
if (RAY_PREDICT_FALSE(!_s.ok())) { \
return _s; \
} \
} while (0)
// If 'to_call' returns a bad status, CHECK immediately with a logged message
// of 'msg' followed by the status.
#define RAY_CHECK_OK_PREPEND(to_call, msg) \
do { \
::ray::Status _s = (to_call); \
RAY_CHECK(_s.ok()) << (msg) << ": " << _s.ToString(); \
} while (0)
// If the status is bad, CHECK immediately, appending the status to the
// logged message.
#define RAY_CHECK_OK(s) RAY_CHECK_OK_PREPEND(s, "Bad status")
// This macro is used to replace the "ARROW_CHECK_OK_PREPEND" macro.
#define RAY_ARROW_CHECK_OK_PREPEND(to_call, msg) \
do { \
::arrow::Status _s = (to_call); \
RAY_CHECK(_s.ok()) << (msg) << ": " << _s.ToString(); \
} while (0)
// This macro is used to replace the "ARROW_CHECK_OK" macro.
#define RAY_ARROW_CHECK_OK(s) RAY_ARROW_CHECK_OK_PREPEND(s, "Bad status")
// If arrow status is not ok, return a ray IOError status
// with the error message.
// (Fixed: the original macro body contained a stray empty statement `;`
// on its own continuation line after the return.)
#define RAY_ARROW_RETURN_NOT_OK(s)               \
  do {                                           \
    ::arrow::Status _s = (s);                    \
    if (RAY_PREDICT_FALSE(!_s.ok())) {           \
      return ray::Status::IOError(_s.message()); \
    }                                            \
  } while (0)
namespace ray {
/// Error categories for ray::Status. Backed by a single char so the code
/// stays compact inside Status::State.
/// NOTE(review): value 8 is unused (UnknownError jumps from 7 to 9) —
/// presumably a code was removed historically; confirm before reusing 8.
enum class StatusCode : char {
  OK = 0,
  OutOfMemory = 1,
  KeyError = 2,
  TypeError = 3,
  Invalid = 4,
  IOError = 5,
  ObjectExists = 6,
  ObjectStoreFull = 7,
  UnknownError = 9,
  NotImplemented = 10,
  RedisError = 11,
  TimedOut = 12,
  Interrupted = 13,
  IntentionalSystemExit = 14,
  UnexpectedSystemExit = 15,
};
#if defined(__clang__)
// Only clang supports warn_unused_result as a type annotation.
class RAY_MUST_USE_RESULT RAY_EXPORT Status;
#endif
/// A Status encapsulates the result of an operation: either success ("OK")
/// or an error code plus a human-readable message. Success is represented
/// by a null state_, so OK statuses are cheap to create, copy, and destroy.
class RAY_EXPORT Status {
 public:
  // Create a success status.
  Status() : state_(NULL) {}
  ~Status() { delete state_; }

  // Create an error status; `code` must not be StatusCode::OK
  // (asserted in status.cc).
  Status(StatusCode code, const std::string &msg);

  // Copy the specified status. Copies deep-clone the State (see the inline
  // definitions at the bottom of this header).
  Status(const Status &s);
  void operator=(const Status &s);

  // Return a success status.
  static Status OK() { return Status(); }

  // Return error status of an appropriate type.
  static Status OutOfMemory(const std::string &msg) {
    return Status(StatusCode::OutOfMemory, msg);
  }

  static Status KeyError(const std::string &msg) {
    return Status(StatusCode::KeyError, msg);
  }

  static Status TypeError(const std::string &msg) {
    return Status(StatusCode::TypeError, msg);
  }

  static Status UnknownError(const std::string &msg) {
    return Status(StatusCode::UnknownError, msg);
  }

  static Status NotImplemented(const std::string &msg) {
    return Status(StatusCode::NotImplemented, msg);
  }

  static Status Invalid(const std::string &msg) {
    return Status(StatusCode::Invalid, msg);
  }

  static Status IOError(const std::string &msg) {
    return Status(StatusCode::IOError, msg);
  }

  static Status ObjectExists(const std::string &msg) {
    return Status(StatusCode::ObjectExists, msg);
  }

  static Status ObjectStoreFull(const std::string &msg) {
    return Status(StatusCode::ObjectStoreFull, msg);
  }

  static Status RedisError(const std::string &msg) {
    return Status(StatusCode::RedisError, msg);
  }

  static Status TimedOut(const std::string &msg) {
    return Status(StatusCode::TimedOut, msg);
  }

  static Status Interrupted(const std::string &msg) {
    return Status(StatusCode::Interrupted, msg);
  }

  // System-exit statuses carry fixed messages: they signal worker shutdown
  // rather than describing a recoverable error.
  static Status IntentionalSystemExit() {
    return Status(StatusCode::IntentionalSystemExit, "intentional system exit");
  }

  static Status UnexpectedSystemExit() {
    return Status(StatusCode::UnexpectedSystemExit, "user code caused exit");
  }

  // Returns true iff the status indicates success.
  bool ok() const { return (state_ == NULL); }

  // Per-code predicates for callers that branch on the failure category.
  bool IsOutOfMemory() const { return code() == StatusCode::OutOfMemory; }
  bool IsKeyError() const { return code() == StatusCode::KeyError; }
  bool IsInvalid() const { return code() == StatusCode::Invalid; }
  bool IsIOError() const { return code() == StatusCode::IOError; }
  bool IsObjectExists() const { return code() == StatusCode::ObjectExists; }
  bool IsObjectStoreFull() const { return code() == StatusCode::ObjectStoreFull; }
  bool IsTypeError() const { return code() == StatusCode::TypeError; }
  bool IsUnknownError() const { return code() == StatusCode::UnknownError; }
  bool IsNotImplemented() const { return code() == StatusCode::NotImplemented; }
  bool IsRedisError() const { return code() == StatusCode::RedisError; }
  bool IsTimedOut() const { return code() == StatusCode::TimedOut; }
  bool IsInterrupted() const { return code() == StatusCode::Interrupted; }
  // True for either flavor of system exit (intentional or unexpected).
  bool IsSystemExit() const {
    return code() == StatusCode::IntentionalSystemExit ||
           code() == StatusCode::UnexpectedSystemExit;
  }
  bool IsIntentionalSystemExit() const {
    return code() == StatusCode::IntentionalSystemExit;
  }

  // Return a string representation of this status suitable for printing.
  // Returns the string "OK" for success.
  std::string ToString() const;

  // Return a string representation of the status code, without the message
  // text or posix code information.
  std::string CodeAsString() const;

  StatusCode code() const { return ok() ? StatusCode::OK : state_->code; }

  std::string message() const { return ok() ? "" : state_->msg; }

 private:
  struct State {
    StatusCode code;
    std::string msg;
  };
  // OK status has a `NULL` state_. Otherwise, `state_` points to
  // a `State` structure containing the error code and message(s)
  State *state_;

  void CopyFrom(const State *s);
};
/// Stream a Status by delegating to ToString(), so streaming and logging
/// share one textual representation.
static inline std::ostream &operator<<(std::ostream &os, const Status &x) {
  return os << x.ToString();
}
// Deep-copy constructor: OK (null state_) stays null; otherwise clone the
// error state so the two Status objects are independent.
inline Status::Status(const Status &s)
    : state_((s.state_ == NULL) ? NULL : new State(*s.state_)) {}

inline void Status::operator=(const Status &s) {
  // The following condition catches both aliasing (when this == &s),
  // and the common case where both s and *this are ok.
  if (state_ != s.state_) {
    CopyFrom(s.state_);
  }
}
} // namespace ray
#endif // RAY_STATUS_H_
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/scheduling_resources.cc
|
C++
|
#include "scheduling_resources.h"
#include <cmath>
#include <sstream>
#include "ray/util/logging.h"
namespace ray {
// Default: zero quantity.
FractionalResourceQuantity::FractionalResourceQuantity() { resource_quantity_ = 0; }

// Fixed-point representation: the quantity is stored as a scaled int64 so
// arithmetic and comparisons are exact (unlike raw doubles).
FractionalResourceQuantity::FractionalResourceQuantity(double resource_quantity) {
  // We check for nonnegativity due to the implicit conversion to
  // FractionalResourceQuantity from ints/doubles when we do logical
  // comparisons.
  RAY_CHECK(resource_quantity >= 0)
      << "Resource capacity, " << resource_quantity << ", should be nonnegative.";
  // NOTE(review): static_cast truncates toward zero rather than rounding;
  // quantities that aren't exact multiples of 1/kResourceConversionFactor
  // lose their remainder — confirm this is intended.
  resource_quantity_ =
      static_cast<int64_t>(resource_quantity * kResourceConversionFactor);
}

// Arithmetic: + and - are built on top of += and -=.
const FractionalResourceQuantity FractionalResourceQuantity::operator+(
    const FractionalResourceQuantity &rhs) const {
  FractionalResourceQuantity result = *this;
  result += rhs;
  return result;
}

const FractionalResourceQuantity FractionalResourceQuantity::operator-(
    const FractionalResourceQuantity &rhs) const {
  FractionalResourceQuantity result = *this;
  result -= rhs;
  return result;
}

void FractionalResourceQuantity::operator+=(const FractionalResourceQuantity &rhs) {
  resource_quantity_ += rhs.resource_quantity_;
}

// NOTE(review): -= can drive the stored quantity negative; callers such as
// ResourceSet::SubtractResourcesStrict RAY_CHECK for that afterwards.
void FractionalResourceQuantity::operator-=(const FractionalResourceQuantity &rhs) {
  resource_quantity_ -= rhs.resource_quantity_;
}

// Comparisons: all derived from the exact integer representation, with
// >, <=, >= expressed in terms of < and ==.
bool FractionalResourceQuantity::operator==(const FractionalResourceQuantity &rhs) const {
  return resource_quantity_ == rhs.resource_quantity_;
}

bool FractionalResourceQuantity::operator!=(const FractionalResourceQuantity &rhs) const {
  return !(*this == rhs);
}

bool FractionalResourceQuantity::operator<(const FractionalResourceQuantity &rhs) const {
  return resource_quantity_ < rhs.resource_quantity_;
}

bool FractionalResourceQuantity::operator>(const FractionalResourceQuantity &rhs) const {
  return rhs < *this;
}

bool FractionalResourceQuantity::operator<=(const FractionalResourceQuantity &rhs) const {
  return !(*this > rhs);
}

bool FractionalResourceQuantity::operator>=(const FractionalResourceQuantity &rhs) const {
  bool result = !(*this < rhs);
  return result;
}

// Convert back from the fixed-point representation to a double.
double FractionalResourceQuantity::ToDouble() const {
  return static_cast<double>(resource_quantity_) / kResourceConversionFactor;
}
// Empty resource set.
ResourceSet::ResourceSet() {}

// Build from a label -> quantity map; every quantity must be strictly
// positive (zero-capacity entries are never stored in a ResourceSet).
ResourceSet::ResourceSet(
    const std::unordered_map<std::string, FractionalResourceQuantity> &resource_map)
    : resource_capacity_(resource_map) {
  for (auto const &resource_pair : resource_map) {
    RAY_CHECK(resource_pair.second > 0);
  }
}

// Same as above but converting from raw doubles.
ResourceSet::ResourceSet(const std::unordered_map<std::string, double> &resource_map) {
  for (auto const &resource_pair : resource_map) {
    RAY_CHECK(resource_pair.second > 0);
    resource_capacity_[resource_pair.first] =
        FractionalResourceQuantity(resource_pair.second);
  }
}

// Build from parallel label/capacity vectors (must be the same length).
ResourceSet::ResourceSet(const std::vector<std::string> &resource_labels,
                         const std::vector<double> resource_capacity) {
  RAY_CHECK(resource_labels.size() == resource_capacity.size());
  for (size_t i = 0; i < resource_labels.size(); i++) {
    RAY_CHECK(resource_capacity[i] > 0);
    resource_capacity_[resource_labels[i]] =
        FractionalResourceQuantity(resource_capacity[i]);
  }
}

ResourceSet::~ResourceSet() {}

// Two sets are equal iff each is a subset of the other.
bool ResourceSet::operator==(const ResourceSet &rhs) const {
  return (this->IsSubset(rhs) && rhs.IsSubset(*this));
}

// A set is empty iff it tracks no resource labels; zero-capacity entries
// are never stored (see the RAY_CHECKs in the constructors).
bool ResourceSet::IsEmpty() const {
  return resource_capacity_.empty();
}

// True if every quantity in this set fits within `other` (labels missing
// from `other` count as quantity 0 via GetResource).
bool ResourceSet::IsSubset(const ResourceSet &other) const {
  // Check to make sure all keys of this are in other.
  for (const auto &resource_pair : resource_capacity_) {
    const auto &resource_name = resource_pair.first;
    const FractionalResourceQuantity &lhs_quantity = resource_pair.second;
    const FractionalResourceQuantity &rhs_quantity = other.GetResource(resource_name);
    if (lhs_quantity > rhs_quantity) {
      // Resource found in rhs, but lhs capacity exceeds rhs capacity.
      return false;
    }
  }
  return true;
}

/// Test whether this ResourceSet is a superset of the other ResourceSet
bool ResourceSet::IsSuperset(const ResourceSet &other) const {
  return other.IsSubset(*this);
}

/// Test whether this ResourceSet is precisely equal to the other ResourceSet.
bool ResourceSet::IsEqual(const ResourceSet &rhs) const {
  return (this->IsSubset(rhs) && rhs.IsSubset(*this));
}
// Set (or overwrite) the capacity for a resource label.
// NOTE(review): a capacity of zero is silently ignored rather than
// removing an existing entry — callers must use DeleteResource for that.
void ResourceSet::AddOrUpdateResource(const std::string &resource_name,
                                      const FractionalResourceQuantity &capacity) {
  if (capacity > 0) {
    resource_capacity_[resource_name] = capacity;
  }
}
/// Remove a resource label from this set.
/// \return true if the label was present and removed, false otherwise.
bool ResourceSet::DeleteResource(const std::string &resource_name) {
  // erase(key) returns the number of removed elements (0 or 1 for a map),
  // which gives the same answer as the previous count()+erase() pair with
  // a single hash lookup instead of two.
  return resource_capacity_.erase(resource_name) > 0;
}
/// Best-effort subtraction: subtract `other`'s quantities from this set,
/// dropping any label whose capacity reaches (or goes below) zero. Labels
/// in `other` that are not tracked here are ignored.
void ResourceSet::SubtractResources(const ResourceSet &other) {
  for (const auto &resource_pair : other.GetResourceAmountMap()) {
    const std::string &resource_label = resource_pair.first;
    const FractionalResourceQuantity &resource_capacity = resource_pair.second;
    // Single find() instead of count() + operator[]: the original code
    // default-inserted a zero entry for absent labels only to erase it
    // again immediately.
    auto it = resource_capacity_.find(resource_label);
    if (it == resource_capacity_.end()) {
      continue;
    }
    it->second -= resource_capacity;
    if (it->second <= 0) {
      resource_capacity_.erase(it);
    }
  }
}
// Strict subtraction: every label in `other` must exist here, and no
// capacity may go negative; violations crash via RAY_CHECK. Entries whose
// capacity reaches exactly zero are erased.
void ResourceSet::SubtractResourcesStrict(const ResourceSet &other) {
  // Subtract the resources, make sure none goes below zero and delete any if new capacity
  // is zero.
  for (const auto &resource_pair : other.GetResourceAmountMap()) {
    const std::string &resource_label = resource_pair.first;
    const FractionalResourceQuantity &resource_capacity = resource_pair.second;
    RAY_CHECK(resource_capacity_.count(resource_label) == 1)
        << "Attempt to acquire unknown resource: " << resource_label << " capacity "
        << resource_capacity.ToDouble();
    resource_capacity_[resource_label] -= resource_capacity;
    // Ensure that quantity is positive. Note, we have to have the check before
    // erasing the object to make sure that it doesn't get added back.
    RAY_CHECK(resource_capacity_[resource_label] >= 0)
        << "Capacity of resource after subtraction is negative, "
        << resource_capacity_[resource_label].ToDouble() << ".";
    if (resource_capacity_[resource_label] == 0) {
      resource_capacity_.erase(resource_label);
    }
  }
}

// Add a set of resources to the current set of resources subject to upper limits on
// capacity from the total_resource set
void ResourceSet::AddResourcesCapacityConstrained(const ResourceSet &other,
                                                  const ResourceSet &total_resources) {
  const std::unordered_map<std::string, FractionalResourceQuantity> &total_resource_map =
      total_resources.GetResourceAmountMap();
  for (const auto &resource_pair : other.GetResourceAmountMap()) {
    const std::string &to_add_resource_label = resource_pair.first;
    const FractionalResourceQuantity &to_add_resource_capacity = resource_pair.second;
    if (total_resource_map.count(to_add_resource_label) != 0) {
      // If resource exists in total map, add to the local capacity map.
      // If the new capacity will be greater the total capacity, set the new capacity to
      // total capacity (capping to the total)
      const FractionalResourceQuantity &total_capacity =
          total_resource_map.at(to_add_resource_label);
      resource_capacity_[to_add_resource_label] =
          std::min(resource_capacity_[to_add_resource_label] + to_add_resource_capacity,
                   total_capacity);
    } else {
      // Resource does not exist in the total map, it probably got deleted from the total.
      // Don't panic, do nothing and simply continue.
      RAY_LOG(DEBUG) << "[AddResourcesCapacityConstrained] Resource "
                     << to_add_resource_label
                     << " not found in the total resource map. It probably got deleted, "
                        "not adding back to resource_capacity_.";
    }
  }
}
// Perform an outer join.
// Sum quantities label-by-label; labels not yet tracked locally start
// from a default-constructed (zero) quantity.
void ResourceSet::AddResources(const ResourceSet &other) {
  for (const auto &entry : other.GetResourceAmountMap()) {
    resource_capacity_[entry.first] += entry.second;
  }
}
/// Look up the tracked quantity for a resource label.
/// \return the quantity, or zero if the label is not tracked.
FractionalResourceQuantity ResourceSet::GetResource(
    const std::string &resource_name) const {
  // Single find() instead of count() + at(), which performed two hash
  // lookups for every hit.
  auto it = resource_capacity_.find(resource_name);
  if (it == resource_capacity_.end()) {
    return 0;
  }
  return it->second;
}
/// Project this set onto just the CPU resource: the returned set contains
/// the CPU label with its current quantity, or is empty if no CPUs are
/// tracked.
const ResourceSet ResourceSet::GetNumCpus() const {
  ResourceSet cpu_only;
  const FractionalResourceQuantity cpus = GetResource(kCPU_ResourceLabel);
  if (cpus > 0) {
    cpu_only.resource_capacity_[kCPU_ResourceLabel] = cpus;
  }
  return cpu_only;
}
/// Render a resource quantity for human consumption. Memory-style
/// resources are tracked internally in 50 MiB chunks and are converted to
/// GiB; every other resource prints its raw quantity.
const std::string format_resource(std::string resource_name, double quantity) {
  const bool is_memory_resource =
      resource_name == "object_store_memory" || resource_name == "memory";
  if (!is_memory_resource) {
    return std::to_string(quantity);
  }
  // chunks -> bytes (x 50 MiB) -> GiB (/ 1 GiB).
  const double gib = quantity * (50 * 1024 * 1024) / (1024 * 1024 * 1024);
  return std::to_string(gib) + " GiB";
}
/// Render the set as "{label: quantity}, {label: quantity}, ..." using
/// format_resource for each value; an empty set renders as "{}".
const std::string ResourceSet::ToString() const {
  if (resource_capacity_.empty()) {
    return "{}";
  }
  std::string result;
  std::string separator;  // empty before the first entry, ", " afterwards
  for (const auto &entry : resource_capacity_) {
    result += separator + "{" + entry.first + ": " +
              format_resource(entry.first, entry.second.ToDouble()) + "}";
    separator = ", ";
  }
  return result;
}
const std::unordered_map<std::string, double> ResourceSet::GetResourceMap() const {
std::unordered_map<std::string, double> result;
for (const auto resource_pair : resource_capacity_) {
result[resource_pair.first] = resource_pair.second.ToDouble();
}
return result;
};
// Read-only view of the underlying label -> quantity map (no copy).
const std::unordered_map<std::string, FractionalResourceQuantity>
    &ResourceSet::GetResourceAmountMap() const {
  return resource_capacity_;
};
/// ResourceIds class implementation

// Empty: no whole or fractional IDs.
ResourceIds::ResourceIds() {}

// Create `resource_quantity` whole IDs numbered 0..n-1. The quantity must
// be a whole number.
ResourceIds::ResourceIds(double resource_quantity) {
  RAY_CHECK(IsWhole(resource_quantity));
  int64_t whole_quantity = resource_quantity;
  whole_ids_.reserve(whole_quantity);
  for (int64_t i = 0; i < whole_quantity; ++i) {
    whole_ids_.push_back(i);
  }
  total_capacity_ = TotalQuantity();
  decrement_backlog_ = 0;
}

// Constructors from explicit ID lists.
// NOTE(review): total_capacity_(TotalQuantity()) relies on whole_ids_ and
// fractional_ids_ being declared before total_capacity_ in the class so
// they are initialized first — confirm the member order in the header.
ResourceIds::ResourceIds(const std::vector<int64_t> &whole_ids)
    : whole_ids_(whole_ids), total_capacity_(whole_ids.size()), decrement_backlog_(0) {}

ResourceIds::ResourceIds(
    const std::vector<std::pair<int64_t, FractionalResourceQuantity>> &fractional_ids)
    : fractional_ids_(fractional_ids),
      total_capacity_(TotalQuantity()),
      decrement_backlog_(0) {}

ResourceIds::ResourceIds(
    const std::vector<int64_t> &whole_ids,
    const std::vector<std::pair<int64_t, FractionalResourceQuantity>> &fractional_ids)
    : whole_ids_(whole_ids),
      fractional_ids_(fractional_ids),
      total_capacity_(TotalQuantity()),
      decrement_backlog_(0) {}

// True if this collection can satisfy a request for `resource_quantity`.
// Whole requests need enough whole IDs; fractional requests can be served
// by any whole ID or by a single fractional ID with enough remaining.
bool ResourceIds::Contains(const FractionalResourceQuantity &resource_quantity) const {
  if (resource_quantity >= 1) {
    double whole_quantity = resource_quantity.ToDouble();
    RAY_CHECK(IsWhole(whole_quantity));
    return whole_ids_.size() >= whole_quantity;
  } else {
    if (whole_ids_.size() > 0) {
      return true;
    } else {
      // A fractional request must fit entirely within ONE fractional ID.
      for (auto const &fractional_pair : fractional_ids_) {
        if (fractional_pair.second >= resource_quantity) {
          return true;
        }
      }
      return false;
    }
  }
}
// Remove `resource_quantity` worth of IDs from this collection and return
// them. Whole requests pop whole IDs; fractional requests prefer an
// existing fractional ID with enough remaining, otherwise split a whole
// ID. Callers must check Contains() first — insufficient capacity crashes
// via RAY_CHECK.
ResourceIds ResourceIds::Acquire(const FractionalResourceQuantity &resource_quantity) {
  if (resource_quantity >= 1) {
    // Handle the whole case.
    double whole_quantity = resource_quantity.ToDouble();
    RAY_CHECK(IsWhole(whole_quantity));
    RAY_CHECK(static_cast<int64_t>(whole_ids_.size()) >=
              static_cast<int64_t>(whole_quantity));
    std::vector<int64_t> ids_to_return;
    for (int64_t i = 0; i < whole_quantity; ++i) {
      ids_to_return.push_back(whole_ids_.back());
      whole_ids_.pop_back();
    }
    return ResourceIds(ids_to_return);
  } else {
    // Handle the fractional case.
    for (auto &fractional_pair : fractional_ids_) {
      if (fractional_pair.second >= resource_quantity) {
        auto return_pair = std::make_pair(fractional_pair.first, resource_quantity);
        fractional_pair.second -= resource_quantity;
        // Remove the fractional pair if the new capacity is 0
        // (swap-with-last + pop_back avoids an O(n) vector erase).
        if (fractional_pair.second == 0) {
          std::swap(fractional_pair, fractional_ids_[fractional_ids_.size() - 1]);
          fractional_ids_.pop_back();
        }
        return ResourceIds({return_pair});
      }
    }
    // If we get here then there weren't enough available fractional IDs, so we
    // need to use a whole ID.
    RAY_CHECK(whole_ids_.size() > 0);
    int64_t whole_id = whole_ids_.back();
    whole_ids_.pop_back();
    auto return_pair = std::make_pair(whole_id, resource_quantity);
    // We cannot make use of the implicit conversion because ints have no
    // operator-(const FractionalResourceQuantity&) function.
    const FractionalResourceQuantity remaining_amount =
        FractionalResourceQuantity(1) - resource_quantity;
    fractional_ids_.push_back(std::make_pair(whole_id, remaining_amount));
    return ResourceIds({return_pair});
  }
}

// Return previously-acquired IDs to this collection. Whole IDs first pay
// down any decrement backlog (capacity reductions that could not be
// applied because the IDs were in use); fractional returns are merged back
// with their matching ID and promoted to whole when they reach 1.
void ResourceIds::Release(const ResourceIds &resource_ids) {
  auto const &whole_ids_to_return = resource_ids.WholeIds();
  int64_t return_resource_count = whole_ids_to_return.size();
  if (return_resource_count > decrement_backlog_) {
    // We are returning more resources than in the decrement backlog, thus set the backlog
    // to zero and insert (count - decrement_backlog resources).
    whole_ids_.insert(whole_ids_.end(), whole_ids_to_return.begin() + decrement_backlog_,
                      whole_ids_to_return.end());
    decrement_backlog_ = 0;
  } else {
    // Do not insert back to whole_ids_. Instead just decrement backlog by the return
    // count
    decrement_backlog_ -= return_resource_count;
  }

  // Return the fractional IDs.
  auto const &fractional_ids_to_return = resource_ids.FractionalIds();
  for (auto const &fractional_pair_to_return : fractional_ids_to_return) {
    int64_t resource_id = fractional_pair_to_return.first;
    // Find the live entry for this specific resource ID, if any.
    auto const &fractional_pair_it = std::find_if(
        fractional_ids_.begin(), fractional_ids_.end(),
        [resource_id](std::pair<int64_t, FractionalResourceQuantity> &fractional_pair) {
          return fractional_pair.first == resource_id;
        });
    if (fractional_pair_it == fractional_ids_.end()) {
      fractional_ids_.push_back(fractional_pair_to_return);
    } else {
      fractional_pair_it->second += fractional_pair_to_return.second;
      RAY_CHECK(fractional_pair_it->second <= 1)
          << "Fractional Resource Id " << fractional_pair_it->first << " capacity is "
          << fractional_pair_it->second.ToDouble() << ". Should have been less than one.";
      // If this makes the ID whole, then return it to the list of whole IDs.
      if (fractional_pair_it->second == 1) {
        if (decrement_backlog_ > 0) {
          // There's a decrement backlog, do not add to whole_ids_
          decrement_backlog_--;
        } else {
          whole_ids_.push_back(resource_id);
        }
        fractional_ids_.erase(fractional_pair_it);
      }
    }
  }
}
// Return a new collection that is the union of this one and resource_ids.
// Implemented as copy + Release so fractional IDs merge correctly.
ResourceIds ResourceIds::Plus(const ResourceIds &resource_ids) const {
  ResourceIds resource_ids_to_return(whole_ids_, fractional_ids_);
  resource_ids_to_return.Release(resource_ids);
  return resource_ids_to_return;
}

// Accessor: the available whole resource IDs.
const std::vector<int64_t> &ResourceIds::WholeIds() const { return whole_ids_; }

// Accessor: the available (id, remaining fraction) pairs.
const std::vector<std::pair<int64_t, FractionalResourceQuantity>>
    &ResourceIds::FractionalIds() const {
  return fractional_ids_;
}

// True when no whole or fractional capacity remains available.
bool ResourceIds::TotalQuantityIsZero() const {
  return whole_ids_.empty() && fractional_ids_.empty();
}

// Total available capacity: one unit per whole ID plus all fractions.
FractionalResourceQuantity ResourceIds::TotalQuantity() const {
  FractionalResourceQuantity total_quantity =
      FractionalResourceQuantity(whole_ids_.size());
  for (auto const &fractional_pair : fractional_ids_) {
    total_quantity += fractional_pair.second;
  }
  return total_quantity;
}
/// Debug representation: "Whole IDs: [...], Fractional IDs: [...]".
std::string ResourceIds::ToString() const {
  std::string return_string = "Whole IDs: [";
  for (auto const &whole_id : whole_ids_) {
    return_string += std::to_string(whole_id) + ", ";
  }
  // Open the fractional list with '[': the previous version printed the
  // closing ']' without ever opening one, producing mismatched brackets.
  return_string += "], Fractional IDs: [";
  for (auto const &fractional_pair : fractional_ids_) {
    double fractional_amount = fractional_pair.second.ToDouble();
    return_string += "(" + std::to_string(fractional_pair.first) + ", " +
                     std::to_string(fractional_amount) + "), ";
  }
  return_string += "]";
  return return_string;
}
// Adjust the total capacity to `new_capacity`, growing or shrinking as
// needed. Only whole-number capacities are supported.
void ResourceIds::UpdateCapacity(int64_t new_capacity) {
  // Assert the new capacity is positive for sanity
  RAY_CHECK(new_capacity >= 0);
  // NOTE(review): the int64 conversion truncates any fractional part of
  // the current total capacity — updates assume whole resources.
  int64_t capacity_delta = new_capacity - total_capacity_.ToDouble();
  if (capacity_delta < 0) {
    DecreaseCapacity(-1 * capacity_delta);
  } else {
    IncreaseCapacity(capacity_delta);
  }
}
// Grow capacity by increment_quantity. Increments first pay down any
// outstanding decrement backlog; only the remainder materializes as new
// whole IDs.
void ResourceIds::IncreaseCapacity(int64_t increment_quantity) {
  const int64_t actual_increment_quantity =
      std::max<int64_t>(0, increment_quantity - decrement_backlog_);
  decrement_backlog_ = std::max<int64_t>(0, decrement_backlog_ - increment_quantity);

  if (actual_increment_quantity > 0) {
    for (int64_t added = 0; added < actual_increment_quantity; ++added) {
      whole_ids_.push_back(-1);  // Dynamic resources are assigned resource id -1.
    }
    total_capacity_ += actual_increment_quantity;
  }
}
// Shrink capacity by decrement_quantity. If fewer IDs are currently
// available than requested (the rest are acquired by tasks), the shortfall
// is recorded in decrement_backlog_ and consumed as IDs are released.
void ResourceIds::DecreaseCapacity(int64_t decrement_quantity) {
  // Get total quantity, but casting to int to truncate any fractional resources. Updates
  // are supported only on whole resources.
  int64_t available_quantity = TotalQuantity().ToDouble();
  RAY_LOG(DEBUG) << "[DecreaseCapacity] Available quantity: " << available_quantity;

  if (available_quantity < decrement_quantity) {
    RAY_LOG(DEBUG) << "[DecreaseCapacity] Available quantity < decrement quantity "
                   << decrement_quantity;
    // We're trying to remove more resources than are available
    // In this case, add the difference to the decrement backlog, and when resources are
    // released the backlog will be cleared
    decrement_backlog_ += (decrement_quantity - available_quantity);
    // To decrease capacity, just acquire resources and forget about them. They are popped
    // from whole_ids when acquired.
    Acquire(available_quantity);
  } else {
    RAY_LOG(DEBUG) << "[DecreaseCapacity] Available quantity > decrement quantity "
                   << decrement_quantity;
    // Simply acquire resources if sufficient are available
    Acquire(decrement_quantity);
  }
  total_capacity_ -= decrement_quantity;
}
// True iff the quantity has no fractional part (round-trips exactly
// through an int64).
bool ResourceIds::IsWhole(double resource_quantity) const {
  return static_cast<int64_t>(resource_quantity) == resource_quantity;
}
/// ResourceIdSet class implementation

// Empty: no resources tracked.
ResourceIdSet::ResourceIdSet() {}

// Build from a ResourceSet: each label gets freshly numbered whole IDs for
// its (whole-number) quantity.
ResourceIdSet::ResourceIdSet(const ResourceSet &resource_set) {
  for (auto const &resource_pair : resource_set.GetResourceMap()) {
    auto const &resource_name = resource_pair.first;
    double resource_quantity = resource_pair.second;
    available_resources_[resource_name] = ResourceIds(resource_quantity);
  }
}

// Build directly from a label -> ResourceIds map.
ResourceIdSet::ResourceIdSet(
    const std::unordered_map<std::string, ResourceIds> &available_resources)
    : available_resources_(available_resources) {}
// True iff every resource requested in resource_set is present here with
// sufficient available quantity.
bool ResourceIdSet::Contains(const ResourceSet &resource_set) const {
  for (auto const &request : resource_set.GetResourceAmountMap()) {
    auto found = available_resources_.find(request.first);
    if (found == available_resources_.end() ||
        !found->second.Contains(request.second)) {
      return false;
    }
  }
  return true;
}
// Acquire the requested quantities from this set, returning the specific
// IDs taken. Callers must check Contains() first — missing labels crash
// via RAY_CHECK. Labels whose available quantity drops to zero are erased.
ResourceIdSet ResourceIdSet::Acquire(const ResourceSet &resource_set) {
  std::unordered_map<std::string, ResourceIds> acquired_resources;

  for (auto const &resource_pair : resource_set.GetResourceAmountMap()) {
    auto const &resource_name = resource_pair.first;
    const FractionalResourceQuantity &resource_quantity = resource_pair.second;
    auto it = available_resources_.find(resource_name);
    RAY_CHECK(it != available_resources_.end());
    acquired_resources[resource_name] = it->second.Acquire(resource_quantity);
    if (it->second.TotalQuantityIsZero()) {
      available_resources_.erase(it);
    }
  }
  return ResourceIdSet(acquired_resources);
}

// Return previously-acquired IDs to this set, creating entries for labels
// that were fully drained (and erased) in the meantime.
void ResourceIdSet::Release(const ResourceIdSet &resource_id_set) {
  for (auto const &resource_pair : resource_id_set.AvailableResources()) {
    auto const &resource_name = resource_pair.first;
    auto const &resource_ids = resource_pair.second;
    RAY_CHECK(!resource_ids.TotalQuantityIsZero());

    auto it = available_resources_.find(resource_name);
    if (it == available_resources_.end()) {
      available_resources_[resource_name] = resource_ids;
    } else {
      it->second.Release(resource_ids);
    }
  }
}

// Like Release, but silently drops IDs for labels that no longer exist in
// resources_total (e.g. a dynamic resource that was deleted).
void ResourceIdSet::ReleaseConstrained(const ResourceIdSet &resource_id_set,
                                       const ResourceSet &resources_total) {
  for (auto const &resource_pair : resource_id_set.AvailableResources()) {
    auto const &resource_name = resource_pair.first;
    // Release only if the resource exists in resources_total
    if (resources_total.GetResource(resource_name) != 0) {
      auto const &resource_ids = resource_pair.second;
      RAY_CHECK(!resource_ids.TotalQuantityIsZero());

      auto it = available_resources_.find(resource_name);
      if (it == available_resources_.end()) {
        available_resources_[resource_name] = resource_ids;
      } else {
        it->second.Release(resource_ids);
      }
    }
  }
}
void ResourceIdSet::Clear() { available_resources_.clear(); }
ResourceIdSet ResourceIdSet::Plus(const ResourceIdSet &resource_id_set) const {
ResourceIdSet resource_id_set_to_return(available_resources_);
resource_id_set_to_return.Release(resource_id_set);
return resource_id_set_to_return;
}
/// Create a resource with the given capacity, or update an existing one.
void ResourceIdSet::AddOrUpdateResource(const std::string &resource_name,
                                        int64_t capacity) {
  auto found = available_resources_.find(resource_name);
  if (found == available_resources_.end()) {
    // Unknown resource: create it with the requested capacity.
    available_resources_[resource_name] = ResourceIds(capacity);
  } else {
    // Known resource: adjust its capacity in place.
    found->second.UpdateCapacity(capacity);
  }
}
// Erase a resource entirely; a no-op if the resource is not present.
void ResourceIdSet::DeleteResource(const std::string &resource_name) {
  available_resources_.erase(resource_name);
}
// Read-only view of the underlying resource-name -> ResourceIds map.
const std::unordered_map<std::string, ResourceIds> &ResourceIdSet::AvailableResources()
    const {
  return available_resources_;
}
/// Extract only the CPU entry (if any) into a new ResourceIdSet.
ResourceIdSet ResourceIdSet::GetCpuResources() const {
  std::unordered_map<std::string, ResourceIds> cpu_only;
  auto found = available_resources_.find(kCPU_ResourceLabel);
  if (found != available_resources_.end()) {
    cpu_only.insert(*found);
  }
  return ResourceIdSet(cpu_only);
}
/// Collapse the ID-level bookkeeping into per-resource total quantities.
ResourceSet ResourceIdSet::ToResourceSet() const {
  std::unordered_map<std::string, FractionalResourceQuantity> quantities;
  for (const auto &entry : available_resources_) {
    quantities[entry.first] = entry.second.TotalQuantity();
  }
  return ResourceSet(quantities);
}
/// Render as "AvailableResources: name: {ids}, name: {ids}, ...".
std::string ResourceIdSet::ToString() const {
  std::string result = "AvailableResources: ";
  bool first = true;
  for (const auto &entry : available_resources_) {
    // Separate entries with ", "; the first entry has no separator.
    if (!first) {
      result += ", ";
    }
    first = false;
    result += entry.first + ": {" + entry.second.ToString() + "}";
  }
  return result;
}
// Serialize each resource's whole and fractional IDs into flatbuffer
// ResourceIdSetInfo messages, one per resource name. IDs and their
// availability fractions are emitted as two parallel vectors.
std::vector<flatbuffers::Offset<protocol::ResourceIdSetInfo>> ResourceIdSet::ToFlatbuf(
    flatbuffers::FlatBufferBuilder &fbb) const {
  std::vector<flatbuffers::Offset<protocol::ResourceIdSetInfo>> return_message;
  for (auto const &resource_pair : available_resources_) {
    std::vector<int64_t> resource_ids;
    std::vector<double> resource_fractions;
    // Whole IDs are encoded with a fraction of exactly 1.
    for (auto whole_id : resource_pair.second.WholeIds()) {
      resource_ids.push_back(whole_id);
      resource_fractions.push_back(1);
    }
    // Fractional IDs carry their partial availability as a double.
    for (auto const &fractional_pair : resource_pair.second.FractionalIds()) {
      resource_ids.push_back(fractional_pair.first);
      resource_fractions.push_back(fractional_pair.second.ToDouble());
    }
    auto resource_id_set_message = protocol::CreateResourceIdSetInfo(
        fbb, fbb.CreateString(resource_pair.first), fbb.CreateVector(resource_ids),
        fbb.CreateVector(resource_fractions));
    return_message.push_back(resource_id_set_message);
  }
  return return_message;
}
// Serialize the whole set into a flatbuffer-encoded byte string
// (a ResourceIdSetInfos message wrapping the per-resource entries).
const std::string ResourceIdSet::Serialize() const {
  flatbuffers::FlatBufferBuilder fbb;
  fbb.Finish(protocol::CreateResourceIdSetInfos(fbb, fbb.CreateVector(ToFlatbuf(fbb))));
  return std::string(fbb.GetBufferPointer(), fbb.GetBufferPointer() + fbb.GetSize());
}
/// SchedulingResources class implementation
// Default construction: total, available, and load sets all start empty.
SchedulingResources::SchedulingResources()
    : resources_total_(ResourceSet()),
      resources_available_(ResourceSet()),
      resources_load_(ResourceSet()) {}
// Construction from a configured total: the full capacity starts available;
// load starts empty.
SchedulingResources::SchedulingResources(const ResourceSet &total)
    : resources_total_(total),
      resources_available_(total),
      resources_load_(ResourceSet()) {}
SchedulingResources::~SchedulingResources() {}
// Read-only view of the currently available capacity.
const ResourceSet &SchedulingResources::GetAvailableResources() const {
  return resources_available_;
}
/// \brief Overwrite available resource capacity with the specified resource set.
///
/// \param newset: The set of resources that replaces available resource capacity.
/// \return Void.
void SchedulingResources::SetAvailableResources(ResourceSet &&newset) {
  // `newset` is a named rvalue reference and therefore an lvalue inside this
  // function; without std::move the assignment silently copies the map,
  // defeating the purpose of the && signature.
  resources_available_ = std::move(newset);
}
// Read-only view of the statically configured total capacity.
const ResourceSet &SchedulingResources::GetTotalResources() const {
  return resources_total_;
}
/// \brief Overwrite the resource load information with the specified set.
///
/// \param newset: The set of resources that replaces resource load information.
/// \return Void.
void SchedulingResources::SetLoadResources(ResourceSet &&newset) {
  // Move rather than copy: `newset` is an lvalue here despite the && parameter
  // type, so a plain assignment would copy the whole map.
  resources_load_ = std::move(newset);
}
// Read-only view of the reported resource load.
const ResourceSet &SchedulingResources::GetLoadResources() const {
  return resources_load_;
}
// Return specified resources back to SchedulingResources.
void SchedulingResources::Release(const ResourceSet &resources) {
  // Additions are capped by resources_total_, so a double release cannot
  // push availability above the configured capacity.
  resources_available_.AddResourcesCapacityConstrained(resources, resources_total_);
}
// Take specified resources from SchedulingResources.
// Uses the strict subtraction variant, which (per the header) throws an
// error if availability would go below zero.
void SchedulingResources::Acquire(const ResourceSet &resources) {
  resources_available_.SubtractResourcesStrict(resources);
}
/// \brief Update total and available capacity for one resource, creating the
/// resource if it does not exist. Load resources are not touched.
///
/// \param resource_name: Name of the resource to be modified.
/// \param capacity: New total capacity of the resource.
/// \return Void.
// NOTE: the original text contained mojibake (`¤t_capacity`) where the
// `&current...` identifiers were mangled; restored below.
void SchedulingResources::UpdateResourceCapacity(const std::string &resource_name,
                                                 int64_t capacity) {
  const FractionalResourceQuantity new_capacity = FractionalResourceQuantity(capacity);
  const FractionalResourceQuantity &current_capacity =
      resources_total_.GetResource(resource_name);
  if (current_capacity > 0) {
    // The resource exists: shift both total and available by the delta, and
    // clamp available at zero so an over-subscribed resource cannot report
    // negative availability.
    const FractionalResourceQuantity capacity_difference =
        new_capacity - current_capacity;
    const FractionalResourceQuantity &current_available_capacity =
        resources_available_.GetResource(resource_name);
    FractionalResourceQuantity new_available_capacity =
        current_available_capacity + capacity_difference;
    if (new_available_capacity < 0) {
      new_available_capacity = 0;
    }
    resources_total_.AddOrUpdateResource(resource_name, new_capacity);
    resources_available_.AddOrUpdateResource(resource_name, new_available_capacity);
  } else {
    // Resource does not exist, just add it to total and available. Do not add to load.
    resources_total_.AddOrUpdateResource(resource_name, new_capacity);
    resources_available_.AddOrUpdateResource(resource_name, new_capacity);
  }
}
// Remove the resource from all three bookkeeping sets (total/available/load).
void SchedulingResources::DeleteResource(const std::string &resource_name) {
  resources_total_.DeleteResource(resource_name);
  resources_available_.DeleteResource(resource_name);
  resources_load_.DeleteResource(resource_name);
}
/// Human-readable dump of total and available resources (load is omitted).
std::string SchedulingResources::DebugString() const {
  std::stringstream result;
  result << "\n- total: " << resources_total_.ToString()
         << "\n- avail: " << resources_available_.ToString();
  return result.str();
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/scheduling_resources.h
|
C/C++ Header
|
#ifndef RAY_COMMON_TASK_SCHEDULING_RESOURCES_H
#define RAY_COMMON_TASK_SCHEDULING_RESOURCES_H
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "ray/raylet/format/node_manager_generated.h"
namespace ray {
/// Conversion factor that is the amount in internal units is equivalent to
/// one actual resource. Multiply to convert from actual to internal and
/// divide to convert from internal to actual.
constexpr double kResourceConversionFactor = 10000;
// Canonical resource label strings used throughout the scheduling code.
// NOTE(review): namespace-scope `const std::string` objects in a header give
// every translation unit its own copy and run dynamic initializers; if C++17
// is available, `inline` variables would avoid that -- confirm before changing.
const std::string kCPU_ResourceLabel = "CPU";
const std::string kGPU_ResourceLabel = "GPU";
const std::string kTPU_ResourceLabel = "TPU";
const std::string kMemory_ResourceLabel = "memory";
/// \class FractionalResourceQuantity
/// \brief Converts the resource quantities to an internal representation to
/// avoid machine precision errors.
class FractionalResourceQuantity {
 public:
  /// \brief Construct a FractionalResourceQuantity representing zero
  /// resources. This constructor is used by std::unordered_map when we try
  /// to add a new FractionalResourceQuantity in ResourceSets.
  FractionalResourceQuantity();
  /// \brief Construct a FractionalResourceQuantity representing
  /// resource_quantity.
  // NOTE(review): deliberately non-explicit, presumably so plain doubles
  // convert implicitly in arithmetic/comparisons -- confirm before changing.
  FractionalResourceQuantity(double resource_quantity);
  /// \brief Addition of FractionalResourceQuantity.
  const FractionalResourceQuantity operator+(const FractionalResourceQuantity &rhs) const;
  /// \brief Subtraction of FractionalResourceQuantity.
  const FractionalResourceQuantity operator-(const FractionalResourceQuantity &rhs) const;
  /// \brief Addition and assignment of FractionalResourceQuantity.
  void operator+=(const FractionalResourceQuantity &rhs);
  /// \brief Subtraction and assignment of FractionalResourceQuantity.
  void operator-=(const FractionalResourceQuantity &rhs);
  /// Equality and relational comparisons.
  bool operator==(const FractionalResourceQuantity &rhs) const;
  bool operator!=(const FractionalResourceQuantity &rhs) const;
  bool operator<(const FractionalResourceQuantity &rhs) const;
  bool operator>(const FractionalResourceQuantity &rhs) const;
  bool operator<=(const FractionalResourceQuantity &rhs) const;
  bool operator>=(const FractionalResourceQuantity &rhs) const;
  /// \brief Return actual resource amount as a double.
  double ToDouble() const;

 private:
  /// The resource quantity represented as 1/kResourceConversionFactor-th of a
  /// unit.
  int64_t resource_quantity_;
};
/// \class ResourceSet
/// \brief Encapsulates and operates on a set of resources, including CPUs,
/// GPUs, and custom labels.
class ResourceSet {
 public:
  /// \brief empty ResourceSet constructor.
  ResourceSet();
  /// \brief Constructs ResourceSet from the specified resource map.
  ResourceSet(
      const std::unordered_map<std::string, FractionalResourceQuantity> &resource_map);
  /// \brief Constructs ResourceSet from the specified resource map.
  ResourceSet(const std::unordered_map<std::string, double> &resource_map);
  /// \brief Constructs ResourceSet from two equal-length vectors with label and capacity
  /// specification.
  ResourceSet(const std::vector<std::string> &resource_labels,
              const std::vector<double> resource_capacity);
  /// \brief Empty ResourceSet destructor.
  ~ResourceSet();
  /// \brief Test equality with the other specified ResourceSet object.
  ///
  /// \param rhs: Right-hand side object for equality comparison.
  /// \return True if objects are equal, False otherwise.
  bool operator==(const ResourceSet &rhs) const;
  /// \brief Test equality with the other specified ResourceSet object.
  ///
  /// \param other: Right-hand side object for equality comparison.
  /// \return True if objects are equal, False otherwise.
  bool IsEqual(const ResourceSet &other) const;
  /// \brief Test whether this ResourceSet is a subset of the other ResourceSet.
  ///
  /// \param other: The resource set we check being a subset of.
  /// \return True if the current resource set is the subset of other. False
  /// otherwise.
  bool IsSubset(const ResourceSet &other) const;
  /// \brief Test if this ResourceSet is a superset of the other ResourceSet.
  ///
  /// \param other: The resource set we check being a superset of.
  /// \return True if the current resource set is the superset of other.
  /// False otherwise.
  bool IsSuperset(const ResourceSet &other) const;
  /// \brief Add or update a new resource to the resource set.
  ///
  /// \param resource_name: name/label of the resource to add.
  /// \param capacity: numeric capacity value for the resource to add.
  /// \return True, if the resource was successfully added. False otherwise.
  void AddOrUpdateResource(const std::string &resource_name,
                           const FractionalResourceQuantity &capacity);
  /// \brief Delete a resource from the resource set.
  ///
  /// \param resource_name: name/label of the resource to delete.
  /// \return True if the resource was found while deleting, false if the resource did not
  /// exist in the set.
  bool DeleteResource(const std::string &resource_name);
  /// \brief Add a set of resources to the current set of resources subject to upper
  /// limits on capacity from the total_resource set.
  ///
  /// \param other: The other resource set to add.
  /// \param total_resources: Total resource set which sets upper limits on capacity for
  /// each label. \return True if the resource set was added successfully. False
  /// otherwise.
  void AddResourcesCapacityConstrained(const ResourceSet &other,
                                       const ResourceSet &total_resources);
  /// \brief Aggregate resources from the other set into this set, adding any missing
  /// resource labels to this set.
  ///
  /// \param other: The other resource set to add.
  /// \return Void.
  void AddResources(const ResourceSet &other);
  /// \brief Subtract a set of resources from the current set of resources and
  /// check that the post-subtraction result nonnegative. Assumes other
  /// is a subset of the ResourceSet. Deletes any resource if the capacity after
  /// subtraction is zero.
  ///
  /// \param other: The resource set to subtract from the current resource set.
  /// \return Void.
  void SubtractResources(const ResourceSet &other);
  /// \brief Same as SubtractResources but throws an error if the resource value
  /// goes below zero.
  ///
  /// \param other: The resource set to subtract from the current resource set.
  /// \return Void.
  void SubtractResourcesStrict(const ResourceSet &other);
  /// Return the capacity value associated with the specified resource.
  ///
  /// \param resource_name: Resource name for which capacity is requested.
  /// \return The capacity value associated with the specified resource, zero if resource
  /// does not exist.
  FractionalResourceQuantity GetResource(const std::string &resource_name) const;
  /// Return the number of CPUs.
  // NOTE(review): despite the name, this returns a ResourceSet (presumably one
  // containing only the CPU entry), not a count -- confirm with the .cc file.
  ///
  /// \return Number of CPUs.
  const ResourceSet GetNumCpus() const;
  /// Return true if the resource set is empty. False otherwise.
  ///
  /// \return True if the resource capacity is zero. False otherwise.
  bool IsEmpty() const;
  // TODO(atumanov): implement const_iterator class for the ResourceSet container.
  // TODO(williamma12): Make sure that everywhere we use doubles we don't
  // convert it back to FractionalResourceQuantity.
  /// \brief Return a map of the resource and size in doubles. Note, size is in
  /// regular units and does not need to be multiplied by kResourceConversionFactor.
  ///
  /// \return map of resource in string to size in double.
  const std::unordered_map<std::string, double> GetResourceMap() const;
  /// \brief Return a map of the resource and size in FractionalResourceQuantity. Note,
  /// size is in kResourceConversionFactor of a unit.
  ///
  /// \return map of resource in string to size in FractionalResourceQuantity.
  const std::unordered_map<std::string, FractionalResourceQuantity>
      &GetResourceAmountMap() const;
  /// \brief Return a human-readable string listing each resource and capacity.
  const std::string ToString() const;

 private:
  /// Resource capacity map.
  std::unordered_map<std::string, FractionalResourceQuantity> resource_capacity_;
};
/// \class ResourceIds
/// \brief This class generalizes the concept of a resource "quantity" to
/// include specific resource IDs and fractions of those resources. A typical example
/// is GPUs, where the GPUs are numbered 0 through N-1, where N is the total number
/// of GPUs. This information is ultimately passed through to the worker processes
/// which need to know which GPUs to use.
class ResourceIds {
 public:
  /// \brief empty ResourceIds constructor.
  ResourceIds();
  /// \brief Constructs ResourceIds with a given amount of resource.
  ///
  /// \param resource_quantity: The total amount of resource. This must either be
  /// a whole number or a fraction less than 1.
  explicit ResourceIds(double resource_quantity);
  /// \brief Constructs ResourceIds with a given set of whole IDs.
  ///
  /// \param whole_ids: A vector of the resource IDs that are completely available.
  explicit ResourceIds(const std::vector<int64_t> &whole_ids);
  /// \brief Constructs ResourceIds with a given set of fractional IDs.
  ///
  /// \param fractional_ids: A vector of the resource IDs that are partially available.
  explicit ResourceIds(
      const std::vector<std::pair<int64_t, FractionalResourceQuantity>> &fractional_ids);
  /// \brief Constructs ResourceIds with a given set of whole IDs and fractional IDs.
  ///
  /// \param whole_ids: A vector of the resource IDs that are completely available.
  /// \param fractional_ids: A vector of the resource IDs that are partially available.
  ResourceIds(
      const std::vector<int64_t> &whole_ids,
      const std::vector<std::pair<int64_t, FractionalResourceQuantity>> &fractional_ids);
  /// \brief Check if we have at least the requested amount.
  ///
  /// If the argument is a whole number, then we return True precisely when
  /// we have enough whole IDs (ignoring fractional IDs). If the argument is a
  /// fraction, then there must either be a whole ID or a single fractional ID with
  /// a sufficiently large availability. E.g., if there are two IDs that have
  /// availability 0.5, then Contains(0.75) will return false.
  ///
  /// \param resource_quantity Either a whole number or a fraction less than 1.
  /// \return True if there we have enough of the resource.
  bool Contains(const FractionalResourceQuantity &resource_quantity) const;
  /// \brief Acquire the requested amount of the resource.
  ///
  /// \param resource_quantity The amount to acquire. Either a whole number or a
  /// fraction less than 1.
  /// \return A ResourceIds representing the specific acquired IDs.
  ResourceIds Acquire(const FractionalResourceQuantity &resource_quantity);
  /// \brief Return some resource IDs.
  ///
  /// \param resource_ids The specific resource IDs to return.
  /// \return Void.
  void Release(const ResourceIds &resource_ids);
  /// \brief Combine these IDs with some other IDs and return the result.
  ///
  /// \param resource_ids The IDs to add to these ones.
  /// \return The combination of the IDs.
  ResourceIds Plus(const ResourceIds &resource_ids) const;
  /// \brief Return just the whole IDs.
  ///
  /// \return The whole IDs.
  const std::vector<int64_t> &WholeIds() const;
  /// \brief Return just the fractional IDs.
  ///
  /// \return The fractional IDs.
  const std::vector<std::pair<int64_t, FractionalResourceQuantity>> &FractionalIds()
      const;
  /// \brief Check if ResourceIds has any resources.
  ///
  /// \return True if there are no whole or fractional resources. False otherwise.
  bool TotalQuantityIsZero() const;
  /// \brief Return the total quantity of resources, ignoring the specific IDs.
  ///
  /// \return The total quantity of the resource.
  FractionalResourceQuantity TotalQuantity() const;
  /// \brief Return a string representation of the object.
  ///
  /// \return A human-readable string representing the object.
  std::string ToString() const;
  /// \brief Set the capacity of this resource to new_capacity.
  // NOTE(review): the original doc said "Increase ... by the given amount",
  // but the parameter is named new_capacity; presumably a decrement beyond
  // currently-available resources goes to decrement_backlog_ (see below)
  // rather than throwing -- confirm with the implementation.
  ///
  /// \param new_capacity int of new capacity
  /// \return Void.
  void UpdateCapacity(int64_t new_capacity);

 private:
  /// Check that a double is in fact a whole number.
  ///
  /// \param resource_quantity A double.
  /// \return True if the double is an integer and false otherwise.
  bool IsWhole(double resource_quantity) const;
  /// \brief Increase resource capacity by the given amount.
  ///
  /// \param increment_quantity The quantity of resources to add.
  /// \return Void.
  void IncreaseCapacity(int64_t increment_quantity);
  /// \brief Decrease resource capacity by the given amount. Adds to the decrement backlog
  /// if more than available resources are decremented.
  ///
  /// \param decrement_quantity The quantity of resources to remove.
  /// \return Void.
  void DecreaseCapacity(int64_t decrement_quantity);
  /// A vector of distinct whole resource IDs.
  std::vector<int64_t> whole_ids_;
  /// A vector of pairs of resource ID and a fraction of that ID (the fraction
  /// is at least zero and strictly less than 1).
  std::vector<std::pair<int64_t, FractionalResourceQuantity>> fractional_ids_;
  /// Quantity to track the total capacity of the resource, since the whole_ids_ vector
  /// keeps changing
  FractionalResourceQuantity total_capacity_;
  /// Quantity to track any pending decrements in capacity that weren't executed because
  /// of insufficient available resources. This backlog in cleared in the release method.
  int64_t decrement_backlog_;
};
/// \class ResourceIdSet
/// \brief This class keeps track of the specific IDs that are available for a
/// collection of resources.
class ResourceIdSet {
 public:
  /// \brief empty ResourceIdSet constructor.
  ResourceIdSet();
  /// \brief Construct a ResourceIdSet from a ResourceSet.
  ///
  /// \param resource_set A mapping from resource name to quantity.
  ResourceIdSet(const ResourceSet &resource_set);
  /// \brief Construct a ResourceIdSet from a mapping from resource names to ResourceIds.
  ///
  /// \param resource_set A mapping from resource name to IDs.
  ResourceIdSet(const std::unordered_map<std::string, ResourceIds> &available_resources);
  /// \brief See if a requested collection of resources is contained.
  ///
  /// \param resource_set A mapping from resource name to quantity.
  /// \return True if each resource in resource_set is contained in the corresponding
  /// ResourceIds in this ResourceIdSet.
  bool Contains(const ResourceSet &resource_set) const;
  /// \brief Acquire a set of resources and return the specific acquired IDs.
  ///
  /// \param resource_set A mapping from resource name to quantity. This specifies
  /// the amount of each resource to acquire.
  /// \return A ResourceIdSet with the requested quantities, but with specific IDs.
  ResourceIdSet Acquire(const ResourceSet &resource_set);
  /// \brief Return a set of resource IDs.
  ///
  /// \param resource_id_set The resource IDs to return.
  /// \return Void.
  void Release(const ResourceIdSet &resource_id_set);
  /// \brief Return a set of resource IDs subject to their existence in the
  /// resources_total set.
  ///
  /// \param resource_id_set The resource IDs to return.
  /// \param resources_total Constraint set to restrict the release to. If a resource
  /// exists in resource_id_set but not in resources_total, it is not added to this
  /// ResourceIdSet. \return Void.
  void ReleaseConstrained(const ResourceIdSet &resource_id_set,
                          const ResourceSet &resources_total);
  /// \brief Clear out all of the resource IDs.
  ///
  /// \return Void.
  void Clear();
  /// \brief Combine another ResourceIdSet with this one.
  ///
  /// \param resource_id_set The other set of resource IDs to combine with this one.
  /// \return The combination of the two sets of resource IDs.
  ResourceIdSet Plus(const ResourceIdSet &resource_id_set) const;
  /// \brief Creates or updates a resource in the ResourceIdSet if it already exists.
  /// Raises an exception if the new capacity (when less than old capacity) cannot be set
  /// because of busy resources.
  ///
  /// \param resource_name the name of the resource to create/update
  /// \param capacity capacity of the resource being added
  void AddOrUpdateResource(const std::string &resource_name, int64_t capacity);
  /// \brief Deletes a resource in the ResourceIdSet. This does not raise an exception,
  /// just deletes the resource. Tasks with acquired resources keep running.
  ///
  /// \param resource_name the name of the resource to delete
  void DeleteResource(const std::string &resource_name);
  /// \brief Get the underlying mapping from resource name to resource IDs.
  ///
  /// \return The resource name to resource IDs mapping.
  const std::unordered_map<std::string, ResourceIds> &AvailableResources() const;
  /// Return the CPU resources.
  ///
  /// \return The CPU resources.
  ResourceIdSet GetCpuResources() const;
  /// \brief Get a mapping from each resource to the total quantity.
  ///
  /// \return A mapping from each resource to the total quantity.
  ResourceSet ToResourceSet() const;
  /// \brief Get a string representation of the object.
  ///
  /// \return A human-readable string version of the object.
  std::string ToString() const;
  /// \brief Serialize this object using flatbuffers.
  ///
  /// \param fbb A flatbuffer builder object.
  /// \return A flatbuffer serialized version of this object.
  std::vector<flatbuffers::Offset<ray::protocol::ResourceIdSetInfo>> ToFlatbuf(
      flatbuffers::FlatBufferBuilder &fbb) const;
  /// \brief Serialize this object as a string.
  ///
  /// \return A serialized string of this object.
  /// TODO(zhijunfu): this can be removed after raylet client is migrated to grpc.
  const std::string Serialize() const;

 private:
  /// A mapping from resource name to a set of resource IDs for that resource.
  std::unordered_map<std::string, ResourceIds> available_resources_;
};
/// \class SchedulingResources
/// SchedulingResources class encapsulates the state of all local resources and
/// manages accounting of those resources. Resources include configured resource
/// bundle capacity, and GPU allocation map.
class SchedulingResources {
 public:
  /// SchedulingResources constructor: sets configured and available resources
  /// to an empty set.
  SchedulingResources();
  /// SchedulingResources constructor: sets available and configured capacity
  /// to the resource set specified.
  ///
  /// \param total: The amount of total configured capacity.
  SchedulingResources(const ResourceSet &total);
  /// \brief SchedulingResources destructor.
  ~SchedulingResources();
  /// \brief Request the set and capacity of resources currently available.
  ///
  /// \return Immutable set of resources with currently available capacity.
  const ResourceSet &GetAvailableResources() const;
  /// \brief Overwrite available resource capacity with the specified resource set.
  ///
  /// \param newset: The set of resources that replaces available resource capacity.
  /// \return Void.
  void SetAvailableResources(ResourceSet &&newset);
  /// \brief Request the total configured resource capacity.
  const ResourceSet &GetTotalResources() const;
  /// \brief Overwrite information about resource load with new resource load set.
  ///
  /// \param newset: The set of resources that replaces resource load information.
  /// \return Void.
  void SetLoadResources(ResourceSet &&newset);
  /// \brief Request the resource load information.
  ///
  /// \return Immutable set of resources describing the load information.
  const ResourceSet &GetLoadResources() const;
  /// \brief Release the amount of resources specified.
  ///
  /// \param resources: the amount of resources to be released.
  /// \return Void.
  void Release(const ResourceSet &resources);
  /// \brief Acquire the amount of resources specified.
  ///
  /// \param resources: the amount of resources to be acquired.
  /// \return Void.
  void Acquire(const ResourceSet &resources);
  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;
  /// \brief Update total, available and load resources with the specified capacity.
  /// Create if not exists.
  ///
  /// \param resource_name: Name of the resource to be modified
  /// \param capacity: New capacity of the resource.
  /// \return Void.
  void UpdateResourceCapacity(const std::string &resource_name, int64_t capacity);
  /// \brief Delete resource from total, available and load resources.
  ///
  /// \param resource_name: Name of the resource to be deleted.
  /// \return Void.
  void DeleteResource(const std::string &resource_name);

 private:
  /// Static resource configuration (e.g., static_resources).
  ResourceSet resources_total_;
  /// Dynamic resource capacity (e.g., dynamic_resources).
  ResourceSet resources_available_;
  /// Resource load.
  ResourceSet resources_load_;
};
} // namespace ray
namespace std {
/// Hash specialization so ResourceSet can key std::unordered_map/set.
template <>
struct hash<ray::ResourceSet> {
  size_t operator()(ray::ResourceSet const &k) const {
    // GetResourceMap() returns the map by value; call it once, not twice.
    const auto resource_map = k.GetResourceMap();
    size_t seed = resource_map.size();
    for (const auto &elem : resource_map) {
      // Mix each (name, capacity) pair into a single value first so that
      // swapping capacities between two resources changes the hash (the old
      // independent XORs collided on that), then fold with '+' -- a
      // commutative op -- so the result is independent of the unordered_map's
      // iteration order: equal sets must hash equally.
      size_t pair_hash = std::hash<std::string>()(elem.first);
      pair_hash ^= std::hash<double>()(elem.second) + 0x9e3779b9 + (pair_hash << 6) +
                   (pair_hash >> 2);
      seed += pair_hash;
    }
    return seed;
  }
};
}  // namespace std
#endif // RAY_COMMON_TASK_SCHEDULING_RESOURCES_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/task.cc
|
C++
|
#include <sstream>
#include "task.h"
namespace ray {
// Accessor for the mutable execution spec (e.g., the forwarding count).
const TaskExecutionSpecification &Task::GetTaskExecutionSpec() const {
  return task_execution_spec_;
}
// Accessor for the immutable task spec determined at submission time.
const TaskSpecification &Task::GetTaskSpecification() const { return task_spec_; }
// Bump the count of how many times this task has been forwarded.
void Task::IncrementNumForwards() { task_execution_spec_.IncrementNumForwards(); }
// Cached object dependencies; populated by ComputeDependencies().
const std::vector<ObjectID> &Task::GetDependencies() const { return dependencies_; }
// Refresh the dependency cache from the task spec.
void Task::ComputeDependencies() { dependencies_ = task_spec_.GetDependencies(); }
// Copy only the mutable execution spec from another task; the immutable task
// spec and the cached dependencies are left untouched.
void Task::CopyTaskExecutionSpec(const Task &task) {
  task_execution_spec_ = task.task_execution_spec_;
}
/// Human-readable rendering of both the task spec and execution spec.
std::string Task::DebugString() const {
  return "task_spec={" + task_spec_.DebugString() + "}, task_execution_spec={" +
         task_execution_spec_.DebugString() + "}";
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/task.h
|
C/C++ Header
|
#ifndef RAY_COMMON_TASK_TASK_H
#define RAY_COMMON_TASK_TASK_H
#include <inttypes.h>
#include "ray/common/task/task_common.h"
#include "ray/common/task/task_execution_spec.h"
#include "ray/common/task/task_spec.h"
namespace ray {
/// Callback used to dispatch a task directly to a worker.
// NOTE(review): from the parameter types, arguments appear to be an opaque
// worker handle, an address string, a port, the worker ID, and the resource
// IDs granted to the task -- confirm against call sites.
typedef std::function<void(const std::shared_ptr<void>, const std::string &, int,
                           const WorkerID &, const ResourceIdSet &)>
    DispatchTaskCallback;
/// Arguments are the raylet ID to spill back to, the raylet's
/// address and the raylet's port.
typedef std::function<void(const ClientID &, const std::string &, int)>
    SpillbackTaskCallback;
/// \class Task
///
/// A Task represents a Ray task and a specification of its execution (e.g.,
/// resource demands). The task's specification contains both immutable fields,
/// determined at submission time, and mutable fields, determined at execution
/// time.
class Task {
 public:
  /// Construct an empty task. This should only be used to pass a task
  /// as an out parameter to a function or method.
  Task() {}
  /// Construct a `Task` object from a protobuf message.
  ///
  /// \param message The protobuf message.
  explicit Task(const rpc::Task &message)
      : task_spec_(message.task_spec()),
        task_execution_spec_(message.task_execution_spec()) {
    // Cache the dependencies eagerly so GetDependencies() is cheap.
    ComputeDependencies();
  }
  /// Construct a `Task` object from a `TaskSpecification` and a
  /// `TaskExecutionSpecification`.
  Task(TaskSpecification task_spec, TaskExecutionSpecification task_execution_spec)
      : task_spec_(std::move(task_spec)),
        task_execution_spec_(std::move(task_execution_spec)) {
    ComputeDependencies();
  }
  /// Override dispatch behaviour.
  void OnDispatchInstead(const DispatchTaskCallback &callback) {
    on_dispatch_ = callback;
  }
  /// Override spillback behaviour.
  void OnSpillbackInstead(const SpillbackTaskCallback &callback) {
    on_spillback_ = callback;
  }
  /// Get the mutable specification for the task. This specification may be
  /// updated at runtime.
  ///
  /// \return The mutable specification for the task.
  const TaskExecutionSpecification &GetTaskExecutionSpec() const;
  /// Get the immutable specification for the task.
  ///
  /// \return The immutable specification for the task.
  const TaskSpecification &GetTaskSpecification() const;
  /// Increment the number of times this task has been forwarded.
  void IncrementNumForwards();
  /// Get the task's object dependencies. This comprises the immutable task
  /// arguments and the mutable execution dependencies.
  ///
  /// \return The object dependencies.
  const std::vector<ObjectID> &GetDependencies() const;
  /// Update the dynamic/mutable information for this task.
  /// \param task Task structure with updated dynamic information.
  void CopyTaskExecutionSpec(const Task &task);
  /// Returns the override dispatch task callback, or nullptr.
  const DispatchTaskCallback &OnDispatch() const { return on_dispatch_; }
  /// Returns the override spillback task callback, or nullptr.
  const SpillbackTaskCallback &OnSpillback() const { return on_spillback_; }
  /// Human-readable rendering of both specs, for logging.
  std::string DebugString() const;

 private:
  /// Recompute the cached dependency list from the task spec.
  void ComputeDependencies();
  /// Task specification object, consisting of immutable information about this
  /// task determined at submission time. Includes resource demand, object
  /// dependencies, etc.
  TaskSpecification task_spec_;
  /// Task execution specification, consisting of all dynamic/mutable
  /// information about this task determined at execution time.
  TaskExecutionSpecification task_execution_spec_;
  /// A cached copy of the task's object dependencies, including arguments from
  /// the TaskSpecification and execution dependencies from the
  /// TaskExecutionSpecification.
  std::vector<ObjectID> dependencies_;
  /// For direct task calls, overrides the dispatch behaviour to send an RPC
  /// back to the submitting worker.
  // NOTE(review): `mutable` presumably allows setting the callback on a
  /// const Task -- though the setters above are non-const; confirm intent.
  mutable DispatchTaskCallback on_dispatch_ = nullptr;
  /// For direct task calls, overrides the spillback behaviour to send an RPC
  /// back to the submitting worker.
  mutable SpillbackTaskCallback on_spillback_ = nullptr;
};
} // namespace ray
#endif // RAY_COMMON_TASK_TASK_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/task_common.h
|
C/C++ Header
|
#ifndef RAY_COMMON_TASK_TASK_COMMON_H
#define RAY_COMMON_TASK_TASK_COMMON_H
#include "ray/protobuf/common.pb.h"
namespace ray {
// NOTE(hchen): Below we alias `ray::rpc::Language` and `ray::rpc::TaskType` in
// the `ray` namespace. The reason is because other code should use them as if
// they were defined in this `task_common.h` file, and shouldn't care about the
// implementation detail that they are defined in protobuf.
/// See `common.proto` for definition of `Language` enum.
using Language = rpc::Language;
/// See `common.proto` for definition of `TaskType` enum.
using TaskType = rpc::TaskType;
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/task_execution_spec.cc
|
C++
|
#include <sstream>
#include "ray/common/task/task_execution_spec.h"
namespace ray {
/// Number of times this task has been forwarded between node managers.
size_t TaskExecutionSpecification::NumForwards() const {
  return message_->num_forwards();
}
/// Bump the forward counter by one.
void TaskExecutionSpecification::IncrementNumForwards() {
  const auto forwards = message_->num_forwards();
  message_->set_num_forwards(forwards + 1);
}
/// Human-readable summary of the execution spec, e.g. "num_forwards=3".
std::string TaskExecutionSpecification::DebugString() const {
  return "num_forwards=" + std::to_string(message_->num_forwards());
}
}  // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/task_execution_spec.h
|
C/C++ Header
|
#ifndef RAY_COMMON_TASK_TASK_EXECUTION_SPEC_H
#define RAY_COMMON_TASK_TASK_EXECUTION_SPEC_H
#include <vector>
#include "ray/common/grpc_util.h"
#include "ray/common/id.h"
#include "ray/common/task/task_common.h"
namespace ray {
/// Wrapper class of protobuf `TaskExecutionSpec`, see `common.proto` for details.
class TaskExecutionSpecification : public MessageWrapper<rpc::TaskExecutionSpec> {
 public:
  /// Construct an empty task execution specification. This should not be used
  /// directly.
  TaskExecutionSpecification() {}
  /// Construct from a protobuf message object.
  /// The input message will be **copied** into this object.
  ///
  /// \param message The protobuf message.
  explicit TaskExecutionSpecification(rpc::TaskExecutionSpec message)
      : MessageWrapper(std::move(message)) {}
  /// Construct from protobuf-serialized binary.
  ///
  /// \param serialized_binary Protobuf-serialized binary.
  explicit TaskExecutionSpecification(const std::string &serialized_binary)
      : MessageWrapper(serialized_binary) {}
  /// Get the number of times this task has been forwarded.
  ///
  /// \return The number of times this task has been forwarded.
  size_t NumForwards() const;
  /// Increment the number of times this task has been forwarded.
  void IncrementNumForwards();
  /// Return a human-readable summary for logging/debugging.
  std::string DebugString() const;
};
} // namespace ray
#endif // RAY_COMMON_TASK_TASK_EXECUTION_SPEC_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/task_spec.cc
|
C++
|
#include <sstream>
#include "ray/common/task/task_spec.h"
#include "ray/util/logging.h"
namespace ray {
// Definitions of the static members declared in task_spec.h. All four are
// protected by `mutex_` (see the GUARDED_BY annotations in the header).
absl::Mutex TaskSpecification::mutex_;
std::unordered_map<SchedulingClassDescriptor, SchedulingClass>
    TaskSpecification::sched_cls_to_id_;
std::unordered_map<SchedulingClass, SchedulingClassDescriptor>
    TaskSpecification::sched_id_to_cls_;
int TaskSpecification::next_sched_id_;
// Look up the descriptor (resource set + function descriptor) previously
// registered for this scheduling class id by `ComputeResources`. Crashes if
// the id was never assigned.
// NOTE(review): returns a reference into the static map after the lock is
// released; this is safe only because entries are never erased — confirm.
SchedulingClassDescriptor &TaskSpecification::GetSchedulingClassDescriptor(
    SchedulingClass id) {
  absl::MutexLock lock(&mutex_);
  auto it = sched_id_to_cls_.find(id);
  RAY_CHECK(it != sched_id_to_cls_.end()) << "invalid id: " << id;
  return it->second;
}
/// Compute and cache the required resources and the scheduling class id for
/// this task. Called from every constructor; must run before
/// `GetSchedulingClass` or the resource getters are used.
void TaskSpecification::ComputeResources() {
  auto required_resources = MapFromProtobuf(message_->required_resources());
  auto required_placement_resources =
      MapFromProtobuf(message_->required_placement_resources());
  // Placement resources default to the execution resources when unset.
  if (required_placement_resources.empty()) {
    required_placement_resources = required_resources;
  }
  required_resources_.reset(new ResourceSet(required_resources));
  required_placement_resources_.reset(new ResourceSet(required_placement_resources));
  // Map the scheduling class descriptor to an integer for performance.
  auto sched_cls = std::make_pair(GetRequiredResources(), FunctionDescriptor());
  absl::MutexLock lock(&mutex_);
  auto it = sched_cls_to_id_.find(sched_cls);
  if (it == sched_cls_to_id_.end()) {
    sched_cls_id_ = ++next_sched_id_;
    // TODO(ekl) we might want to try cleaning up task types in these cases
    // Check the larger threshold first: the previous ordering (`> 100` tested
    // before `> 1000`) made the ERROR branch unreachable.
    if (sched_cls_id_ > 1000) {
      RAY_LOG(ERROR) << "More than " << sched_cls_id_
                     << " types of tasks seen, this may reduce performance.";
    } else if (sched_cls_id_ > 100) {
      RAY_LOG(WARNING) << "More than " << sched_cls_id_
                       << " types of tasks seen, this may reduce performance.";
    }
    sched_cls_to_id_[sched_cls] = sched_cls_id_;
    sched_id_to_cls_[sched_cls_id_] = sched_cls;
  } else {
    sched_cls_id_ = it->second;
  }
}
// Task specification getter methods.
TaskID TaskSpecification::TaskId() const {
if (message_->task_id().empty() /* e.g., empty proto default */) {
return TaskID::Nil();
}
return TaskID::FromBinary(message_->task_id());
}
JobID TaskSpecification::JobId() const {
if (message_->job_id().empty() /* e.g., empty proto default */) {
return JobID::Nil();
}
return JobID::FromBinary(message_->job_id());
}
TaskID TaskSpecification::ParentTaskId() const {
if (message_->parent_task_id().empty() /* e.g., empty proto default */) {
return TaskID::Nil();
}
return TaskID::FromBinary(message_->parent_task_id());
}
size_t TaskSpecification::ParentCounter() const { return message_->parent_counter(); }
std::vector<std::string> TaskSpecification::FunctionDescriptor() const {
return VectorFromProtobuf(message_->function_descriptor());
}
const SchedulingClass TaskSpecification::GetSchedulingClass() const {
RAY_CHECK(sched_cls_id_ > 0);
return sched_cls_id_;
}
/// Number of arguments to the task.
size_t TaskSpecification::NumArgs() const { return message_->args_size(); }
/// Number of return objects of the task.
size_t TaskSpecification::NumReturns() const { return message_->num_returns(); }
/// Object id of the `return_index`-th return value. Return indices are
/// 1-based inside ObjectID::ForTaskReturn, hence the +1.
ObjectID TaskSpecification::ReturnId(size_t return_index,
                                     TaskTransportType transport_type) const {
  return ObjectID::ForTaskReturn(TaskId(), return_index + 1,
                                 static_cast<uint8_t>(transport_type));
}
/// True if argument `arg_index` is passed by reference (carries object ids).
bool TaskSpecification::ArgByRef(size_t arg_index) const {
  return (ArgIdCount(arg_index) != 0);
}
/// Number of object ids in argument `arg_index` (0 for by-value args).
size_t TaskSpecification::ArgIdCount(size_t arg_index) const {
  return message_->args(arg_index).object_ids_size();
}
/// The `id_index`-th object id of by-reference argument `arg_index`.
ObjectID TaskSpecification::ArgId(size_t arg_index, size_t id_index) const {
  return ObjectID::FromBinary(message_->args(arg_index).object_ids(id_index));
}
/// Raw data bytes of by-value argument `arg_index` (valid while this spec lives).
const uint8_t *TaskSpecification::ArgData(size_t arg_index) const {
  return reinterpret_cast<const uint8_t *>(message_->args(arg_index).data().data());
}
size_t TaskSpecification::ArgDataSize(size_t arg_index) const {
  return message_->args(arg_index).data().size();
}
/// Raw metadata bytes of by-value argument `arg_index`.
const uint8_t *TaskSpecification::ArgMetadata(size_t arg_index) const {
  return reinterpret_cast<const uint8_t *>(message_->args(arg_index).metadata().data());
}
size_t TaskSpecification::ArgMetadataSize(size_t arg_index) const {
  return message_->args(arg_index).metadata().size();
}
/// Resources acquired while the task executes (cached by ComputeResources).
const ResourceSet &TaskSpecification::GetRequiredResources() const {
  return *required_resources_;
}
/// Return the object ids this task depends on: every id of every
/// by-reference argument, plus (for actor tasks) the dummy object of the
/// previous task on the same actor handle. Recomputed on each call, so it
/// stays correct if the spec is mutated.
std::vector<ObjectID> TaskSpecification::GetDependencies() const {
  std::vector<ObjectID> dependencies;
  for (size_t i = 0; i < NumArgs(); ++i) {
    // Use size_t throughout: ArgIdCount returns size_t and the previous
    // `int count` silently narrowed it.
    const size_t count = ArgIdCount(i);
    for (size_t j = 0; j < count; j++) {
      dependencies.push_back(ArgId(i, j));
    }
  }
  if (IsActorTask()) {
    dependencies.push_back(PreviousActorTaskDummyObjectId());
  }
  return dependencies;
}
/// Resources required to place the task on a node (a superset of the
/// execution resources; see the header for the rationale).
const ResourceSet &TaskSpecification::GetRequiredPlacementResources() const {
  return *required_placement_resources_;
}
bool TaskSpecification::IsDriverTask() const {
  // Driver tasks are empty tasks that have no function ID set.
  return FunctionDescriptor().empty();
}
/// Language the task runs in (see `common.proto`).
Language TaskSpecification::GetLanguage() const { return message_->language(); }
/// Whether this task is a normal (non-actor) task.
bool TaskSpecification::IsNormalTask() const {
  return message_->type() == TaskType::NORMAL_TASK;
}
/// Whether this task creates an actor.
bool TaskSpecification::IsActorCreationTask() const {
  return message_->type() == TaskType::ACTOR_CREATION_TASK;
}
/// Whether this task is an actor method invocation.
bool TaskSpecification::IsActorTask() const {
  return message_->type() == TaskType::ACTOR_TASK;
}
// === Below are getter methods specific to actor creation tasks.
/// Id of the actor created by this actor creation task.
ActorID TaskSpecification::ActorCreationId() const {
  RAY_CHECK(IsActorCreationTask());
  return ActorID::FromBinary(message_->actor_creation_task_spec().actor_id());
}
/// Max number of times the actor may be reconstructed after failure.
uint64_t TaskSpecification::MaxActorReconstructions() const {
  RAY_CHECK(IsActorCreationTask());
  return message_->actor_creation_task_spec().max_actor_reconstructions();
}
/// Extra options for the worker process that will host the actor.
std::vector<std::string> TaskSpecification::DynamicWorkerOptions() const {
  RAY_CHECK(IsActorCreationTask());
  return VectorFromProtobuf(
      message_->actor_creation_task_spec().dynamic_worker_options());
}
/// Id identifying the caller that submitted this task.
TaskID TaskSpecification::CallerId() const {
  return TaskID::FromBinary(message_->caller_id());
}
// === Below are getter methods specific to actor tasks.
/// Id of the actor this task targets.
ActorID TaskSpecification::ActorId() const {
  RAY_CHECK(IsActorTask());
  return ActorID::FromBinary(message_->actor_task_spec().actor_id());
}
/// Sequence number of this task on its actor handle.
uint64_t TaskSpecification::ActorCounter() const {
  RAY_CHECK(IsActorTask());
  return message_->actor_task_spec().actor_counter();
}
/// Dummy object returned by the actor's creation task.
ObjectID TaskSpecification::ActorCreationDummyObjectId() const {
  RAY_CHECK(IsActorTask());
  return ObjectID::FromBinary(
      message_->actor_task_spec().actor_creation_dummy_object_id());
}
/// Dummy object returned by the previous task on the same actor handle.
ObjectID TaskSpecification::PreviousActorTaskDummyObjectId() const {
  RAY_CHECK(IsActorTask());
  return ObjectID::FromBinary(
      message_->actor_task_spec().previous_actor_task_dummy_object_id());
}
/// The dummy object produced by this actor / actor creation task; it is
/// always the last return value.
ObjectID TaskSpecification::ActorDummyObject() const {
  RAY_CHECK(IsActorTask() || IsActorCreationTask());
  return ReturnId(NumReturns() - 1, TaskTransportType::RAYLET);
}
/// Whether the task uses the direct task transport.
bool TaskSpecification::IsDirectCall() const { return message_->is_direct_call(); }
/// Whether this is an actor creation task using the direct transport;
/// false for every non-creation task.
bool TaskSpecification::IsDirectActorCreationCall() const {
  if (IsActorCreationTask()) {
    return message_->actor_creation_task_spec().is_direct_call();
  } else {
    return false;
  }
}
/// Max number of concurrently executing tasks allowed on the actor.
int TaskSpecification::MaxActorConcurrency() const {
  RAY_CHECK(IsActorCreationTask());
  return message_->actor_creation_task_spec().max_concurrency();
}
/// Whether the actor runs an asyncio event loop.
bool TaskSpecification::IsAsyncioActor() const {
  RAY_CHECK(IsActorCreationTask());
  return message_->actor_creation_task_spec().is_asyncio();
}
/// Whether the actor outlives its creator (detached actor).
bool TaskSpecification::IsDetachedActor() const {
  return IsActorCreationTask() && message_->actor_creation_task_spec().is_detached();
}
/// Render a one-line human-readable summary of the spec for logs.
std::string TaskSpecification::DebugString() const {
  std::ostringstream stream;
  stream << "Type=" << TaskType_Name(message_->type())
         << ", Language=" << Language_Name(message_->language())
         << ", function_descriptor=";
  // Print function descriptor.
  const auto list = VectorFromProtobuf(message_->function_descriptor());
  // The 4th is the code hash which is binary bits. No need to output it.
  const size_t size = std::min(static_cast<size_t>(3), list.size());
  for (size_t i = 0; i < size; ++i) {
    if (i != 0) {
      stream << ",";
    }
    stream << list[i];
  }
  stream << ", task_id=" << TaskId() << ", job_id=" << JobId()
         << ", num_args=" << NumArgs() << ", num_returns=" << NumReturns();
  if (IsActorCreationTask()) {
    // Print actor creation task spec.
    stream << ", actor_creation_task_spec={actor_id=" << ActorCreationId()
           << ", max_reconstructions=" << MaxActorReconstructions()
           << ", is_direct_call=" << IsDirectCall()
           << ", max_concurrency=" << MaxActorConcurrency()
           << ", is_asyncio_actor=" << IsAsyncioActor()
           << ", is_detached=" << IsDetachedActor() << "}";
  } else if (IsActorTask()) {
    // Print actor task spec.
    stream << ", actor_task_spec={actor_id=" << ActorId()
           << ", actor_caller_id=" << CallerId() << ", actor_counter=" << ActorCounter()
           << "}";
  }
  return stream.str();
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/task_spec.h
|
C/C++ Header
|
#ifndef RAY_COMMON_TASK_TASK_SPEC_H
#define RAY_COMMON_TASK_TASK_SPEC_H
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/synchronization/mutex.h"
#include "ray/common/grpc_util.h"
#include "ray/common/id.h"
#include "ray/common/task/scheduling_resources.h"
#include "ray/common/task/task_common.h"
extern "C" {
#include "ray/thirdparty/sha256.h"
}
namespace ray {
typedef std::vector<std::string> FunctionDescriptor;
typedef std::pair<ResourceSet, FunctionDescriptor> SchedulingClassDescriptor;
typedef int SchedulingClass;
/// Wrapper class of protobuf `TaskSpec`, see `common.proto` for details.
/// TODO(ekl) we should consider passing around std::unique_ptrs<TaskSpecification>
/// instead `const TaskSpecification`, since this class is actually mutable.
class TaskSpecification : public MessageWrapper<rpc::TaskSpec> {
 public:
  /// Construct an empty task specification. This should not be used directly.
  TaskSpecification() {}
  /// Construct from a protobuf message object.
  /// The input message will be **copied** into this object.
  ///
  /// \param message The protobuf message.
  explicit TaskSpecification(rpc::TaskSpec message) : MessageWrapper(message) {
    ComputeResources();
  }
  /// Construct from a protobuf message shared_ptr.
  ///
  /// \param message The protobuf message.
  explicit TaskSpecification(std::shared_ptr<rpc::TaskSpec> message)
      : MessageWrapper(message) {
    ComputeResources();
  }
  /// Construct from protobuf-serialized binary.
  ///
  /// \param serialized_binary Protobuf-serialized binary.
  explicit TaskSpecification(const std::string &serialized_binary)
      : MessageWrapper(serialized_binary) {
    ComputeResources();
  }
  // TODO(swang): Finalize and document these methods.
  TaskID TaskId() const;
  JobID JobId() const;
  TaskID ParentTaskId() const;
  size_t ParentCounter() const;
  std::vector<std::string> FunctionDescriptor() const;
  size_t NumArgs() const;
  size_t NumReturns() const;
  bool ArgByRef(size_t arg_index) const;
  size_t ArgIdCount(size_t arg_index) const;
  ObjectID ArgId(size_t arg_index, size_t id_index) const;
  ObjectID ReturnId(size_t return_index, TaskTransportType transport_type) const;
  /// Convenience wrapper: return id for objects stored via the raylet/plasma
  /// transport.
  ObjectID ReturnIdForPlasma(size_t return_index) const {
    return ReturnId(return_index, TaskTransportType::RAYLET);
  }
  const uint8_t *ArgData(size_t arg_index) const;
  size_t ArgDataSize(size_t arg_index) const;
  const uint8_t *ArgMetadata(size_t arg_index) const;
  size_t ArgMetadataSize(size_t arg_index) const;
  /// Return the scheduling class of the task. The scheduler makes a best effort
  /// attempt to fairly dispatch tasks of different classes, preventing
  /// starvation of any single class of task.
  ///
  /// \return The scheduling class used for fair task queueing.
  const SchedulingClass GetSchedulingClass() const;
  /// Return the resources that are to be acquired during the execution of this
  /// task.
  ///
  /// \return The resources that will be acquired during the execution of this
  /// task.
  const ResourceSet &GetRequiredResources() const;
  /// Return the resources that are required for a task to be placed on a node.
  /// This will typically be the same as the resources acquired during execution
  /// and will always be a superset of those resources. However, they may
  /// differ, e.g., actor creation tasks may require more resources to be
  /// scheduled on a machine because the actor creation task may require no
  /// resources itself, but subsequent actor methods may require resources, and
  /// so the placement of the actor should take this into account.
  ///
  /// \return The resources that are required to place a task on a node.
  const ResourceSet &GetRequiredPlacementResources() const;
  /// Return the dependencies of this task. This is recomputed each time, so it can
  /// be used if the task spec is mutated.
  ///
  /// \return The recomputed dependencies for the task.
  std::vector<ObjectID> GetDependencies() const;
  bool IsDriverTask() const;
  Language GetLanguage() const;
  /// Whether this task is a normal task.
  bool IsNormalTask() const;
  /// Whether this task is an actor creation task.
  bool IsActorCreationTask() const;
  /// Whether this task is an actor task.
  bool IsActorTask() const;
  // Methods specific to actor creation tasks.
  ActorID ActorCreationId() const;
  uint64_t MaxActorReconstructions() const;
  std::vector<std::string> DynamicWorkerOptions() const;
  // Methods specific to actor tasks.
  ActorID ActorId() const;
  TaskID CallerId() const;
  uint64_t ActorCounter() const;
  ObjectID ActorCreationDummyObjectId() const;
  ObjectID PreviousActorTaskDummyObjectId() const;
  bool IsDirectCall() const;
  bool IsDirectActorCreationCall() const;
  int MaxActorConcurrency() const;
  bool IsAsyncioActor() const;
  bool IsDetachedActor() const;
  ObjectID ActorDummyObject() const;
  std::string DebugString() const;
  /// Look up the descriptor previously registered for a scheduling class id.
  static SchedulingClassDescriptor &GetSchedulingClassDescriptor(SchedulingClass id);

 private:
  /// Compute and cache resource fields and the scheduling class id; called
  /// from every non-default constructor.
  void ComputeResources();
  /// Field storing required resources. Initialized in constructor.
  /// TODO(ekl) consider optimizing the representation of ResourceSet for fast copies
  /// instead of keeping shared ptrs here.
  std::shared_ptr<ResourceSet> required_resources_;
  /// Field storing required placement resources. Initialized in constructor.
  std::shared_ptr<ResourceSet> required_placement_resources_;
  /// Cached scheduling class of this task.
  SchedulingClass sched_cls_id_;
  /// Below static fields could be mutated in `ComputeResources` concurrently due to
  /// multi-threading, we need a mutex to protect it.
  static absl::Mutex mutex_;
  /// Keep global static id mappings for SchedulingClass for performance.
  static std::unordered_map<SchedulingClassDescriptor, SchedulingClass> sched_cls_to_id_
      GUARDED_BY(mutex_);
  static std::unordered_map<SchedulingClass, SchedulingClassDescriptor> sched_id_to_cls_
      GUARDED_BY(mutex_);
  static int next_sched_id_ GUARDED_BY(mutex_);
};
} // namespace ray
/// We must define the hash since it's not auto-defined for vectors.
namespace std {
template <>
struct hash<ray::SchedulingClassDescriptor> {
  /// Hash the (resource set, function descriptor) pair.
  size_t operator()(ray::SchedulingClassDescriptor const &k) const {
    size_t seed = std::hash<ray::ResourceSet>()(k.first);
    for (const auto &str : k.second) {
      // Combine with the boost-style mixer rather than plain XOR: XOR is
      // commutative and self-cancelling, so descriptors whose strings were
      // permuted or duplicated would all collide. Hash values are only used
      // in-process, so changing the scheme is safe.
      seed ^= std::hash<std::string>()(str) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
    }
    return seed;
  }
};
} // namespace std
#endif // RAY_COMMON_TASK_TASK_SPEC_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/task/task_util.h
|
C/C++ Header
|
#ifndef RAY_COMMON_TASK_TASK_UTIL_H
#define RAY_COMMON_TASK_TASK_UTIL_H
#include "ray/common/buffer.h"
#include "ray/common/ray_object.h"
#include "ray/common/task/task_spec.h"
#include "ray/protobuf/common.pb.h"
namespace ray {
/// Helper class for building a `TaskSpecification` object.
class TaskSpecBuilder {
 public:
  TaskSpecBuilder() : message_(std::make_shared<rpc::TaskSpec>()) {}
  /// Build the `TaskSpecification` object.
  /// NOTE(review): the returned spec shares `message_` with this builder, so
  /// further builder mutations would be visible to it — confirm callers
  /// discard the builder after Build().
  TaskSpecification Build() { return TaskSpecification(message_); }
  /// Get a reference to the internal protobuf message object.
  const rpc::TaskSpec &GetMessage() const { return *message_; }
  /// Set the common attributes of the task spec.
  /// See `common.proto` for meaning of the arguments.
  ///
  /// \return Reference to the builder object itself.
  TaskSpecBuilder &SetCommonTaskSpec(
      const TaskID &task_id, const Language &language,
      const std::vector<std::string> &function_descriptor, const JobID &job_id,
      const TaskID &parent_task_id, uint64_t parent_counter, const TaskID &caller_id,
      const rpc::Address &caller_address, uint64_t num_returns, bool is_direct_call,
      const std::unordered_map<std::string, double> &required_resources,
      const std::unordered_map<std::string, double> &required_placement_resources) {
    // Defaults to NORMAL_TASK; the actor-specific setters below override it.
    message_->set_type(TaskType::NORMAL_TASK);
    message_->set_language(language);
    for (const auto &fd : function_descriptor) {
      message_->add_function_descriptor(fd);
    }
    message_->set_job_id(job_id.Binary());
    message_->set_task_id(task_id.Binary());
    message_->set_parent_task_id(parent_task_id.Binary());
    message_->set_parent_counter(parent_counter);
    message_->set_caller_id(caller_id.Binary());
    message_->mutable_caller_address()->CopyFrom(caller_address);
    message_->set_num_returns(num_returns);
    message_->set_is_direct_call(is_direct_call);
    message_->mutable_required_resources()->insert(required_resources.begin(),
                                                   required_resources.end());
    message_->mutable_required_placement_resources()->insert(
        required_placement_resources.begin(), required_placement_resources.end());
    return *this;
  }
  /// Add a by-reference argument to the task.
  ///
  /// \param arg_id Id of the argument.
  /// \return Reference to the builder object itself.
  TaskSpecBuilder &AddByRefArg(const ObjectID &arg_id) {
    message_->add_args()->add_object_ids(arg_id.Binary());
    return *this;
  }
  /// Add a by-value argument to the task.
  ///
  /// \param data String object that contains the data.
  /// \param metadata String object that contains the metadata.
  /// \return Reference to the builder object itself.
  TaskSpecBuilder &AddByValueArg(const std::string &data, const std::string &metadata) {
    auto arg = message_->add_args();
    arg->set_data(data);
    arg->set_metadata(metadata);
    return *this;
  }
  /// Add a by-value argument to the task.
  ///
  /// \param value the RayObject instance that contains the data and the metadata.
  /// \return Reference to the builder object itself.
  TaskSpecBuilder &AddByValueArg(const RayObject &value) {
    auto arg = message_->add_args();
    if (value.HasData()) {
      const auto &data = value.GetData();
      arg->set_data(data->Data(), data->Size());
    }
    if (value.HasMetadata()) {
      const auto &metadata = value.GetMetadata();
      arg->set_metadata(metadata->Data(), metadata->Size());
    }
    return *this;
  }
  /// Set the `ActorCreationTaskSpec` of the task spec.
  /// See `common.proto` for meaning of the arguments.
  ///
  /// \return Reference to the builder object itself.
  TaskSpecBuilder &SetActorCreationTaskSpec(
      const ActorID &actor_id, uint64_t max_reconstructions = 0,
      const std::vector<std::string> &dynamic_worker_options = {},
      bool is_direct_call = false, int max_concurrency = 1, bool is_detached = false,
      bool is_asyncio = false) {
    message_->set_type(TaskType::ACTOR_CREATION_TASK);
    auto actor_creation_spec = message_->mutable_actor_creation_task_spec();
    actor_creation_spec->set_actor_id(actor_id.Binary());
    actor_creation_spec->set_max_actor_reconstructions(max_reconstructions);
    for (const auto &option : dynamic_worker_options) {
      actor_creation_spec->add_dynamic_worker_options(option);
    }
    actor_creation_spec->set_is_direct_call(is_direct_call);
    actor_creation_spec->set_max_concurrency(max_concurrency);
    actor_creation_spec->set_is_asyncio(is_asyncio);
    actor_creation_spec->set_is_detached(is_detached);
    return *this;
  }
  /// Set the `ActorTaskSpec` of the task spec.
  /// See `common.proto` for meaning of the arguments.
  ///
  /// \return Reference to the builder object itself.
  TaskSpecBuilder &SetActorTaskSpec(const ActorID &actor_id,
                                    const ObjectID &actor_creation_dummy_object_id,
                                    const ObjectID &previous_actor_task_dummy_object_id,
                                    uint64_t actor_counter) {
    message_->set_type(TaskType::ACTOR_TASK);
    auto actor_spec = message_->mutable_actor_task_spec();
    actor_spec->set_actor_id(actor_id.Binary());
    actor_spec->set_actor_creation_dummy_object_id(
        actor_creation_dummy_object_id.Binary());
    actor_spec->set_previous_actor_task_dummy_object_id(
        previous_actor_task_dummy_object_id.Binary());
    actor_spec->set_actor_counter(actor_counter);
    return *this;
  }

 private:
  /// Underlying protobuf message; shared with any spec produced by Build().
  std::shared_ptr<rpc::TaskSpec> message_;
};
} // namespace ray
#endif // RAY_COMMON_TASK_TASK_UTIL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/actor_handle.cc
|
C++
|
#include "ray/core_worker/actor_handle.h"
#include <memory>
namespace {
/// Build the protobuf `ActorHandle` that backs a newly created actor handle.
///
/// \param actor_id Id of the actor.
/// \param job_id Id of the job that created the actor.
/// \param initial_cursor Dummy object id used as the initial cursor for
///        sequencing actor tasks.
/// \param actor_language Language of the actor (see `common.proto`).
/// \param is_direct_call Whether the actor uses the direct task transport.
/// \param actor_creation_task_function_descriptor Function descriptor of the
///        actor creation task.
ray::rpc::ActorHandle CreateInnerActorHandle(
    const class ActorID &actor_id, const class JobID &job_id,
    const ObjectID &initial_cursor, const Language actor_language, bool is_direct_call,
    const std::vector<std::string> &actor_creation_task_function_descriptor) {
  ray::rpc::ActorHandle inner;
  inner.set_actor_id(actor_id.Data(), actor_id.Size());
  inner.set_creation_job_id(job_id.Data(), job_id.Size());
  inner.set_actor_language(actor_language);
  *inner.mutable_actor_creation_task_function_descriptor() = {
      actor_creation_task_function_descriptor.begin(),
      actor_creation_task_function_descriptor.end()};
  inner.set_actor_cursor(initial_cursor.Binary());
  inner.set_is_direct_call(is_direct_call);
  return inner;
}
/// Deserialize a protobuf `ActorHandle` from a string produced by
/// `ActorHandle::Serialize`.
/// NOTE(review): the boolean result of `ParseFromString` is ignored, so a
/// corrupt input silently yields a default/partial handle — consider
/// checking it.
ray::rpc::ActorHandle CreateInnerActorHandleFromString(const std::string &serialized) {
  ray::rpc::ActorHandle inner;
  inner.ParseFromString(serialized);
  return inner;
}
} // namespace
namespace ray {
/// Construct a handle for a newly created actor (delegates to the
/// protobuf-based constructor).
ActorHandle::ActorHandle(
    const class ActorID &actor_id, const class JobID &job_id,
    const ObjectID &initial_cursor, const Language actor_language, bool is_direct_call,
    const std::vector<std::string> &actor_creation_task_function_descriptor)
    : ActorHandle(CreateInnerActorHandle(actor_id, job_id, initial_cursor, actor_language,
                                         is_direct_call,
                                         actor_creation_task_function_descriptor)) {}
/// Construct from a string produced by `Serialize`.
ActorHandle::ActorHandle(const std::string &serialized)
    : ActorHandle(CreateInnerActorHandleFromString(serialized)) {}
/// Fill in the actor-task fields of `builder` for the next task submitted on
/// this handle and advance the cursor to `new_cursor`. Thread-safe: the task
/// counter and cursor are updated under the handle's mutex.
void ActorHandle::SetActorTaskSpec(TaskSpecBuilder &builder,
                                   const TaskTransportType transport_type,
                                   const ObjectID new_cursor) {
  absl::MutexLock guard(&mutex_);
  // Build actor task spec.
  const TaskID actor_creation_task_id = TaskID::ForActorCreationTask(GetActorID());
  // The creation task's dummy object is its first return value (index 1).
  const ObjectID actor_creation_dummy_object_id =
      ObjectID::ForTaskReturn(actor_creation_task_id, /*index=*/1,
                              /*transport_type=*/static_cast<int>(transport_type));
  builder.SetActorTaskSpec(GetActorID(), actor_creation_dummy_object_id,
                           /*previous_actor_task_dummy_object_id=*/actor_cursor_,
                           task_counter_++);
  actor_cursor_ = new_cursor;
}
/// Serialize the persistent (protobuf) part of the handle into `output`.
/// `inner_` is const, so no lock is needed here.
void ActorHandle::Serialize(std::string *output) { inner_.SerializeToString(output); }
/// Reset sequencing state after the actor is restarted: the new actor
/// instance starts from task 0 and the original creation cursor.
void ActorHandle::Reset() {
  absl::MutexLock guard(&mutex_);
  task_counter_ = 0;
  actor_cursor_ = ObjectID::FromBinary(inner_.actor_cursor());
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/actor_handle.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_ACTOR_HANDLE_H
#define RAY_CORE_WORKER_ACTOR_HANDLE_H
#include <gtest/gtest_prod.h>
#include "ray/common/id.h"
#include "ray/common/task/task_util.h"
#include "ray/core_worker/common.h"
#include "ray/core_worker/context.h"
#include "ray/protobuf/core_worker.pb.h"
#include "ray/protobuf/gcs.pb.h"
namespace ray {
/// A handle to an actor, wrapping the persistent protobuf state plus
/// per-process sequencing state (task counter and cursor).
class ActorHandle {
 public:
  ActorHandle(ray::rpc::ActorHandle inner)
      : inner_(inner), actor_cursor_(ObjectID::FromBinary(inner_.actor_cursor())) {}
  // Constructs a new ActorHandle as part of the actor creation process.
  ActorHandle(const ActorID &actor_id, const JobID &job_id,
              const ObjectID &initial_cursor, const Language actor_language,
              bool is_direct_call,
              const std::vector<std::string> &actor_creation_task_function_descriptor);
  /// Constructs an ActorHandle from a serialized string.
  ActorHandle(const std::string &serialized);
  ActorID GetActorID() const { return ActorID::FromBinary(inner_.actor_id()); };
  /// ID of the job that created the actor (it is possible that the handle
  /// exists on a job with a different job ID).
  JobID CreationJobID() const { return JobID::FromBinary(inner_.creation_job_id()); };
  Language ActorLanguage() const { return inner_.actor_language(); };
  std::vector<std::string> ActorCreationTaskFunctionDescriptor() const {
    return VectorFromProtobuf(inner_.actor_creation_task_function_descriptor());
  };
  bool IsDirectCallActor() const { return inner_.is_direct_call(); }
  /// Fill in `builder` for the next actor task submitted via this handle;
  /// advances the internal cursor to `new_cursor`. Thread-safe.
  void SetActorTaskSpec(TaskSpecBuilder &builder, const TaskTransportType transport_type,
                        const ObjectID new_cursor);
  /// Serialize the persistent state into `output`.
  void Serialize(std::string *output);
  /// Reset the handle state next task submitted.
  ///
  /// This should be called whenever the actor is restarted, since the new
  /// instance of the actor does not have the previous sequence number.
  /// TODO: We should also move the other actor state (status and IP) inside
  /// ActorHandle and reset them in this method.
  void Reset();
  // Mark the actor handle as dead.
  void MarkDead() {
    absl::MutexLock lock(&mutex_);
    state_ = rpc::ActorTableData::DEAD;
  }
  // Returns whether the actor is known to be dead.
  bool IsDead() const {
    absl::MutexLock lock(&mutex_);
    return state_ == rpc::ActorTableData::DEAD;
  }

 private:
  // Protobuf-defined persistent state of the actor handle. Immutable after
  // construction, so const accessors above need no locking.
  const ray::rpc::ActorHandle inner_;
  /// The actor's state (alive or dead). This defaults to ALIVE. Once marked
  /// DEAD, the actor handle can never go back to being ALIVE.
  rpc::ActorTableData::ActorState state_ GUARDED_BY(mutex_) = rpc::ActorTableData::ALIVE;
  /// The unique id of the dummy object returned by the previous task.
  /// TODO: This can be removed once we schedule actor tasks by task counter
  /// only.
  // TODO: Save this state in the core worker.
  ObjectID actor_cursor_ GUARDED_BY(mutex_);
  // Number of tasks that have been submitted on this handle.
  uint64_t task_counter_ GUARDED_BY(mutex_) = 0;
  /// Mutex to protect fields in the actor handle.
  mutable absl::Mutex mutex_;
  FRIEND_TEST(ZeroNodeTest, TestActorHandle);
};
}  // namespace ray
} // namespace ray
#endif // RAY_CORE_WORKER_ACTOR_HANDLE_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/actor_manager.cc
|
C++
|
#include "ray/core_worker/actor_manager.h"
#include "ray/gcs/pb_util.h"
#include "ray/gcs/redis_accessor.h"
namespace ray {
/// Publish a DEAD entry to the GCS actor table for an actor whose creation
/// task can no longer be retried. The register call is asynchronous; failure
/// to update is only logged, since another node may have won the update race.
void ActorManager::PublishTerminatedActor(const TaskSpecification &actor_creation_task) {
  auto actor_id = actor_creation_task.ActorCreationId();
  auto data = gcs::CreateActorTableData(actor_creation_task, rpc::Address(),
                                        rpc::ActorTableData::DEAD, 0);
  auto update_callback = [actor_id](Status status) {
    if (!status.ok()) {
      // Only one node at a time should succeed at creating or updating the actor.
      RAY_LOG(ERROR) << "Failed to update state to DEAD for actor " << actor_id
                     << ", error: " << status.ToString();
    }
  };
  RAY_CHECK_OK(actor_accessor_.AsyncRegister(data, update_callback));
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/actor_manager.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_ACTOR_MANAGER_H
#define RAY_CORE_WORKER_ACTOR_MANAGER_H
#include "ray/core_worker/actor_handle.h"
#include "ray/gcs/redis_gcs_client.h"
namespace ray {
// Interface for testing.
class ActorManagerInterface {
 public:
  /// Publish a DEAD state for an actor that can no longer be restarted.
  virtual void PublishTerminatedActor(const TaskSpecification &actor_creation_task) = 0;
  virtual ~ActorManagerInterface() {}
};
/// Class to manage lifetimes of actors that we create (actor children).
/// Currently this class is only used to publish actor DEAD event
/// for actor creation task failures. All other cases are managed
/// by raylet.
class ActorManager : public ActorManagerInterface {
 public:
  /// \param actor_accessor GCS accessor for the actor table; must outlive
  ///        this manager (held by reference).
  ActorManager(gcs::ActorInfoAccessor &actor_accessor)
      : actor_accessor_(actor_accessor) {}
  /// Called when an actor that we own can no longer be restarted.
  void PublishTerminatedActor(const TaskSpecification &actor_creation_task) override;

 private:
  /// Global database of actors.
  gcs::ActorInfoAccessor &actor_accessor_;
};
} // namespace ray
#endif // RAY_CORE_WORKER_ACTOR_MANAGER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/common.cc
|
C++
|
#include "ray/core_worker/common.h"
namespace ray {
/// Map a worker type to its lowercase string name; crashes on unknown types.
std::string WorkerTypeString(WorkerType type) {
  switch (type) {
  case WorkerType::DRIVER:
    return "driver";
  case WorkerType::WORKER:
    return "worker";
  default:
    RAY_CHECK(false);
    return "";
  }
}
/// Map a task language to its lowercase string name; crashes on unknown values.
std::string LanguageString(Language language) {
  switch (language) {
  case Language::PYTHON:
    return "python";
  case Language::JAVA:
    return "java";
  default:
    RAY_CHECK(false);
    return "";
  }
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/common.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_COMMON_H
#define RAY_CORE_WORKER_COMMON_H
#include <string>
#include "ray/common/id.h"
#include "ray/common/ray_object.h"
#include "ray/common/task/task_spec.h"
#include "ray/raylet/raylet_client.h"
#include "ray/util/util.h"
namespace ray {
using WorkerType = rpc::WorkerType;
// Return a string representation of the worker type.
std::string WorkerTypeString(WorkerType type);
// Return a string representation of the language.
std::string LanguageString(Language language);
/// Information about a remote function.
class RayFunction {
 public:
  /// Default constructor.
  /// NOTE(review): leaves `language_` uninitialized; presumably only used as
  /// a placeholder before assignment — confirm against callers.
  RayFunction() {}
  RayFunction(Language language, const std::vector<std::string> &function_descriptor)
      : language_(language), function_descriptor_(function_descriptor) {}
  Language GetLanguage() const { return language_; }
  const std::vector<std::string> &GetFunctionDescriptor() const {
    return function_descriptor_;
  }

 private:
  /// Language the function is implemented in.
  Language language_;
  /// Identifier of the function (e.g. module/class/function names).
  std::vector<std::string> function_descriptor_;
};
/// Argument of a task.
///
/// An argument is either passed by reference (an ObjectID whose value the
/// executor must fetch) or by value (an inline RayObject). Exactly one of the
/// two members is non-null; the private constructor enforces construction
/// through the two factories below.
class TaskArg {
 public:
  /// Create a pass-by-reference task argument.
  ///
  /// \param[in] object_id Id of the argument.
  /// \return The task argument.
  static TaskArg PassByReference(const ObjectID &object_id) {
    return TaskArg(std::make_shared<ObjectID>(object_id), nullptr);
  }

  /// Create a pass-by-value task argument.
  ///
  /// \param[in] value Value of the argument. Must be non-null.
  /// \return The task argument.
  static TaskArg PassByValue(const std::shared_ptr<RayObject> &value) {
    RAY_CHECK(value) << "Value can't be null.";
    return TaskArg(nullptr, value);
  }

  /// Return true if this argument is passed by reference, false if passed by value.
  bool IsPassedByReference() const { return id_ != nullptr; }

  /// Get the reference object ID. Only valid when IsPassedByReference().
  const ObjectID &GetReference() const {
    RAY_CHECK(id_ != nullptr) << "This argument isn't passed by reference.";
    return *id_;
  }

  /// Get the value. Only valid when !IsPassedByReference().
  const RayObject &GetValue() const {
    RAY_CHECK(value_ != nullptr) << "This argument isn't passed by value.";
    return *value_;
  }

 private:
  TaskArg(const std::shared_ptr<ObjectID> id, const std::shared_ptr<RayObject> value)
      : id_(id), value_(value) {}

  /// Id of the argument if passed by reference, otherwise nullptr.
  const std::shared_ptr<ObjectID> id_;
  /// Value of the argument if passed by value, otherwise nullptr.
  const std::shared_ptr<RayObject> value_;
};
/// Options for all tasks (actor and non-actor) except for actor creation.
struct TaskOptions {
  TaskOptions() {}
  TaskOptions(int num_returns, bool is_direct_call,
              std::unordered_map<std::string, double> &resources)
      : num_returns(num_returns), is_direct_call(is_direct_call), resources(resources) {}

  /// Number of returns of this task.
  int num_returns = 1;
  /// Whether to use the direct task transport (worker-to-worker, bypassing
  /// the raylet for submission).
  bool is_direct_call = false;
  /// Resources required by this task.
  std::unordered_map<std::string, double> resources;
};
/// Options for actor creation tasks.
///
/// All fields are const: an instance is fully specified at construction and
/// immutable afterwards (so it cannot be copy-assigned).
struct ActorCreationOptions {
  ActorCreationOptions() {}
  ActorCreationOptions(uint64_t max_reconstructions, bool is_direct_call,
                       int max_concurrency,
                       const std::unordered_map<std::string, double> &resources,
                       const std::unordered_map<std::string, double> &placement_resources,
                       const std::vector<std::string> &dynamic_worker_options,
                       bool is_detached, bool is_asyncio)
      : max_reconstructions(max_reconstructions),
        is_direct_call(is_direct_call),
        max_concurrency(max_concurrency),
        resources(resources),
        placement_resources(placement_resources),
        dynamic_worker_options(dynamic_worker_options),
        is_detached(is_detached),
        is_asyncio(is_asyncio){};

  /// Maximum number of times that the actor should be reconstructed when it dies
  /// unexpectedly. It must be non-negative. If it's 0, the actor won't be reconstructed.
  const uint64_t max_reconstructions = 0;
  /// Whether to use direct actor call. If this is set to true, callers will submit
  /// tasks directly to the created actor without going through raylet.
  const bool is_direct_call = false;
  /// The max number of concurrent tasks to run on this direct call actor.
  const int max_concurrency = 1;
  /// Resources required by the whole lifetime of this actor.
  const std::unordered_map<std::string, double> resources;
  /// Resources required to place this actor.
  const std::unordered_map<std::string, double> placement_resources;
  /// The dynamic options used in the worker command when starting a worker process for
  /// an actor creation task.
  const std::vector<std::string> dynamic_worker_options;
  /// Whether to keep the actor persistent after driver exit. If true, this will set
  /// the worker to not be destroyed after the driver shutdown.
  const bool is_detached = false;
  /// Whether to use async mode of direct actor call. is_direct_call must be true.
  const bool is_asyncio = false;
};
} // namespace ray
#endif // RAY_CORE_WORKER_COMMON_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/context.cc
|
C++
|
#include "ray/core_worker/context.h"
namespace ray {
/// Per-thread context for core worker.
///
/// Tracks the task currently executing on this thread plus monotonically
/// increasing counters used to generate deterministic task / object IDs for
/// submissions and puts issued from that task.
struct WorkerThreadContext {
  WorkerThreadContext()
      : current_task_id_(TaskID::ForFakeTask()), task_index_(0), put_index_(0) {}

  /// Next 1-based index for a task submitted from the current task.
  int GetNextTaskIndex() { return ++task_index_; }

  /// Next 1-based index for an object put from the current task.
  int GetNextPutIndex() { return ++put_index_; }

  const TaskID &GetCurrentTaskID() const { return current_task_id_; }

  std::shared_ptr<const TaskSpecification> GetCurrentTask() const {
    return current_task_;
  }

  void SetCurrentTaskId(const TaskID &task_id) { current_task_id_ = task_id; }

  /// Install a new task on this thread. Requires that no tasks or puts have
  /// been issued since construction / reset (both counters must be zero).
  void SetCurrentTask(const TaskSpecification &task_spec) {
    RAY_CHECK(task_index_ == 0);
    RAY_CHECK(put_index_ == 0);
    SetCurrentTaskId(task_spec.TaskId());
    current_task_ = std::make_shared<const TaskSpecification>(task_spec);
  }

  /// Clear the current task id and reset both counters.
  /// NOTE(review): task_spec is unused here — presumably kept for signature
  /// symmetry with SetCurrentTask; confirm before removing.
  void ResetCurrentTask(const TaskSpecification &task_spec) {
    SetCurrentTaskId(TaskID::Nil());
    task_index_ = 0;
    put_index_ = 0;
  }

 private:
  /// The task ID for current task.
  TaskID current_task_id_;
  /// The current task.
  std::shared_ptr<const TaskSpecification> current_task_;
  /// Number of tasks that have been submitted from current task.
  int task_index_;
  /// Number of objects that have been put from current task.
  int put_index_;
};

// One lazily-created context per thread; see WorkerContext::GetThreadContext.
thread_local std::unique_ptr<WorkerThreadContext> WorkerContext::thread_context_ =
    nullptr;
/// Construct the worker-level context.
///
/// Drivers get a worker ID deterministically derived from the job ID and are
/// bound to that job immediately; workers get a random ID and are bound to a
/// job later, when their first task arrives (see SetCurrentTask).
WorkerContext::WorkerContext(WorkerType worker_type, const JobID &job_id)
    : worker_type_(worker_type),
      worker_id_(worker_type_ == WorkerType::DRIVER ? ComputeDriverIdFromJob(job_id)
                                                    : WorkerID::FromRandom()),
      current_job_id_(worker_type_ == WorkerType::DRIVER ? job_id : JobID::Nil()),
      current_actor_id_(ActorID::Nil()),
      main_thread_id_(boost::this_thread::get_id()) {
  // For worker main thread which initializes the WorkerContext,
  // set task_id according to whether current worker is a driver.
  // (For other threads it's set to random ID via GetThreadContext).
  GetThreadContext().SetCurrentTaskId((worker_type_ == WorkerType::DRIVER)
                                          ? TaskID::ForDriverTask(job_id)
                                          : TaskID::Nil());
}
const WorkerType WorkerContext::GetWorkerType() const { return worker_type_; }

const WorkerID &WorkerContext::GetWorkerID() const { return worker_id_; }

// Per-thread counters used when generating deterministic task / object IDs.
int WorkerContext::GetNextTaskIndex() { return GetThreadContext().GetNextTaskIndex(); }

int WorkerContext::GetNextPutIndex() { return GetThreadContext().GetNextPutIndex(); }

const JobID &WorkerContext::GetCurrentJobID() const { return current_job_id_; }

// The current task id is thread-local state, not worker-level state.
const TaskID &WorkerContext::GetCurrentTaskID() const {
  return GetThreadContext().GetCurrentTaskID();
}

void WorkerContext::SetCurrentJobId(const JobID &job_id) { current_job_id_ = job_id; }

void WorkerContext::SetCurrentTaskId(const TaskID &task_id) {
  GetThreadContext().SetCurrentTaskId(task_id);
}
/// Bind this worker to the given task, updating job / actor bindings.
///
/// - Normal task: the first task on this worker binds the job id.
/// - Actor creation task: binds the job id and the actor id; each may only
///   be set once, which the RAY_CHECKs enforce.
/// - Actor task: must match the job / actor this worker is already bound to.
void WorkerContext::SetCurrentTask(const TaskSpecification &task_spec) {
  GetThreadContext().SetCurrentTask(task_spec);
  if (task_spec.IsNormalTask()) {
    RAY_CHECK(current_job_id_.IsNil());
    SetCurrentJobId(task_spec.JobId());
    current_task_is_direct_call_ = task_spec.IsDirectCall();
  } else if (task_spec.IsActorCreationTask()) {
    RAY_CHECK(current_job_id_.IsNil());
    SetCurrentJobId(task_spec.JobId());
    RAY_CHECK(current_actor_id_.IsNil());
    current_actor_id_ = task_spec.ActorCreationId();
    current_actor_is_direct_call_ = task_spec.IsDirectActorCreationCall();
    current_actor_max_concurrency_ = task_spec.MaxActorConcurrency();
    current_actor_is_asyncio_ = task_spec.IsAsyncioActor();
  } else if (task_spec.IsActorTask()) {
    RAY_CHECK(current_job_id_ == task_spec.JobId());
    RAY_CHECK(current_actor_id_ == task_spec.ActorId());
  } else {
    RAY_CHECK(false);  // Unknown task type.
  }
}
/// Clear per-thread task state; for normal tasks also drop the job binding
/// that SetCurrentTask established.
void WorkerContext::ResetCurrentTask(const TaskSpecification &task_spec) {
  GetThreadContext().ResetCurrentTask(task_spec);
  if (!task_spec.IsNormalTask()) {
    return;
  }
  SetCurrentJobId(JobID::Nil());
}
std::shared_ptr<const TaskSpecification> WorkerContext::GetCurrentTask() const {
  return GetThreadContext().GetCurrentTask();
}

const ActorID &WorkerContext::GetCurrentActorID() const { return current_actor_id_; }

// True only on the thread that constructed this WorkerContext.
bool WorkerContext::CurrentThreadIsMain() const {
  return boost::this_thread::get_id() == main_thread_id_;
}
bool WorkerContext::ShouldReleaseResourcesOnBlockingCalls() const {
  // Check if we need to release resources when we block:
  // - Driver doesn't acquire resources and thus doesn't need to release.
  // - We only support lifetime resources for direct actors, which can be
  //   acquired when the actor is created, per call resources are not supported,
  //   thus we don't need to release resources for direct actor call.
  return worker_type_ != WorkerType::DRIVER && !CurrentActorIsDirectCall() &&
         CurrentThreadIsMain();
}

// The flags below are populated by SetCurrentTask when the normal / actor
// creation task is installed on this worker.
bool WorkerContext::CurrentActorIsDirectCall() const {
  return current_actor_is_direct_call_;
}

bool WorkerContext::CurrentTaskIsDirectCall() const {
  return current_task_is_direct_call_ || current_actor_is_direct_call_;
}

int WorkerContext::CurrentActorMaxConcurrency() const {
  return current_actor_max_concurrency_;
}

bool WorkerContext::CurrentActorIsAsync() const { return current_actor_is_asyncio_; }
/// Return this thread's context, lazily creating it on first access.
WorkerThreadContext &WorkerContext::GetThreadContext() {
  if (!thread_context_) {
    thread_context_.reset(new WorkerThreadContext());
  }
  return *thread_context_;
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/context.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_CONTEXT_H
#define RAY_CORE_WORKER_CONTEXT_H
#include <boost/thread.hpp>
#include "ray/common/task/task_spec.h"
#include "ray/core_worker/common.h"
namespace ray {
struct WorkerThreadContext;
/// Worker-level context shared across all threads of a core worker process.
///
/// Immutable identity (worker type / id) is fixed at construction; job and
/// actor bindings are set once, when the first task arrives. Per-task state
/// (current task id, task / put counters) lives in a thread_local
/// WorkerThreadContext.
class WorkerContext {
 public:
  WorkerContext(WorkerType worker_type, const JobID &job_id);

  const WorkerType GetWorkerType() const;

  const WorkerID &GetWorkerID() const;

  const JobID &GetCurrentJobID() const;

  const TaskID &GetCurrentTaskID() const;

  // TODO(edoakes): remove this once Python core worker uses the task interfaces.
  void SetCurrentJobId(const JobID &job_id);

  // TODO(edoakes): remove this once Python core worker uses the task interfaces.
  void SetCurrentTaskId(const TaskID &task_id);

  /// Bind this worker to a task (and, for creation tasks, to its job / actor).
  void SetCurrentTask(const TaskSpecification &task_spec);

  /// Clear per-thread task state after a task finishes.
  void ResetCurrentTask(const TaskSpecification &task_spec);

  std::shared_ptr<const TaskSpecification> GetCurrentTask() const;

  const ActorID &GetCurrentActorID() const;

  /// Returns whether the current thread is the main worker thread.
  bool CurrentThreadIsMain() const;

  /// Returns whether we should Block/Unblock through the raylet on Get/Wait.
  /// This only applies to direct task calls.
  bool ShouldReleaseResourcesOnBlockingCalls() const;

  /// Returns whether we are in a direct call actor.
  bool CurrentActorIsDirectCall() const;

  /// Returns whether we are in a direct call task. This encompasses both direct
  /// actor and normal tasks.
  bool CurrentTaskIsDirectCall() const;

  int CurrentActorMaxConcurrency() const;

  bool CurrentActorIsAsync() const;

  /// Per-thread counters used to build deterministic task / object IDs.
  int GetNextTaskIndex();

  int GetNextPutIndex();

 private:
  const WorkerType worker_type_;
  const WorkerID worker_id_;
  /// Job this worker is bound to (drivers at construction, workers on first task).
  JobID current_job_id_;
  /// Actor this worker hosts, if any (set by the actor creation task).
  ActorID current_actor_id_;
  bool current_actor_is_direct_call_ = false;
  bool current_task_is_direct_call_ = false;
  int current_actor_max_concurrency_ = 1;
  bool current_actor_is_asyncio_ = false;
  /// The id of the (main) thread that constructed this worker context.
  boost::thread::id main_thread_id_;

 private:
  static WorkerThreadContext &GetThreadContext();
  /// Per-thread worker context.
  static thread_local std::unique_ptr<WorkerThreadContext> thread_context_;
};
} // namespace ray
#endif // RAY_CORE_WORKER_CONTEXT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/core_worker.cc
|
C++
|
#include "ray/core_worker/core_worker.h"
#include <cstdlib>
#include "boost/fiber/all.hpp"
#include "ray/common/ray_config.h"
#include "ray/common/task/task_util.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/transport/direct_actor_transport.h"
#include "ray/core_worker/transport/raylet_transport.h"
#include "ray/util/util.h"
namespace {

// Duration between internal book-keeping heartbeats.
const int kInternalHeartbeatMillis = 1000;

/// Fill `builder` with the fields shared by all task types and compute the
/// IDs of the task's return objects.
///
/// Fix: the second parameter name was corrupted to "¤t_task_id" (the
/// "&curren" prefix of "&current_task_id" was decoded as the HTML entity
/// "&curren;"); restored to match the `current_task_id` use below.
///
/// \param[in] transport_type Transport the task uses; also encoded into each
///            return object ID so readers know where to fetch the value.
/// \param[out] return_ids Resized to `num_returns` and filled with the
///             return object IDs (return indices start at 1).
void BuildCommonTaskSpec(
    ray::TaskSpecBuilder &builder, const JobID &job_id, const TaskID &task_id,
    const TaskID &current_task_id, const int task_index, const TaskID &caller_id,
    const ray::rpc::Address &address, const ray::RayFunction &function,
    const std::vector<ray::TaskArg> &args, uint64_t num_returns,
    const std::unordered_map<std::string, double> &required_resources,
    const std::unordered_map<std::string, double> &required_placement_resources,
    ray::TaskTransportType transport_type, std::vector<ObjectID> *return_ids) {
  // Build common task spec.
  builder.SetCommonTaskSpec(task_id, function.GetLanguage(),
                            function.GetFunctionDescriptor(), job_id, current_task_id,
                            task_index, caller_id, address, num_returns,
                            transport_type == ray::TaskTransportType::DIRECT,
                            required_resources, required_placement_resources);
  // Set task arguments.
  for (const auto &arg : args) {
    if (arg.IsPassedByReference()) {
      builder.AddByRefArg(arg.GetReference());
    } else {
      builder.AddByValueArg(arg.GetValue());
    }
  }
  // Compute return IDs.
  return_ids->resize(num_returns);
  for (size_t i = 0; i < num_returns; i++) {
    (*return_ids)[i] =
        ObjectID::ForTaskReturn(task_id, i + 1,
                                /*transport_type=*/static_cast<int>(transport_type));
  }
}

// Group object ids according to the corresponding store providers.
// Direct-call objects live in the in-process memory store; everything else
// is stored in plasma.
void GroupObjectIdsByStoreProvider(const std::vector<ObjectID> &object_ids,
                                   absl::flat_hash_set<ObjectID> *plasma_object_ids,
                                   absl::flat_hash_set<ObjectID> *memory_object_ids) {
  for (const auto &object_id : object_ids) {
    if (object_id.IsDirectCallType()) {
      memory_object_ids->insert(object_id);
    } else {
      plasma_object_ids->insert(object_id);
    }
  }
}

}  // namespace
namespace ray {
/// Construct and fully wire up a core worker.
///
/// Initialization order matters: logging -> GCS client -> task receivers ->
/// gRPC server (so the listen port is known) -> raylet client (which
/// registers that port) -> timers / IO thread -> object stores -> task
/// manager -> task submitters.
///
/// \param check_signals Callback polled during blocking calls so language
///        frontends (e.g. Python) can raise on signals.
/// \param ref_counting_enabled Whether the memory store participates in
///        reference counting.
CoreWorker::CoreWorker(const WorkerType worker_type, const Language language,
                       const std::string &store_socket, const std::string &raylet_socket,
                       const JobID &job_id, const gcs::GcsClientOptions &gcs_options,
                       const std::string &log_dir, const std::string &node_ip_address,
                       int node_manager_port,
                       const TaskExecutionCallback &task_execution_callback,
                       std::function<Status()> check_signals, bool ref_counting_enabled)
    : worker_type_(worker_type),
      language_(language),
      log_dir_(log_dir),
      ref_counting_enabled_(ref_counting_enabled),
      check_signals_(check_signals),
      worker_context_(worker_type, job_id),
      io_work_(io_service_),
      client_call_manager_(new rpc::ClientCallManager(io_service_)),
      heartbeat_timer_(io_service_),
      internal_timer_(io_service_),
      core_worker_server_(WorkerTypeString(worker_type), 0 /* let grpc choose a port */),
      reference_counter_(std::make_shared<ReferenceCounter>()),
      task_queue_length_(0),
      num_executed_tasks_(0),
      task_execution_service_work_(task_execution_service_),
      task_execution_callback_(task_execution_callback),
      resource_ids_(new ResourceMappingType()),
      grpc_service_(io_service_, *this) {
  // Initialize logging if log_dir is passed. Otherwise, it must be initialized
  // and cleaned up by the caller.
  if (log_dir_ != "") {
    std::stringstream app_name;
    app_name << LanguageString(language_) << "-" << WorkerTypeString(worker_type_) << "-"
             << worker_context_.GetWorkerID();
    RayLog::StartRayLog(app_name.str(), RayLogLevel::INFO, log_dir_);
    RayLog::InstallFailureSignalHandler();
  }
  RAY_LOG(INFO) << "Initializing worker " << worker_context_.GetWorkerID();

  // Initialize gcs client.
  gcs_client_ = std::make_shared<gcs::RedisGcsClient>(gcs_options);
  RAY_CHECK_OK(gcs_client_->Connect(io_service_));

  actor_manager_ = std::unique_ptr<ActorManager>(new ActorManager(gcs_client_->Actors()));

  // Initialize profiler.
  profiler_ = std::make_shared<worker::Profiler>(worker_context_, node_ip_address,
                                                 io_service_, gcs_client_);

  // Initialize task receivers. Only workers execute tasks; drivers don't.
  if (worker_type_ == WorkerType::WORKER) {
    RAY_CHECK(task_execution_callback_ != nullptr);
    auto execute_task = std::bind(&CoreWorker::ExecuteTask, this, std::placeholders::_1,
                                  std::placeholders::_2, std::placeholders::_3);
    // Exit handler: drain outstanding tasks, then shut down on the task
    // execution loop so shutdown always runs on the same event loop.
    auto exit = [this](bool intentional) {
      // Release the resources early in case draining takes a long time.
      RAY_CHECK_OK(local_raylet_client_->NotifyDirectCallTaskBlocked());
      task_manager_->DrainAndShutdown([this, intentional]() {
        // To avoid problems, make sure shutdown is always called from the same
        // event loop each time.
        task_execution_service_.post([this, intentional]() {
          if (intentional) {
            Disconnect();  // Notify the raylet this is an intentional exit.
          }
          Shutdown();
        });
      });
    };
    raylet_task_receiver_ =
        std::unique_ptr<CoreWorkerRayletTaskReceiver>(new CoreWorkerRayletTaskReceiver(
            worker_context_.GetWorkerID(), local_raylet_client_, execute_task, exit));
    direct_task_receiver_ = std::unique_ptr<CoreWorkerDirectTaskReceiver>(
        new CoreWorkerDirectTaskReceiver(worker_context_, local_raylet_client_,
                                         task_execution_service_, execute_task, exit));
  }

  // Start RPC server after all the task receivers are properly initialized.
  core_worker_server_.RegisterService(grpc_service_);
  core_worker_server_.Run();

  // Initialize raylet client.
  // TODO(zhijunfu): currently RayletClient would crash in its constructor if it cannot
  // connect to Raylet after a number of retries, this can be changed later
  // so that the worker (java/python .etc) can retrieve and handle the error
  // instead of crashing.
  auto grpc_client = rpc::NodeManagerWorkerClient::make(
      node_ip_address, node_manager_port, *client_call_manager_);
  ClientID local_raylet_id;
  local_raylet_client_ = std::shared_ptr<raylet::RayletClient>(new raylet::RayletClient(
      std::move(grpc_client), raylet_socket, worker_context_.GetWorkerID(),
      (worker_type_ == ray::WorkerType::WORKER), worker_context_.GetCurrentJobID(),
      language_, &local_raylet_id, core_worker_server_.GetPort()));
  connected_ = true;

  // Set our own address so other workers / the raylet can reach us.
  RAY_CHECK(!local_raylet_id.IsNil());
  rpc_address_.set_ip_address(node_ip_address);
  rpc_address_.set_port(core_worker_server_.GetPort());
  rpc_address_.set_raylet_id(local_raylet_id.Binary());
  rpc_address_.set_worker_id(worker_context_.GetWorkerID().Binary());

  // Set timer to periodically send heartbeats containing active object IDs to the raylet.
  // If the heartbeat timeout is < 0, the heartbeats are disabled.
  if (RayConfig::instance().worker_heartbeat_timeout_milliseconds() >= 0) {
    // Seed using current time.
    std::srand(std::time(nullptr));
    // Randomly choose a time from [0, timeout]) to send the first heartbeat to avoid all
    // workers sending heartbeats at the same time.
    int64_t heartbeat_timeout =
        std::rand() % RayConfig::instance().worker_heartbeat_timeout_milliseconds();
    heartbeat_timer_.expires_from_now(
        boost::asio::chrono::milliseconds(heartbeat_timeout));
    heartbeat_timer_.async_wait(boost::bind(&CoreWorker::ReportActiveObjectIDs, this));
  }
  internal_timer_.expires_from_now(
      boost::asio::chrono::milliseconds(kInternalHeartbeatMillis));
  internal_timer_.async_wait(boost::bind(&CoreWorker::InternalHeartbeat, this));

  // Run the IO event loop on its own thread for the lifetime of this worker.
  io_thread_ = std::thread(&CoreWorker::RunIOService, this);

  // Object stores: plasma for shared-memory objects, memory store for
  // direct-call objects (which can spill into plasma via the callback).
  plasma_store_provider_.reset(new CoreWorkerPlasmaStoreProvider(
      store_socket, local_raylet_client_, check_signals_));
  memory_store_.reset(new CoreWorkerMemoryStore(
      [this](const RayObject &obj, const ObjectID &obj_id) {
        RAY_CHECK_OK(plasma_store_provider_->Put(obj, obj_id));
      },
      ref_counting_enabled ? reference_counter_ : nullptr, local_raylet_client_));

  task_manager_.reset(new TaskManager(
      memory_store_, reference_counter_, actor_manager_,
      [this](const TaskSpecification &spec) {
        // Retry after a delay to emulate the existing Raylet reconstruction
        // behaviour. TODO(ekl) backoff exponentially.
        RAY_LOG(ERROR) << "Will resubmit task after a 5 second delay: "
                       << spec.DebugString();
        absl::MutexLock lock(&mutex_);
        to_resubmit_.push_back(std::make_pair(current_time_ms() + 5000, spec));
      }));

  // Create an entry for the driver task in the task table. This task is
  // added immediately with status RUNNING. This allows us to push errors
  // related to this driver task back to the driver. For example, if the
  // driver creates an object that is later evicted, we should notify the
  // user that we're unable to reconstruct the object, since we cannot
  // rerun the driver.
  if (worker_type_ == WorkerType::DRIVER) {
    TaskSpecBuilder builder;
    std::vector<std::string> empty_descriptor;
    std::unordered_map<std::string, double> empty_resources;
    const TaskID task_id = TaskID::ForDriverTask(worker_context_.GetCurrentJobID());
    builder.SetCommonTaskSpec(
        task_id, language_, empty_descriptor, worker_context_.GetCurrentJobID(),
        TaskID::ComputeDriverTaskId(worker_context_.GetWorkerID()), 0, GetCallerId(),
        rpc_address_, 0, false, empty_resources, empty_resources);

    std::shared_ptr<gcs::TaskTableData> data = std::make_shared<gcs::TaskTableData>();
    data->mutable_task()->mutable_task_spec()->CopyFrom(builder.Build().GetMessage());
    RAY_CHECK_OK(gcs_client_->Tasks().AsyncAdd(data, nullptr));
    SetCurrentTaskId(task_id);
  }

  // Factory for worker-to-worker RPC clients, shared by all submitters.
  auto client_factory = [this](const std::string ip_address, int port) {
    return std::shared_ptr<rpc::CoreWorkerClient>(
        new rpc::CoreWorkerClient(ip_address, port, *client_call_manager_));
  };
  direct_actor_submitter_ = std::unique_ptr<CoreWorkerDirectActorTaskSubmitter>(
      new CoreWorkerDirectActorTaskSubmitter(rpc_address_, client_factory, memory_store_,
                                             task_manager_));

  direct_task_submitter_ =
      std::unique_ptr<CoreWorkerDirectTaskSubmitter>(new CoreWorkerDirectTaskSubmitter(
          rpc_address_, local_raylet_client_, client_factory,
          [this](const std::string ip_address, int port) {
            auto grpc_client = rpc::NodeManagerWorkerClient::make(ip_address, port,
                                                                  *client_call_manager_);
            return std::shared_ptr<raylet::RayletClient>(
                new raylet::RayletClient(std::move(grpc_client)));
          },
          memory_store_, task_manager_, local_raylet_id,
          RayConfig::instance().worker_lease_timeout_milliseconds()));
  future_resolver_.reset(new FutureResolver(memory_store_, client_factory));
  // Unfortunately the raylet client has to be constructed after the receivers.
  if (direct_task_receiver_ != nullptr) {
    direct_task_receiver_->Init(client_factory, rpc_address_);
  }
}
CoreWorker::~CoreWorker() {
  // Stop the event loop first so the IO thread's run() returns, then join it.
  io_service_.stop();
  io_thread_.join();
  // Only tear down logging if this object initialized it (see constructor).
  if (!log_dir_.empty()) {
    RayLog::ShutDownRayLog();
  }
}
/// Stop the IO event loop; workers additionally stop the task execution loop.
void CoreWorker::Shutdown() {
  io_service_.stop();
  if (worker_type_ != WorkerType::WORKER) {
    return;
  }
  task_execution_service_.stop();
}
/// Disconnect from GCS and the local raylet and stop the IO loop.
/// Idempotent: the connected_ flag guards against double disconnect.
void CoreWorker::Disconnect() {
  io_service_.stop();
  if (connected_) {
    connected_ = false;
    if (gcs_client_) {
      gcs_client_->Disconnect();
    }
    if (local_raylet_client_) {
      // Best-effort: the raylet may already be gone.
      RAY_IGNORE_EXPR(local_raylet_client_->Disconnect());
    }
  }
}
/// Entry point of the dedicated IO thread; blocks in io_service_.run() until
/// the service is stopped (see Shutdown / Disconnect / destructor).
void CoreWorker::RunIOService() {
#ifdef _WIN32
  // TODO(mehrdadn): Is there an equivalent for Windows we need here?
#else
  // Block SIGINT and SIGTERM so they will be handled by the main thread.
  sigset_t mask;
  sigemptyset(&mask);
  sigaddset(&mask, SIGINT);
  sigaddset(&mask, SIGTERM);
  pthread_sigmask(SIG_BLOCK, &mask, NULL);
#endif
  io_service_.run();
}
/// Update the current (main-thread) task id.
///
/// Passing TaskID::Nil() marks the end of a task; at the end of each
/// non-actor task we unsubscribe from and drop all cached actor handles.
void CoreWorker::SetCurrentTaskId(const TaskID &task_id) {
  worker_context_.SetCurrentTaskId(task_id);
  main_thread_task_id_ = task_id;
  bool not_actor_task = false;
  {
    // Narrow scope: don't hold mutex_ while taking actor_handles_mutex_ below.
    absl::MutexLock lock(&mutex_);
    not_actor_task = actor_id_.IsNil();
  }
  // Clear all actor handles at the end of each non-actor task.
  if (not_actor_task && task_id.IsNil()) {
    absl::MutexLock lock(&actor_handles_mutex_);
    for (const auto &handle : actor_handles_) {
      RAY_CHECK_OK(gcs_client_->Actors().AsyncUnsubscribe(handle.first, nullptr));
    }
    actor_handles_.clear();
  }
}
/// Periodic heartbeat: report the object IDs currently in scope to the local
/// raylet. Reschedules itself on the heartbeat timer.
void CoreWorker::ReportActiveObjectIDs() {
  std::unordered_set<ObjectID> active_object_ids;
  // A max of 0 disables reporting entirely (an empty set is still sent).
  size_t max_active = RayConfig::instance().raylet_max_active_object_ids();
  if (max_active > 0) {
    active_object_ids = reference_counter_->GetAllInScopeObjectIDs();
    if (active_object_ids.size() > max_active) {
      RAY_LOG(INFO) << active_object_ids.size() << " object IDs are currently in scope.";
    }
  }
  RAY_LOG(DEBUG) << "Sending " << active_object_ids.size() << " object IDs to raylet.";
  if (!local_raylet_client_->ReportActiveObjectIDs(active_object_ids).ok()) {
    // A failed write means the raylet is unreachable; nothing more to do.
    RAY_LOG(ERROR) << "Raylet connection failed. Shutting down.";
    Shutdown();
  }
  // Reset the timer from the previous expiration time to avoid drift.
  // NOTE(review): the timer is re-armed even after Shutdown() above. This
  // appears harmless because Shutdown() stops io_service_, so the handler
  // should never fire again, but an early return would be clearer — confirm.
  heartbeat_timer_.expires_at(
      heartbeat_timer_.expiry() +
      boost::asio::chrono::milliseconds(
          RayConfig::instance().worker_heartbeat_timeout_milliseconds()));
  heartbeat_timer_.async_wait(boost::bind(&CoreWorker::ReportActiveObjectIDs, this));
}
/// Periodic book-keeping: resubmit tasks whose retry delay has elapsed.
/// Reschedules itself every kInternalHeartbeatMillis.
void CoreWorker::InternalHeartbeat() {
  absl::MutexLock lock(&mutex_);
  // Entries are appended with monotonically increasing deadlines, so we can
  // stop at the first one that is not yet due.
  while (!to_resubmit_.empty() && current_time_ms() > to_resubmit_.front().first) {
    RAY_CHECK_OK(direct_task_submitter_->SubmitTask(to_resubmit_.front().second));
    to_resubmit_.pop_front();
  }
  internal_timer_.expires_at(internal_timer_.expiry() +
                             boost::asio::chrono::milliseconds(kInternalHeartbeatMillis));
  internal_timer_.async_wait(boost::bind(&CoreWorker::InternalHeartbeat, this));
}
/// Ensure a direct-call object's value is available in plasma (promoting it
/// from the in-process memory store if necessary) and look up its owner so
/// the ID can be serialized and later resolved by another worker.
///
/// \param[out] owner_id ID of the task that owns the object.
/// \param[out] owner_address RPC address of the owner.
void CoreWorker::PromoteToPlasmaAndGetOwnershipInfo(const ObjectID &object_id,
                                                    TaskID *owner_id,
                                                    rpc::Address *owner_address) {
  RAY_CHECK(object_id.IsDirectCallType());
  auto value = memory_store_->GetOrPromoteToPlasma(object_id);
  if (value) {
    // The value was still local in the memory store: write it to plasma here.
    RAY_CHECK_OK(plasma_store_provider_->Put(*value, object_id));
  }

  auto has_owner = reference_counter_->GetOwner(object_id, owner_id, owner_address);
  RAY_CHECK(has_owner)
      << "Object IDs generated randomly (ObjectID.from_random()) or out-of-band "
         "(ObjectID.from_binary(...)) cannot be serialized because Ray does not know "
         "which task will create them. "
         "If this was not how your object ID was generated, please file an issue "
         "at https://github.com/ray-project/ray/issues/";
}
/// Deserialization-side counterpart of PromoteToPlasmaAndGetOwnershipInfo:
/// record the owner of a borrowed object and start asking the owner about it.
void CoreWorker::RegisterOwnershipInfoAndResolveFuture(
    const ObjectID &object_id, const TaskID &owner_id,
    const rpc::Address &owner_address) {
  // Add the object's owner to the local metadata in case it gets serialized
  // again.
  reference_counter_->AddBorrowedObject(object_id, owner_id, owner_address);
  RAY_CHECK(!owner_id.IsNil());
  // We will ask the owner about the object until the object is
  // created or we can no longer reach the owner.
  future_resolver_->ResolveFutureAsync(object_id, owner_id, owner_address);
}
/// Set store client options (client name and memory limit in bytes).
Status CoreWorker::SetClientOptions(std::string name, int64_t limit_bytes) {
  // Currently only the Plasma store supports client options.
  return plasma_store_provider_->SetClientOptions(name, limit_bytes);
}
/// Put an object into plasma, generating a fresh object ID for it.
///
/// The ID is derived from the current task and a per-thread put counter, so
/// it is deterministic within a task execution. This worker becomes the
/// object's owner, and the raylet is asked to pin the object once created.
Status CoreWorker::Put(const RayObject &object, ObjectID *object_id) {
  *object_id = ObjectID::ForPut(worker_context_.GetCurrentTaskID(),
                                worker_context_.GetNextPutIndex(),
                                static_cast<uint8_t>(TaskTransportType::RAYLET));
  reference_counter_->AddOwnedObject(*object_id, GetCallerId(), rpc_address_);
  RAY_RETURN_NOT_OK(Put(object, *object_id));
  // Tell the raylet to pin the object **after** it is created.
  RAY_CHECK_OK(local_raylet_client_->PinObjectIDs(rpc_address_, {*object_id}));
  return Status::OK();
}
/// Put an object into plasma under a caller-provided ID. The ID must carry
/// the RAYLET transport flag (direct-call IDs belong to the memory store).
Status CoreWorker::Put(const RayObject &object, const ObjectID &object_id) {
  RAY_CHECK(object_id.GetTransportType() ==
            static_cast<uint8_t>(TaskTransportType::RAYLET))
      << "Invalid transport type flag in object ID: " << object_id.GetTransportType();
  return plasma_store_provider_->Put(object, object_id);
}
/// Reserve a plasma buffer for a new object, generating a fresh object ID.
///
/// \param[out] object_id The generated ID.
/// \param[out] data Writable buffer of data_size bytes; the caller fills it
///             in and then calls Seal().
Status CoreWorker::Create(const std::shared_ptr<Buffer> &metadata, const size_t data_size,
                          ObjectID *object_id, std::shared_ptr<Buffer> *data) {
  *object_id = ObjectID::ForPut(worker_context_.GetCurrentTaskID(),
                                worker_context_.GetNextPutIndex(),
                                static_cast<uint8_t>(TaskTransportType::RAYLET));
  return Create(metadata, data_size, *object_id, data);
}
/// Reserve a plasma buffer for an object under a caller-provided ID.
Status CoreWorker::Create(const std::shared_ptr<Buffer> &metadata, const size_t data_size,
                          const ObjectID &object_id, std::shared_ptr<Buffer> *data) {
  return plasma_store_provider_->Create(metadata, data_size, object_id, data);
}
/// Finalize an object created via Create(), making it readable.
///
/// \param[in] owns_object Whether this worker owns the object; if so it is
///            registered with the reference counter.
/// \param[in] pin_object Whether to ask the raylet to pin the object
///            (only consulted when owns_object is true).
Status CoreWorker::Seal(const ObjectID &object_id, bool owns_object, bool pin_object) {
  RAY_RETURN_NOT_OK(plasma_store_provider_->Seal(object_id));
  if (owns_object) {
    reference_counter_->AddOwnedObject(object_id, GetCallerId(), rpc_address_);
    if (pin_object) {
      // Tell the raylet to pin the object **after** it is created.
      RAY_CHECK_OK(local_raylet_client_->PinObjectIDs(rpc_address_, {object_id}));
    }
  }
  return Status::OK();
}
/// Fetch objects by ID, blocking for up to timeout_ms (negative = no limit).
///
/// Memory-store (direct call) objects are fetched first; any that come back
/// with IsInPlasmaError have been promoted to plasma and are re-fetched from
/// the plasma store with whatever time remains in the budget.
///
/// \param[out] results Same length and order as ids; an entry may stay
///             nullptr when the timeout expires before the object arrives.
Status CoreWorker::Get(const std::vector<ObjectID> &ids, const int64_t timeout_ms,
                       std::vector<std::shared_ptr<RayObject>> *results) {
  results->resize(ids.size(), nullptr);

  absl::flat_hash_set<ObjectID> plasma_object_ids;
  absl::flat_hash_set<ObjectID> memory_object_ids;
  GroupObjectIdsByStoreProvider(ids, &plasma_object_ids, &memory_object_ids);

  bool got_exception = false;
  absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> result_map;
  auto start_time = current_time_ms();
  if (!memory_object_ids.empty()) {
    RAY_RETURN_NOT_OK(memory_store_->Get(memory_object_ids, timeout_ms, worker_context_,
                                         &result_map, &got_exception));
  }

  if (!got_exception) {
    // If any of the objects have been promoted to plasma, then we retry their
    // gets at the provider plasma. Once we get the objects from plasma, we flip
    // the transport type again and return them for the original direct call ids.
    for (const auto &pair : result_map) {
      if (pair.second->IsInPlasmaError()) {
        RAY_LOG(INFO) << pair.first << " in plasma, doing fetch-and-get";
        plasma_object_ids.insert(pair.first);
      }
    }
    // Charge the time already spent in the memory store against the budget.
    int64_t local_timeout_ms = timeout_ms;
    if (timeout_ms >= 0) {
      local_timeout_ms = std::max(static_cast<int64_t>(0),
                                  timeout_ms - (current_time_ms() - start_time));
    }
    RAY_LOG(DEBUG) << "Plasma GET timeout " << local_timeout_ms;
    RAY_RETURN_NOT_OK(plasma_store_provider_->Get(plasma_object_ids, local_timeout_ms,
                                                  worker_context_, &result_map,
                                                  &got_exception));
  }

  // Loop through `ids` and fill each entry for the `results` vector,
  // this ensures that entries `results` have exactly the same order as
  // they are in `ids`. When there are duplicate object ids, all the entries
  // for the same id are filled in.
  bool missing_result = false;
  bool will_throw_exception = false;
  for (size_t i = 0; i < ids.size(); i++) {
    auto pair = result_map.find(ids[i]);
    if (pair != result_map.end()) {
      (*results)[i] = pair->second;
      RAY_CHECK(!pair->second->IsInPlasmaError());
      if (pair->second->IsException()) {
        // The language bindings should throw an exception if they see this
        // object.
        will_throw_exception = true;
      }
    } else {
      missing_result = true;
    }
  }
  // If no timeout was set and none of the results will throw an exception,
  // then check that we fetched all results before returning.
  if (timeout_ms < 0 && !will_throw_exception) {
    RAY_CHECK(!missing_result);
  }
  return Status::OK();
}
/// Check whether an object is available locally.
///
/// Direct-call objects are looked up in the in-process memory store first;
/// if the store reports the value was promoted to plasma (or the ID is not a
/// direct-call ID at all), the plasma store is consulted instead.
Status CoreWorker::Contains(const ObjectID &object_id, bool *has_object) {
  bool found = false;
  bool check_plasma = true;
  if (object_id.IsDirectCallType()) {
    bool in_plasma = false;
    found = memory_store_->Contains(object_id, &in_plasma);
    // Only fall through to plasma when the memory store says it moved there.
    check_plasma = in_plasma;
  }
  if (check_plasma) {
    RAY_RETURN_NOT_OK(plasma_store_provider_->Contains(object_id, &found));
  }
  *has_object = found;
  return Status::OK();
}
// For any objects that are ErrorType::OBJECT_IN_PLASMA, we need to move them from
// the ready set into the plasma_object_ids set to wait on them there.
void RetryObjectInPlasmaErrors(std::shared_ptr<CoreWorkerMemoryStore> &memory_store,
WorkerContext &worker_context,
absl::flat_hash_set<ObjectID> &memory_object_ids,
absl::flat_hash_set<ObjectID> &plasma_object_ids,
absl::flat_hash_set<ObjectID> &ready) {
for (const auto &mem_id : memory_object_ids) {
if (ready.find(mem_id) != ready.end()) {
std::vector<std::shared_ptr<RayObject>> found;
RAY_CHECK_OK(memory_store->Get({mem_id}, /*num_objects=*/1, /*timeout=*/0,
worker_context,
/*remote_after_get=*/false, &found));
if (found.size() == 1 && found[0]->IsInPlasmaError()) {
memory_object_ids.erase(mem_id);
ready.erase(mem_id);
plasma_object_ids.insert(mem_id);
}
}
}
}
// Wait until at least `num_objects` of `ids` are locally available or
// `timeout_ms` elapses (timeout_ms < 0 means wait indefinitely; 0 means poll
// once). On success, results->at(i) is true iff ids[i] was ready.
//
// The wait is performed in two phases: a 0-timeout pass over both store
// providers (so neither provider's objects starve the other), then, if more
// objects are still needed, a second pass using the remaining timeout.
Status CoreWorker::Wait(const std::vector<ObjectID> &ids, int num_objects,
                        int64_t timeout_ms, std::vector<bool> *results) {
  results->resize(ids.size(), false);
  // num_objects must be in [1, ids.size()].
  if (num_objects <= 0 || num_objects > static_cast<int>(ids.size())) {
    return Status::Invalid(
        "Number of objects to wait for must be between 1 and the number of ids.");
  }
  // Split the IDs by the store provider responsible for each.
  absl::flat_hash_set<ObjectID> plasma_object_ids;
  absl::flat_hash_set<ObjectID> memory_object_ids;
  GroupObjectIdsByStoreProvider(ids, &plasma_object_ids, &memory_object_ids);
  // Since the sets deduplicate, a size mismatch means `ids` had duplicates.
  if (plasma_object_ids.size() + memory_object_ids.size() != ids.size()) {
    return Status::Invalid("Duplicate object IDs not supported in wait.");
  }
  // TODO(edoakes): this logic is not ideal, and will have to be addressed
  // before we enable direct actor calls in the Python code. If we are waiting
  // on a list of objects mixed between multiple store providers, we could
  // easily end up in the situation where we're blocked waiting on one store
  // provider while another actually has enough objects ready to fulfill
  // 'num_objects'. This is partially addressed by trying them all once with
  // a timeout of 0, but that does not address the situation where objects
  // become available on the second store provider while waiting on the first.
  absl::flat_hash_set<ObjectID> ready;
  // Wait from both store providers with timeout set to 0. This is to avoid the case
  // where we might use up the entire timeout on trying to get objects from one store
  // provider before even trying another (which might have all of the objects available).
  if (memory_object_ids.size() > 0) {
    RAY_RETURN_NOT_OK(memory_store_->Wait(
        memory_object_ids,
        std::min(static_cast<int>(memory_object_ids.size()), num_objects),
        /*timeout_ms=*/0, worker_context_, &ready));
    // Objects reported ready but actually promoted to plasma are moved to the
    // plasma wait set.
    RetryObjectInPlasmaErrors(memory_store_, worker_context_, memory_object_ids,
                              plasma_object_ids, ready);
  }
  RAY_CHECK(static_cast<int>(ready.size()) <= num_objects);
  if (static_cast<int>(ready.size()) < num_objects && plasma_object_ids.size() > 0) {
    RAY_RETURN_NOT_OK(plasma_store_provider_->Wait(
        plasma_object_ids,
        std::min(static_cast<int>(plasma_object_ids.size()),
                 num_objects - static_cast<int>(ready.size())),
        /*timeout_ms=*/0, worker_context_, &ready));
  }
  RAY_CHECK(static_cast<int>(ready.size()) <= num_objects);
  // Second phase: if the quick pass did not satisfy num_objects and the caller
  // allows blocking, wait again with the real timeout.
  if (timeout_ms != 0 && static_cast<int>(ready.size()) < num_objects) {
    // Clear the ready set and retry. We clear it so that we can compute the number of
    // objects to fetch from the memory store easily below.
    ready.clear();
    int64_t start_time = current_time_ms();
    if (memory_object_ids.size() > 0) {
      RAY_RETURN_NOT_OK(memory_store_->Wait(
          memory_object_ids,
          std::min(static_cast<int>(memory_object_ids.size()), num_objects), timeout_ms,
          worker_context_, &ready));
      RetryObjectInPlasmaErrors(memory_store_, worker_context_, memory_object_ids,
                                plasma_object_ids, ready);
    }
    RAY_CHECK(static_cast<int>(ready.size()) <= num_objects);
    // Deduct the time already spent in the memory-store wait from the budget
    // for the plasma wait (only for finite timeouts).
    if (timeout_ms > 0) {
      timeout_ms =
          std::max(0, static_cast<int>(timeout_ms - (current_time_ms() - start_time)));
    }
    if (static_cast<int>(ready.size()) < num_objects && plasma_object_ids.size() > 0) {
      RAY_RETURN_NOT_OK(plasma_store_provider_->Wait(
          plasma_object_ids,
          std::min(static_cast<int>(plasma_object_ids.size()),
                   num_objects - static_cast<int>(ready.size())),
          timeout_ms, worker_context_, &ready));
    }
    RAY_CHECK(static_cast<int>(ready.size()) <= num_objects);
  }
  // Translate the ready set back into the caller's positional result vector.
  for (size_t i = 0; i < ids.size(); i++) {
    if (ready.find(ids[i]) != ready.end()) {
      results->at(i) = true;
    }
  }
  return Status::OK();
}
// Delete the given objects from this worker's stores and drop their
// references.
Status CoreWorker::Delete(const std::vector<ObjectID> &object_ids, bool local_only,
                          bool delete_creating_tasks) {
  absl::flat_hash_set<ObjectID> plasma_ids;
  absl::flat_hash_set<ObjectID> memory_ids;
  GroupObjectIdsByStoreProvider(object_ids, &plasma_ids, &memory_ids);
  // TODO(edoakes): what are the desired semantics for deleting from a non-owner?
  // Should we just delete locally or ping the owner and delete globally?
  reference_counter_->DeleteReferences(object_ids);
  // The memory store receives the plasma set by pointer, so it may add IDs to
  // it before the plasma delete below.
  memory_store_->Delete(memory_ids, &plasma_ids);
  return plasma_store_provider_->Delete(plasma_ids, local_only, delete_creating_tasks);
}
// Return a human-readable memory usage summary for this worker's stores.
std::string CoreWorker::MemoryUsageString() {
  // Only the plasma store provider currently exposes a debug string.
  auto usage = plasma_store_provider_->MemoryUsageString();
  return usage;
}
// Return the task ID used to identify this worker as a caller: actor workers
// are identified by their actor-creation task, other workers by the main
// thread's task ID.
TaskID CoreWorker::GetCallerId() const {
  const ActorID actor_id = GetActorId();
  return actor_id.IsNil() ? main_thread_task_id_
                          : TaskID::ForActorCreationTask(actor_id);
}
// Submit a normal (non-actor) task for execution.
//
// \param[in] function The remote function to execute.
// \param[in] args The task's arguments.
// \param[in] task_options Options such as number of returns and resources.
// \param[out] return_ids Generated IDs for the task's return objects.
// \param[in] max_retries Max retry count (used by the direct transport only).
// \return Status of the submission.
Status CoreWorker::SubmitTask(const RayFunction &function,
                              const std::vector<TaskArg> &args,
                              const TaskOptions &task_options,
                              std::vector<ObjectID> *return_ids, int max_retries) {
  TaskSpecBuilder builder;
  // The task index must be drawn before building the ID so that the ID and
  // the spec agree on it.
  const int next_task_index = worker_context_.GetNextTaskIndex();
  const auto task_id =
      TaskID::ForNormalTask(worker_context_.GetCurrentJobID(),
                            worker_context_.GetCurrentTaskID(), next_task_index);
  // Normal tasks have no placement resources beyond task_options.resources.
  const std::unordered_map<std::string, double> required_resources;
  // TODO(ekl) offload task building onto a thread pool for performance
  BuildCommonTaskSpec(
      builder, worker_context_.GetCurrentJobID(), task_id,
      worker_context_.GetCurrentTaskID(), next_task_index, GetCallerId(), rpc_address_,
      function, args, task_options.num_returns, task_options.resources,
      required_resources,
      task_options.is_direct_call ? TaskTransportType::DIRECT : TaskTransportType::RAYLET,
      return_ids);
  TaskSpecification task_spec = builder.Build();
  if (task_options.is_direct_call) {
    // Direct calls are tracked by the task manager (for retries/failures) and
    // submitted peer-to-peer.
    task_manager_->AddPendingTask(GetCallerId(), rpc_address_, task_spec, max_retries);
    return direct_task_submitter_->SubmitTask(task_spec);
  } else {
    // Raylet-transport tasks are handed to the local raylet for scheduling.
    return local_raylet_client_->SubmitTask(task_spec);
  }
}
// Create (submit the creation task for) a new actor.
//
// \param[in] function The actor's constructor function.
// \param[in] args Constructor arguments.
// \param[in] actor_creation_options Creation options (resources, direct call,
//            reconstruction limits, etc.).
// \param[out] return_actor_id The ID assigned to the new actor.
// \return Status of the submission.
Status CoreWorker::CreateActor(const RayFunction &function,
                               const std::vector<TaskArg> &args,
                               const ActorCreationOptions &actor_creation_options,
                               ActorID *return_actor_id) {
  // Draw the task index first; both the actor ID and the spec depend on it.
  const int next_task_index = worker_context_.GetNextTaskIndex();
  const ActorID actor_id =
      ActorID::Of(worker_context_.GetCurrentJobID(), worker_context_.GetCurrentTaskID(),
                  next_task_index);
  const TaskID actor_creation_task_id = TaskID::ForActorCreationTask(actor_id);
  const JobID job_id = worker_context_.GetCurrentJobID();
  std::vector<ObjectID> return_ids;
  TaskSpecBuilder builder;
  // The creation task has a single return, used below as the actor cursor.
  BuildCommonTaskSpec(builder, job_id, actor_creation_task_id,
                      worker_context_.GetCurrentTaskID(), next_task_index, GetCallerId(),
                      rpc_address_, function, args, 1, actor_creation_options.resources,
                      actor_creation_options.placement_resources,
                      actor_creation_options.is_direct_call ? TaskTransportType::DIRECT
                                                            : TaskTransportType::RAYLET,
                      &return_ids);
  builder.SetActorCreationTaskSpec(
      actor_id, actor_creation_options.max_reconstructions,
      actor_creation_options.dynamic_worker_options,
      actor_creation_options.is_direct_call, actor_creation_options.max_concurrency,
      actor_creation_options.is_detached, actor_creation_options.is_asyncio);
  // Register a local handle for the new actor before submitting the task.
  std::unique_ptr<ActorHandle> actor_handle(new ActorHandle(
      actor_id, job_id, /*actor_cursor=*/return_ids[0], function.GetLanguage(),
      actor_creation_options.is_direct_call, function.GetFunctionDescriptor()));
  RAY_CHECK(AddActorHandle(std::move(actor_handle)))
      << "Actor " << actor_id << " already exists";
  *return_actor_id = actor_id;
  TaskSpecification task_spec = builder.Build();
  if (actor_creation_options.is_direct_call) {
    // Direct-call creation tasks get at least the configured minimum number of
    // retries, regardless of the caller's max_reconstructions setting.
    task_manager_->AddPendingTask(
        GetCallerId(), rpc_address_, task_spec,
        std::max(RayConfig::instance().actor_creation_min_retries(),
                 actor_creation_options.max_reconstructions));
    return direct_task_submitter_->SubmitTask(task_spec);
  } else {
    return local_raylet_client_->SubmitTask(task_spec);
  }
}
// Submit a task to run on the given actor.
//
// \param[in] actor_id The target actor.
// \param[in] function The remote function to execute.
// \param[in] args The task's arguments.
// \param[in] task_options Options such as number of returns and resources.
// \param[out] return_ids IDs of the task's return objects (the internal actor
//             cursor object is stripped before returning).
// \return Status of the submission. For direct calls to a dead actor the
//         failure is reported through the task manager and the returned
//         status is still OK.
Status CoreWorker::SubmitActorTask(const ActorID &actor_id, const RayFunction &function,
                                   const std::vector<TaskArg> &args,
                                   const TaskOptions &task_options,
                                   std::vector<ObjectID> *return_ids) {
  ActorHandle *actor_handle = nullptr;
  RAY_RETURN_NOT_OK(GetActorHandle(actor_id, &actor_handle));

  // Add one for actor cursor object id for tasks.
  const int num_returns = task_options.num_returns + 1;

  const bool is_direct_call = actor_handle->IsDirectCallActor();
  const TaskTransportType transport_type =
      is_direct_call ? TaskTransportType::DIRECT : TaskTransportType::RAYLET;

  // Build common task spec.
  TaskSpecBuilder builder;
  const int next_task_index = worker_context_.GetNextTaskIndex();
  const TaskID actor_task_id = TaskID::ForActorTask(
      worker_context_.GetCurrentJobID(), worker_context_.GetCurrentTaskID(),
      next_task_index, actor_handle->GetActorID());
  const std::unordered_map<std::string, double> required_resources;
  BuildCommonTaskSpec(builder, actor_handle->CreationJobID(), actor_task_id,
                      worker_context_.GetCurrentTaskID(), next_task_index, GetCallerId(),
                      rpc_address_, function, args, num_returns, task_options.resources,
                      required_resources, transport_type, return_ids);

  // The last return ID is the new actor cursor; record it on the handle.
  const ObjectID new_cursor = return_ids->back();
  actor_handle->SetActorTaskSpec(builder, transport_type, new_cursor);
  // Remove cursor from return ids.
  return_ids->pop_back();

  // Submit task.
  Status status;
  TaskSpecification task_spec = builder.Build();
  if (is_direct_call) {
    task_manager_->AddPendingTask(GetCallerId(), rpc_address_, task_spec);
    if (actor_handle->IsDead()) {
      // Report the failure through the task manager; the returned status
      // intentionally stays OK in this case. Use a distinctly named local
      // here: the previous code shadowed the outer `status`, which obscured
      // which status was actually returned.
      auto fail_status = Status::IOError("sent task to dead actor");
      task_manager_->PendingTaskFailed(task_spec.TaskId(), rpc::ErrorType::ACTOR_DIED,
                                       &fail_status);
    } else {
      status = direct_actor_submitter_->SubmitTask(task_spec);
    }
  } else {
    RAY_CHECK_OK(local_raylet_client_->SubmitTask(task_spec));
  }
  return status;
}
// Kill the given actor. Only supported for direct-call actors.
Status CoreWorker::KillActor(const ActorID &actor_id) {
  ActorHandle *handle = nullptr;
  RAY_RETURN_NOT_OK(GetActorHandle(actor_id, &handle));
  // Killing non-direct-call actors is not supported.
  RAY_CHECK(handle->IsDirectCallActor());
  return direct_actor_submitter_->KillActor(actor_id);
}
// Reconstruct an actor handle from its serialized form, register it with this
// worker, and return its actor ID.
ActorID CoreWorker::DeserializeAndRegisterActorHandle(const std::string &serialized) {
  auto handle = std::unique_ptr<ActorHandle>(new ActorHandle(serialized));
  const ActorID id = handle->GetActorID();
  // The return value is deliberately ignored: the handle may already be
  // registered, which is not an error here.
  RAY_UNUSED(AddActorHandle(std::move(handle)));
  return id;
}
// Serialize the handle for the given actor into `output`.
// Returns an error if no handle for the actor is registered.
Status CoreWorker::SerializeActorHandle(const ActorID &actor_id,
                                        std::string *output) const {
  ActorHandle *handle = nullptr;
  const auto lookup_status = GetActorHandle(actor_id, &handle);
  if (!lookup_status.ok()) {
    return lookup_status;
  }
  handle->Serialize(output);
  return lookup_status;
}
// Register an actor handle with this worker.
//
// On first registration, also subscribes to GCS notifications about the
// actor's state (reconstructing / dead / alive) so the direct actor submitter
// can be kept up to date.
//
// \param[in] actor_handle The handle to register (ownership is taken).
// \return true if the handle was newly inserted, false if a handle for this
//         actor already existed (in which case the argument is dropped).
bool CoreWorker::AddActorHandle(std::unique_ptr<ActorHandle> actor_handle) {
  absl::MutexLock lock(&actor_handles_mutex_);
  const auto &actor_id = actor_handle->GetActorID();
  auto inserted = actor_handles_.emplace(actor_id, std::move(actor_handle)).second;
  if (inserted) {
    // Register a callback to handle actor notifications.
    // NOTE: the callback runs outside this function, so it re-acquires
    // actor_handles_mutex_ where it needs the handle map.
    auto actor_notification_callback = [this](const ActorID &actor_id,
                                              const gcs::ActorTableData &actor_data) {
      if (actor_data.state() == gcs::ActorTableData::RECONSTRUCTING) {
        absl::MutexLock lock(&actor_handles_mutex_);
        auto it = actor_handles_.find(actor_id);
        RAY_CHECK(it != actor_handles_.end());
        if (it->second->IsDirectCallActor()) {
          // We have to reset the actor handle since the next instance of the
          // actor will not have the last sequence number that we sent.
          // TODO: Remove the check for direct calls. We do not reset for the
          // raylet codepath because it tries to replay all tasks since the
          // last actor checkpoint.
          it->second->Reset();
        }
        direct_actor_submitter_->DisconnectActor(actor_id, false);
      } else if (actor_data.state() == gcs::ActorTableData::DEAD) {
        // Permanent disconnect; mark the handle dead so future submissions
        // fail fast.
        direct_actor_submitter_->DisconnectActor(actor_id, true);
        ActorHandle *actor_handle = nullptr;
        RAY_CHECK_OK(GetActorHandle(actor_id, &actor_handle));
        actor_handle->MarkDead();
        // We cannot erase the actor handle here because clients can still
        // submit tasks to dead actors. This also means we defer unsubscription,
        // otherwise we crash when bulk unsubscribing all actor handles.
      } else {
        // Actor is alive (or became alive); connect the submitter to it.
        direct_actor_submitter_->ConnectActor(actor_id, actor_data.address());
      }
      RAY_LOG(INFO) << "received notification on actor, state="
                    << static_cast<int>(actor_data.state()) << ", actor_id: " << actor_id
                    << ", ip address: " << actor_data.address().ip_address()
                    << ", port: " << actor_data.address().port() << ", worker_id: "
                    << WorkerID::FromBinary(actor_data.address().worker_id())
                    << ", raylet_id: "
                    << ClientID::FromBinary(actor_data.address().raylet_id());
    };
    RAY_CHECK_OK(gcs_client_->Actors().AsyncSubscribe(
        actor_id, actor_notification_callback, nullptr));
  }
  return inserted;
}
// Look up the registered handle for an actor.
// On success, *actor_handle points at the handle, which remains owned by this
// worker's handle map.
Status CoreWorker::GetActorHandle(const ActorID &actor_id,
                                  ActorHandle **actor_handle) const {
  absl::MutexLock lock(&actor_handles_mutex_);
  auto entry = actor_handles_.find(actor_id);
  if (entry != actor_handles_.end()) {
    *actor_handle = entry->second.get();
    return Status::OK();
  }
  return Status::Invalid("Handle for actor does not exist");
}
// Create a new profiling event of the given type bound to this worker's
// profiler.
std::unique_ptr<worker::ProfileEvent> CoreWorker::CreateProfileEvent(
    const std::string &event_type) {
  auto *event = new worker::ProfileEvent(profiler_, event_type);
  return std::unique_ptr<worker::ProfileEvent>(event);
}
// Drive the task execution service's event loop on the calling thread.
void CoreWorker::StartExecutingTasks() { task_execution_service_.run(); }
// Allocate buffers for a task's return objects.
//
// Small objects in direct-call tasks are allocated on the heap; larger (or
// non-direct-call) objects are allocated directly in the object store via
// Create().
//
// \param[in] object_ids IDs of the return objects.
// \param[in] data_sizes Data size for each return object (parallel array).
// \param[in] metadatas Metadata buffer for each return object (parallel array).
// \param[out] return_objects Allocated objects; an entry stays nullptr if the
//             object already exists in the store or has neither data nor
//             metadata.
// \return Status.
Status CoreWorker::AllocateReturnObjects(
    const std::vector<ObjectID> &object_ids, const std::vector<size_t> &data_sizes,
    const std::vector<std::shared_ptr<Buffer>> &metadatas,
    std::vector<std::shared_ptr<RayObject>> *return_objects) {
  RAY_CHECK(object_ids.size() == metadatas.size());
  RAY_CHECK(object_ids.size() == data_sizes.size());
  return_objects->resize(object_ids.size(), nullptr);

  for (size_t i = 0; i < object_ids.size(); i++) {
    bool object_already_exists = false;
    std::shared_ptr<Buffer> data_buffer;
    if (data_sizes[i] > 0) {
      if (worker_context_.CurrentTaskIsDirectCall() &&
          static_cast<int64_t>(data_sizes[i]) <
              RayConfig::instance().max_direct_call_object_size()) {
        // Small direct-call return: keep it in worker memory.
        data_buffer = std::make_shared<LocalMemoryBuffer>(data_sizes[i]);
      } else {
        // Large or raylet-transport return: allocate in the object store.
        RAY_RETURN_NOT_OK(
            Create(metadatas[i], data_sizes[i], object_ids[i], &data_buffer));
        // Create() returns a null buffer when the object already exists.
        object_already_exists = !data_buffer;
      }
    }
    // Leave the return object as a nullptr if there is no data or metadata.
    // This allows the caller to prevent the core worker from storing an output
    // (e.g., to support ray.experimental.no_return.NoReturn).
    if (!object_already_exists && (data_buffer || metadatas[i])) {
      return_objects->at(i) = std::make_shared<RayObject>(data_buffer, metadatas[i]);
    }
  }

  return Status::OK();
}
// Execute a task assigned to this worker.
//
// Sets up the worker context for the task, resolves its arguments, invokes the
// language-frontend execution callback, stores the results, and restores the
// context afterwards.
//
// \param[in] task_spec The task to execute.
// \param[in] resource_ids Resources assigned to the task (may be nullptr to
//            keep the previous assignment).
// \param[out] return_objects The task's return values.
// \return Status from the language-frontend execution callback.
Status CoreWorker::ExecuteTask(const TaskSpecification &task_spec,
                               const std::shared_ptr<ResourceMappingType> &resource_ids,
                               std::vector<std::shared_ptr<RayObject>> *return_objects) {
  task_queue_length_ -= 1;
  num_executed_tasks_ += 1;

  if (resource_ids != nullptr) {
    resource_ids_ = resource_ids;
  }
  worker_context_.SetCurrentTask(task_spec);
  SetCurrentTaskId(task_spec.TaskId());
  {
    absl::MutexLock lock(&mutex_);
    current_task_ = task_spec;
  }

  RayFunction func{task_spec.GetLanguage(), task_spec.FunctionDescriptor()};

  // Resolve the task's arguments (by-value inline, by-reference from plasma).
  std::vector<std::shared_ptr<RayObject>> args;
  std::vector<ObjectID> arg_reference_ids;
  RAY_CHECK_OK(BuildArgsForExecutor(task_spec, &args, &arg_reference_ids));

  const auto transport_type = worker_context_.CurrentTaskIsDirectCall()
                                  ? TaskTransportType::DIRECT
                                  : TaskTransportType::RAYLET;
  std::vector<ObjectID> return_ids;
  for (size_t i = 0; i < task_spec.NumReturns(); i++) {
    return_ids.push_back(task_spec.ReturnId(i, transport_type));
  }

  Status status;
  TaskType task_type = TaskType::NORMAL_TASK;
  if (task_spec.IsActorCreationTask()) {
    // The last return ID is the actor cursor; it is not a user-visible return.
    RAY_CHECK(return_ids.size() > 0);
    return_ids.pop_back();
    task_type = TaskType::ACTOR_CREATION_TASK;
    SetActorId(task_spec.ActorCreationId());
    RAY_LOG(INFO) << "Creating actor: " << task_spec.ActorCreationId();
  } else if (task_spec.IsActorTask()) {
    // Likewise, drop the cursor return for actor tasks.
    RAY_CHECK(return_ids.size() > 0);
    return_ids.pop_back();
    task_type = TaskType::ACTOR_TASK;
  }

  // Hand execution off to the language frontend.
  status = task_execution_callback_(task_type, func,
                                    task_spec.GetRequiredResources().GetResourceMap(),
                                    args, arg_reference_ids, return_ids, return_objects);

  // Store the results that were not written directly into the object store.
  for (size_t i = 0; i < return_objects->size(); i++) {
    // The object is nullptr if it already existed in the object store.
    if (!return_objects->at(i)) {
      continue;
    }
    if (return_objects->at(i)->GetData()->IsPlasmaBuffer()) {
      // NOTE(review): on failure these logs print `status.message()`, which is
      // the execution callback's status, not the Seal/Put status — confirm
      // whether that is intended.
      if (!Seal(return_ids[i], /*owns_object=*/false, /*pin_object=*/false).ok()) {
        RAY_LOG(FATAL) << "Task " << task_spec.TaskId() << " failed to seal object "
                       << return_ids[i] << " in store: " << status.message();
      }
    } else if (!worker_context_.CurrentTaskIsDirectCall()) {
      if (!Put(*return_objects->at(i), return_ids[i]).ok()) {
        RAY_LOG(FATAL) << "Task " << task_spec.TaskId() << " failed to put object "
                       << return_ids[i] << " in store: " << status.message();
      }
    }
  }

  // Warn about ObjectIDs leaking across task boundaries.
  if (task_spec.IsNormalTask() && reference_counter_->NumObjectIDsInScope() != 0) {
    RAY_LOG(DEBUG)
        << "There were " << reference_counter_->NumObjectIDsInScope()
        << " ObjectIDs left in scope after executing task " << task_spec.TaskId()
        << ". This is either caused by keeping references to ObjectIDs in Python between "
           "tasks (e.g., in global variables) or indicates a problem with Ray's "
           "reference counting, and may cause problems in the object store.";
  }

  // Restore the worker context now that the task is done.
  SetCurrentTaskId(TaskID::Nil());
  worker_context_.ResetCurrentTask(task_spec);
  {
    absl::MutexLock lock(&mutex_);
    current_task_ = TaskSpecification();
  }
  return status;
}
// Resolve a task's arguments before execution.
//
// By-value arguments are copied out of the task spec; by-reference arguments
// are fetched from the plasma store (blocking until available).
//
// \param[in] task The task whose arguments to build.
// \param[out] args One RayObject per argument.
// \param[out] arg_reference_ids Parallel to `args`: the ObjectID for
//             by-reference arguments, ObjectID::Nil() for by-value ones.
// \return Status of the plasma fetch.
Status CoreWorker::BuildArgsForExecutor(const TaskSpecification &task,
                                        std::vector<std::shared_ptr<RayObject>> *args,
                                        std::vector<ObjectID> *arg_reference_ids) {
  auto num_args = task.NumArgs();
  args->resize(num_args);
  arg_reference_ids->resize(num_args);

  absl::flat_hash_set<ObjectID> by_ref_ids;
  absl::flat_hash_map<ObjectID, int> by_ref_indices;

  for (size_t i = 0; i < task.NumArgs(); ++i) {
    int count = task.ArgIdCount(i);
    if (count > 0) {
      // pass by reference.
      RAY_CHECK(count == 1);
      // Direct call type objects that weren't inlined have been promoted to plasma.
      // We need to put an OBJECT_IN_PLASMA error here so the subsequent call to Get()
      // properly redirects to the plasma store.
      if (task.ArgId(i, 0).IsDirectCallType()) {
        RAY_CHECK_OK(memory_store_->Put(RayObject(rpc::ErrorType::OBJECT_IN_PLASMA),
                                        task.ArgId(i, 0)));
      }
      by_ref_ids.insert(task.ArgId(i, 0));
      by_ref_indices.emplace(task.ArgId(i, 0), i);
      arg_reference_ids->at(i) = task.ArgId(i, 0);
    } else {
      // pass by value.
      // Wrap the inline data/metadata from the spec; copy_data=true so the
      // argument does not alias the spec's storage.
      std::shared_ptr<LocalMemoryBuffer> data = nullptr;
      if (task.ArgDataSize(i)) {
        data = std::make_shared<LocalMemoryBuffer>(const_cast<uint8_t *>(task.ArgData(i)),
                                                   task.ArgDataSize(i));
      }
      std::shared_ptr<LocalMemoryBuffer> metadata = nullptr;
      if (task.ArgMetadataSize(i)) {
        metadata = std::make_shared<LocalMemoryBuffer>(
            const_cast<uint8_t *>(task.ArgMetadata(i)), task.ArgMetadataSize(i));
      }
      args->at(i) = std::make_shared<RayObject>(data, metadata, /*copy_data*/ true);
      arg_reference_ids->at(i) = ObjectID::Nil();
    }
  }

  // Fetch by-reference arguments directly from the plasma store.
  bool got_exception = false;
  absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> result_map;
  // timeout=-1: block until all by-reference arguments are available.
  RAY_RETURN_NOT_OK(plasma_store_provider_->Get(by_ref_ids, -1, worker_context_,
                                                &result_map, &got_exception));
  for (const auto &it : result_map) {
    args->at(by_ref_indices[it.first]) = it.second;
  }

  return Status::OK();
}
// RPC handler: the raylet assigns a (raylet-transport) task to this worker.
// The request is queued onto the task execution service so it runs on the
// execution thread rather than the RPC thread.
void CoreWorker::HandleAssignTask(const rpc::AssignTaskRequest &request,
                                  rpc::AssignTaskReply *reply,
                                  rpc::SendReplyCallback send_reply_callback) {
  // Ignore requests that were intended for a different worker process.
  if (HandleWrongRecipient(WorkerID::FromBinary(request.intended_worker_id()),
                           send_reply_callback)) {
    return;
  }

  // Direct-call actors do not accept raylet-assigned tasks. (Early return
  // instead of the previous else-after-return.)
  if (worker_context_.CurrentActorIsDirectCall()) {
    send_reply_callback(Status::Invalid("This actor only accepts direct calls."), nullptr,
                        nullptr);
    return;
  }

  task_queue_length_ += 1;
  task_execution_service_.post([=] {
    raylet_task_receiver_->HandleAssignTask(request, reply, send_reply_callback);
  });
}
// RPC handler: a direct-call task is pushed to this worker. The task is
// queued onto the task execution service so it runs on the execution thread.
void CoreWorker::HandlePushTask(const rpc::PushTaskRequest &request,
                                rpc::PushTaskReply *reply,
                                rpc::SendReplyCallback send_reply_callback) {
  // Ignore requests that were intended for a different worker process.
  const auto intended_worker = WorkerID::FromBinary(request.intended_worker_id());
  if (HandleWrongRecipient(intended_worker, send_reply_callback)) {
    return;
  }
  task_queue_length_ += 1;
  task_execution_service_.post([=] {
    direct_task_receiver_->HandlePushTask(request, reply, send_reply_callback);
  });
}
// RPC handler: notification that the arguments of a queued direct actor call
// have become available. Forwarded to the direct task receiver on the task
// execution event loop.
void CoreWorker::HandleDirectActorCallArgWaitComplete(
    const rpc::DirectActorCallArgWaitCompleteRequest &request,
    rpc::DirectActorCallArgWaitCompleteReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  // Ignore requests that were intended for a different worker process.
  const auto intended_worker = WorkerID::FromBinary(request.intended_worker_id());
  if (HandleWrongRecipient(intended_worker, send_reply_callback)) {
    return;
  }
  task_execution_service_.post([=] {
    direct_task_receiver_->HandleDirectActorCallArgWaitComplete(request, reply,
                                                                send_reply_callback);
  });
}
// RPC handler: a borrower asks for the status of an object owned by this
// worker. The reply is deferred until the creating task has finished, using a
// check / add-reference / re-check pattern to avoid racing with task
// completion and object eviction.
void CoreWorker::HandleGetObjectStatus(const rpc::GetObjectStatusRequest &request,
                                       rpc::GetObjectStatusReply *reply,
                                       rpc::SendReplyCallback send_reply_callback) {
  ObjectID object_id = ObjectID::FromBinary(request.object_id());
  TaskID owner_id = TaskID::FromBinary(request.owner_id());
  if (owner_id != GetCallerId()) {
    RAY_LOG(INFO) << "Handling GetObjectStatus for object produced by previous task "
                  << owner_id.Hex();
  }
  // We own the task. Reply back to the borrower once the object has been
  // created.
  // TODO(swang): We could probably just send the object value if it is small
  // enough and we have it local.
  reply->set_status(rpc::GetObjectStatusReply::CREATED);
  if (task_manager_->IsTaskPending(object_id.TaskId())) {
    // Acquire a reference and retry. This prevents the object from being
    // evicted out from under us before we can start the get.
    AddLocalReference(object_id);
    if (task_manager_->IsTaskPending(object_id.TaskId())) {
      // The task is pending. Send the reply once the task finishes.
      memory_store_->GetAsync(object_id,
                              [send_reply_callback](std::shared_ptr<RayObject> obj) {
                                send_reply_callback(Status::OK(), nullptr, nullptr);
                              });
      // Safe to drop our reference now that the async get is registered.
      RemoveLocalReference(object_id);
    } else {
      // We lost the race, the task is done.
      RemoveLocalReference(object_id);
      send_reply_callback(Status::OK(), nullptr, nullptr);
    }
  } else {
    // The task is done. Send the reply immediately.
    send_reply_callback(Status::OK(), nullptr, nullptr);
  }
}
// RPC handler: the raylet asks to be notified when an object goes out of
// scope on this worker, so that it can unpin the object.
void CoreWorker::HandleWaitForObjectEviction(
    const rpc::WaitForObjectEvictionRequest &request,
    rpc::WaitForObjectEvictionReply *reply, rpc::SendReplyCallback send_reply_callback) {
  // Ignore requests that were intended for a different worker process.
  if (HandleWrongRecipient(WorkerID::FromBinary(request.intended_worker_id()),
                           send_reply_callback)) {
    return;
  }
  // Sending the reply is the signal that the object may be unpinned.
  auto reply_when_out_of_scope = [send_reply_callback](const ObjectID &object_id) {
    RAY_LOG(DEBUG) << "Replying to HandleWaitForObjectEviction for " << object_id;
    send_reply_callback(Status::OK(), nullptr, nullptr);
  };
  ObjectID object_id = ObjectID::FromBinary(request.object_id());
  // SetDeleteCallback returns false when the reference is already gone (the
  // object may have gone out of scope before this request arrived); in that
  // case reply immediately so the raylet unpins the object.
  if (!reference_counter_->SetDeleteCallback(object_id, reply_when_out_of_scope)) {
    RAY_LOG(DEBUG) << "ObjectID reference already gone for " << object_id;
    reply_when_out_of_scope(object_id);
  }
}
// RPC handler: a request to kill the actor running in this worker process.
// On a match, the process exits immediately; no reply is sent in that case.
void CoreWorker::HandleKillActor(const rpc::KillActorRequest &request,
                                 rpc::KillActorReply *reply,
                                 rpc::SendReplyCallback send_reply_callback) {
  ActorID intended_actor_id = ActorID::FromBinary(request.intended_actor_id());
  // Reject requests targeted at a previous actor incarnation of this worker.
  if (intended_actor_id != worker_context_.GetCurrentActorID()) {
    std::ostringstream stream;
    stream << "Mismatched ActorID: ignoring KillActor for previous actor "
           << intended_actor_id
           << ", current actor ID: " << worker_context_.GetCurrentActorID();
    auto msg = stream.str();
    RAY_LOG(ERROR) << msg;
    send_reply_callback(Status::Invalid(msg), nullptr, nullptr);
    return;
  }
  RAY_LOG(INFO) << "Got KillActor, exiting immediately...";
  // Flush/close logs before exiting, if file logging is enabled.
  if (log_dir_ != "") {
    RayLog::ShutDownRayLog();
  }
  // Hard exit: destructors and the RPC reply are intentionally skipped.
  exit(1);
}
// RPC handler: report this worker's runtime statistics (task counts, current
// task, resource usage, memory store stats) for dashboards/debugging.
void CoreWorker::HandleGetCoreWorkerStats(const rpc::GetCoreWorkerStatsRequest &request,
                                          rpc::GetCoreWorkerStatsReply *reply,
                                          rpc::SendReplyCallback send_reply_callback) {
  absl::MutexLock lock(&mutex_);
  auto stats = reply->mutable_core_worker_stats();
  stats->set_num_pending_tasks(task_manager_->NumPendingTasks());
  stats->set_task_queue_length(task_queue_length_);
  stats->set_num_executed_tasks(num_executed_tasks_);
  stats->set_num_object_ids_in_scope(reference_counter_->NumObjectIDsInScope());
  if (!current_task_.TaskId().IsNil()) {
    stats->set_current_task_desc(current_task_.DebugString());
    // Take elements by const reference; the previous `auto const it` copied
    // each function descriptor element per iteration.
    for (const auto &it : current_task_.FunctionDescriptor()) {
      stats->add_current_task_func_desc(it);
    }
  }
  stats->set_ip_address(rpc_address_.ip_address());
  stats->set_port(rpc_address_.port());
  stats->set_actor_id(actor_id_.Binary());
  // Sum the assigned quantity per resource name.
  auto used_resources_map = stats->mutable_used_resources();
  for (auto const &it : *resource_ids_) {
    double quantity = 0;
    for (auto const &pair : it.second) {
      quantity += pair.second;
    }
    (*used_resources_map)[it.first] = quantity;
  }
  stats->set_webui_display(webui_display_);

  MemoryStoreStats memory_store_stats = memory_store_->GetMemoryStoreStatisticalData();
  stats->set_num_local_objects(memory_store_stats.num_local_objects);
  stats->set_used_object_store_memory(memory_store_stats.used_object_store_memory);
  send_reply_callback(Status::OK(), nullptr, nullptr);
}
// Yield the current fiber and block on the given event. Only valid inside an
// asyncio actor.
void CoreWorker::YieldCurrentFiber(FiberEvent &event) {
  RAY_CHECK(worker_context_.CurrentActorIsAsync());
  // Give other fibers a chance to run before blocking on the event.
  boost::this_fiber::yield();
  event.Wait();
}
// Asynchronously fetch a direct-call object from the memory store. When the
// object becomes available, either success_callback or (for objects marked
// with the in-plasma error) fallback_callback is invoked with the object,
// its ID, and the opaque python_future pointer.
void CoreWorker::GetAsync(const ObjectID &object_id, SetResultCallback success_callback,
                          SetResultCallback fallback_callback, void *python_future) {
  RAY_CHECK(object_id.IsDirectCallType());
  auto on_available = [python_future, success_callback, fallback_callback,
                       object_id](std::shared_ptr<RayObject> obj) {
    if (!obj->IsInPlasmaError()) {
      success_callback(obj, object_id, python_future);
    } else {
      // The value was promoted to plasma; let the fallback path retrieve it.
      fallback_callback(obj, object_id, python_future);
    }
  };
  memory_store_->GetAsync(object_id, on_available);
}
// Record this worker's actor ID. May be called at most once per worker; a
// second call trips the check below.
void CoreWorker::SetActorId(const ActorID &actor_id) {
  absl::MutexLock lock(&mutex_);
  RAY_CHECK(actor_id_.IsNil());
  actor_id_ = actor_id;
}
// Set the message shown for this worker in the web UI.
void CoreWorker::SetWebuiDisplay(const std::string &message) {
  absl::MutexLock lock(&mutex_);
  webui_display_ = message;
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/core_worker.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_CORE_WORKER_H
#define RAY_CORE_WORKER_CORE_WORKER_H
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "ray/common/buffer.h"
#include "ray/core_worker/actor_handle.h"
#include "ray/core_worker/actor_manager.h"
#include "ray/core_worker/common.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/future_resolver.h"
#include "ray/core_worker/profiling.h"
#include "ray/core_worker/reference_count.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/core_worker/store_provider/plasma_store_provider.h"
#include "ray/core_worker/transport/direct_actor_transport.h"
#include "ray/core_worker/transport/direct_task_transport.h"
#include "ray/core_worker/transport/raylet_transport.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/gcs/subscription_executor.h"
#include "ray/raylet/raylet_client.h"
#include "ray/rpc/node_manager/node_manager_client.h"
#include "ray/rpc/worker/core_worker_client.h"
#include "ray/rpc/worker/core_worker_server.h"
/// The set of gRPC handlers and their associated level of concurrency. If you want to
/// add a new call to the worker gRPC server, do the following:
/// 1) Add the rpc to the CoreWorkerService in core_worker.proto, e.g., "ExampleCall"
/// 2) Add a new macro to RAY_CORE_WORKER_DECLARE_RPC_HANDLERS
/// in core_worker_server.h,
///    e.g. "DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(ExampleCall)"
/// 3) Add a new macro to RAY_CORE_WORKER_RPC_HANDLERS in core_worker_server.h, e.g.
/// "RPC_SERVICE_HANDLER(CoreWorkerService, ExampleCall, 1)"
/// 4) Add a method to the CoreWorker class below: "CoreWorker::HandleExampleCall"
namespace ray {
/// The root class that contains all the core and language-independent functionalities
/// of the worker. This class is supposed to be used to implement app-language (Java,
/// Python, etc) workers.
class CoreWorker : public rpc::CoreWorkerServiceHandler {
// Callback that must be implemented and provided by the language-specific worker
// frontend to execute tasks and return their results.
using TaskExecutionCallback = std::function<Status(
TaskType task_type, const RayFunction &ray_function,
const std::unordered_map<std::string, double> &required_resources,
const std::vector<std::shared_ptr<RayObject>> &args,
const std::vector<ObjectID> &arg_reference_ids,
const std::vector<ObjectID> &return_ids,
std::vector<std::shared_ptr<RayObject>> *results)>;
public:
/// Construct a CoreWorker instance.
///
/// \param[in] worker_type Type of this worker.
/// \param[in] language Language of this worker.
/// \param[in] store_socket Object store socket to connect to.
/// \param[in] raylet_socket Raylet socket to connect to.
/// \param[in] job_id Job ID of this worker.
/// \param[in] gcs_options Options for the GCS client.
/// \param[in] log_dir Directory to write logs to. If this is empty, logs
/// won't be written to a file.
/// \param[in] node_ip_address IP address of the node.
/// \param[in] node_manager_port Port of the local raylet.
/// \param[in] task_execution_callback Language worker callback to execute tasks.
/// \param[in] check_signals Language worker function to check for signals and handle
/// them. If the function returns anything but StatusOK, any long-running
/// operations in the core worker will short circuit and return that status.
/// \param[in] ref_counting_enabled Whether to enable object ref counting.
///
/// NOTE(zhijunfu): the constructor would throw if a failure happens.
CoreWorker(const WorkerType worker_type, const Language language,
const std::string &store_socket, const std::string &raylet_socket,
const JobID &job_id, const gcs::GcsClientOptions &gcs_options,
const std::string &log_dir, const std::string &node_ip_address,
int node_manager_port, const TaskExecutionCallback &task_execution_callback,
std::function<Status()> check_signals = nullptr,
bool ref_counting_enabled = false);
virtual ~CoreWorker();
void Disconnect();
WorkerType GetWorkerType() const { return worker_type_; }
Language GetLanguage() const { return language_; }
WorkerContext &GetWorkerContext() { return worker_context_; }
raylet::RayletClient &GetRayletClient() { return *local_raylet_client_; }
const TaskID &GetCurrentTaskId() const { return worker_context_.GetCurrentTaskID(); }
void SetCurrentTaskId(const TaskID &task_id);
const JobID &GetCurrentJobId() const { return worker_context_.GetCurrentJobID(); }
void SetActorId(const ActorID &actor_id);
void SetWebuiDisplay(const std::string &message);
/// Increase the reference count for this object ID.
/// Increase the local reference count for this object ID. Should be called
/// by the language frontend when a new reference is created.
///
/// \param[in] object_id The object ID to increase the reference count for.
void AddLocalReference(const ObjectID &object_id) {
reference_counter_->AddLocalReference(object_id);
}
/// Decrease the reference count for this object ID. Should be called
/// by the language frontend when a reference is destroyed.
///
/// \param[in] object_id The object ID to decrease the reference count for.
void RemoveLocalReference(const ObjectID &object_id) {
std::vector<ObjectID> deleted;
reference_counter_->RemoveLocalReference(object_id, &deleted);
if (ref_counting_enabled_) {
memory_store_->Delete(deleted);
}
}
/// Returns a map of all ObjectIDs currently in scope with a pair of their
/// (local, submitted_task) reference counts. For debugging purposes.
std::unordered_map<ObjectID, std::pair<size_t, size_t>> GetAllReferenceCounts() const {
return reference_counter_->GetAllReferenceCounts();
}
/// Promote an object to plasma and get its owner information. This should be
/// called when serializing an object ID, and the returned information should
/// be stored with the serialized object ID. For plasma promotion, if the
/// object already exists locally, it will be put into the plasma store. If
/// it doesn't yet exist, it will be spilled to plasma once available.
///
/// This can only be called on object IDs that we created via task
/// submission, ray.put, or object IDs that we deserialized. It cannot be
/// called on object IDs that were created randomly, e.g.,
/// ObjectID::FromRandom.
///
/// Postcondition: Get(object_id) is valid.
///
/// \param[in] object_id The object ID to serialize.
/// \param[out] owner_id The ID of the object's owner. This should be
/// appended to the serialized object ID.
/// \param[out] owner_address The address of the object's owner. This should
/// be appended to the serialized object ID.
void PromoteToPlasmaAndGetOwnershipInfo(const ObjectID &object_id, TaskID *owner_id,
rpc::Address *owner_address);
/// Add a reference to an ObjectID that was deserialized by the language
/// frontend. This will also start the process to resolve the future.
/// Specifically, we will periodically contact the owner, until we learn that
/// the object has been created or the owner is no longer reachable. This
/// will then unblock any Gets or submissions of tasks dependent on the
/// object.
///
/// \param[in] object_id The object ID to deserialize.
/// \param[out] owner_id The ID of the object's owner.
/// \param[out] owner_address The address of the object's owner.
void RegisterOwnershipInfoAndResolveFuture(const ObjectID &object_id,
const TaskID &owner_id,
const rpc::Address &owner_address);
///
/// Public methods related to storing and retrieving objects.
///
/// Set options for this client's interactions with the object store.
///
/// \param[in] name Unique name for this object store client.
/// \param[in] limit The maximum amount of memory in bytes that this client
/// can use in the object store.
Status SetClientOptions(std::string name, int64_t limit_bytes);
/// Put an object into object store.
///
/// \param[in] object The ray object.
/// \param[out] object_id Generated ID of the object.
/// \return Status.
Status Put(const RayObject &object, ObjectID *object_id);
/// Put an object with specified ID into object store.
///
/// \param[in] object The ray object.
/// \param[in] object_id Object ID specified by the user.
/// \return Status.
Status Put(const RayObject &object, const ObjectID &object_id);
/// Create and return a buffer in the object store that can be directly written
/// into. After writing to the buffer, the caller must call `Seal()` to finalize
/// the object. The `Create()` and `Seal()` combination is an alternative interface
/// to `Put()` that allows frontends to avoid an extra copy when possible.
///
/// \param[in] metadata Metadata of the object to be written.
/// \param[in] data_size Size of the object to be written.
/// \param[out] object_id Object ID generated for the put.
/// \param[out] data Buffer for the user to write the object into.
/// \return Status.
Status Create(const std::shared_ptr<Buffer> &metadata, const size_t data_size,
ObjectID *object_id, std::shared_ptr<Buffer> *data);
/// Create and return a buffer in the object store that can be directly written
/// into. After writing to the buffer, the caller must call `Seal()` to finalize
/// the object. The `Create()` and `Seal()` combination is an alternative interface
/// to `Put()` that allows frontends to avoid an extra copy when possible.
///
/// \param[in] metadata Metadata of the object to be written.
/// \param[in] data_size Size of the object to be written.
/// \param[in] object_id Object ID specified by the user.
/// \param[out] data Buffer for the user to write the object into.
/// \return Status.
Status Create(const std::shared_ptr<Buffer> &metadata, const size_t data_size,
const ObjectID &object_id, std::shared_ptr<Buffer> *data);
/// Finalize placing an object into the object store. This should be called after
/// a corresponding `Create()` call and then writing into the returned buffer.
///
/// \param[in] object_id Object ID corresponding to the object.
/// \param[in] owns_object Whether or not this worker owns the object. If true,
/// the object will be added as owned to the reference counter as an
/// owned object and this worker will be responsible for managing its
/// lifetime.
/// \param[in] pin_object Whether or not to pin the object at the local raylet. This
/// only applies when owns_object is true.
/// \return Status.
Status Seal(const ObjectID &object_id, bool owns_object, bool pin_object);
/// Get a list of objects from the object store. Objects that failed to be retrieved
/// will be returned as nullptrs.
///
/// \param[in] ids IDs of the objects to get.
/// \param[in] timeout_ms Timeout in milliseconds, wait infinitely if it's negative.
/// \param[out] results Result list of objects data.
/// \return Status.
Status Get(const std::vector<ObjectID> &ids, const int64_t timeout_ms,
std::vector<std::shared_ptr<RayObject>> *results);
/// Return whether or not the object store contains the given object.
///
/// \param[in] object_id ID of the objects to check for.
/// \param[out] has_object Whether or not the object is present.
/// \return Status.
Status Contains(const ObjectID &object_id, bool *has_object);
/// Wait for a list of objects to appear in the object store.
/// Duplicate object ids are supported, and `num_objects` includes duplicate ids in this
/// case.
/// TODO(zhijunfu): it is probably more clear in semantics to just fail when there
/// are duplicates, and require it to be handled at application level.
///
/// \param[in] IDs of the objects to wait for.
/// \param[in] num_objects Number of objects that should appear.
/// \param[in] timeout_ms Timeout in milliseconds, wait infinitely if it's negative.
/// \param[out] results A bitset that indicates each object has appeared or not.
/// \return Status.
Status Wait(const std::vector<ObjectID> &object_ids, const int num_objects,
const int64_t timeout_ms, std::vector<bool> *results);
/// Delete a list of objects from the object store.
///
/// \param[in] object_ids IDs of the objects to delete.
/// \param[in] local_only Whether only delete the objects in local node, or all nodes in
/// the cluster.
/// \param[in] delete_creating_tasks Whether also delete the tasks that
/// created these objects.
/// \return Status.
Status Delete(const std::vector<ObjectID> &object_ids, bool local_only,
bool delete_creating_tasks);
/// Get a string describing object store memory usage for debugging purposes.
///
/// \return std::string The string describing memory usage.
std::string MemoryUsageString();
///
/// Public methods related to task submission.
///
/// Get the caller ID used to submit tasks from this worker to an actor.
///
/// \return The caller ID. For non-actor tasks, this is the current task ID.
/// For actors, this is the current actor ID. To make sure that all caller
/// IDs have the same type, we embed the actor ID in a TaskID with the rest
/// of the bytes zeroed out.
TaskID GetCallerId() const;
/// Submit a normal task.
///
/// \param[in] function The remote function to execute.
/// \param[in] args Arguments of this task.
/// \param[in] task_options Options for this task.
/// \param[out] return_ids Ids of the return objects.
/// \return Status error if task submission fails, likely due to raylet failure.
Status SubmitTask(const RayFunction &function, const std::vector<TaskArg> &args,
const TaskOptions &task_options, std::vector<ObjectID> *return_ids,
int max_retries);
/// Create an actor.
///
/// \param[in] function The remote function that generates the actor object.
/// \param[in] args Arguments of this task.
/// \param[in] actor_creation_options Options for this actor creation task.
/// \param[out] actor_id ID of the created actor. This can be used to submit
/// tasks on the actor.
/// \return Status error if actor creation fails, likely due to raylet failure.
Status CreateActor(const RayFunction &function, const std::vector<TaskArg> &args,
                   const ActorCreationOptions &actor_creation_options,
                   ActorID *actor_id);
/// Submit an actor task.
///
/// \param[in] actor_id ID of the actor to submit the task to.
/// \param[in] function The remote function to execute.
/// \param[in] args Arguments of this task.
/// \param[in] task_options Options for this task.
/// \param[out] return_ids Ids of the return objects.
/// \return Status error if the task is invalid or if the task submission
/// failed. Tasks can be invalid for direct actor calls because not all tasks
/// are currently supported.
Status SubmitActorTask(const ActorID &actor_id, const RayFunction &function,
                       const std::vector<TaskArg> &args,
                       const TaskOptions &task_options,
                       std::vector<ObjectID> *return_ids);
/// Tell an actor to exit immediately, without completing outstanding work.
///
/// \param[in] actor_id ID of the actor to kill.
/// \return Status.
Status KillActor(const ActorID &actor_id);
/// Add an actor handle from a serialized string.
///
/// This should be called when an actor handle is given to us by another task
/// or actor. This may be called even if we already have a handle to the same
/// actor.
///
/// \param[in] serialized The serialized actor handle.
/// \return The ActorID of the deserialized handle.
ActorID DeserializeAndRegisterActorHandle(const std::string &serialized);
/// Serialize an actor handle.
///
/// This should be called when passing an actor handle to another task or
/// actor.
///
/// \param[in] actor_id The ID of the actor handle to serialize.
/// \param[out] The serialized handle.
/// \return Status::Invalid if we don't have the specified handle.
Status SerializeActorHandle(const ActorID &actor_id, std::string *output) const;
///
/// Public methods related to task execution. Should not be used by driver processes.
///
/// Our actor ID; nil if this worker executes only stateless tasks.
const ActorID &GetActorId() const { return actor_id_; }
// Get the resource IDs available to this worker (as assigned by the raylet).
// NOTE: returns by value, so callers receive a snapshot copy of the mapping.
const ResourceMappingType GetResourceIDs() const { return *resource_ids_; }
/// Create a profile event with a reference to the core worker's profiler.
std::unique_ptr<worker::ProfileEvent> CreateProfileEvent(const std::string &event_type);
/// Start receiving and executing tasks.
/// \return void.
void StartExecutingTasks();
/// Allocate the return objects for an executing task. The caller should write into the
/// data buffers of the allocated buffers.
///
/// \param[in] object_ids Object IDs of the return values.
/// \param[in] data_sizes Sizes of the return values.
/// \param[in] metadatas Metadata buffers of the return values.
/// \param[out] return_objects RayObjects containing buffers to write results into.
/// \return Status.
Status AllocateReturnObjects(const std::vector<ObjectID> &object_ids,
const std::vector<size_t> &data_sizes,
const std::vector<std::shared_ptr<Buffer>> &metadatas,
std::vector<std::shared_ptr<RayObject>> *return_objects);
/// Get a handle to an actor.
///
/// \param[in] actor_id The actor handle to get.
/// \param[out] actor_handle A handle to the requested actor.
/// \return Status::Invalid if we don't have this actor handle.
Status GetActorHandle(const ActorID &actor_id, ActorHandle **actor_handle) const;
///
/// The following methods are handlers for the core worker's gRPC server, which follow
/// a macro-generated call convention. These are executed on the io_service_ and
/// post work to the appropriate event loop.
///
/// Implements gRPC server handler.
void HandleAssignTask(const rpc::AssignTaskRequest &request,
rpc::AssignTaskReply *reply,
rpc::SendReplyCallback send_reply_callback) override;
/// Implements gRPC server handler.
void HandlePushTask(const rpc::PushTaskRequest &request, rpc::PushTaskReply *reply,
rpc::SendReplyCallback send_reply_callback) override;
/// Implements gRPC server handler.
void HandleDirectActorCallArgWaitComplete(
const rpc::DirectActorCallArgWaitCompleteRequest &request,
rpc::DirectActorCallArgWaitCompleteReply *reply,
rpc::SendReplyCallback send_reply_callback) override;
/// Implements gRPC server handler.
void HandleGetObjectStatus(const rpc::GetObjectStatusRequest &request,
rpc::GetObjectStatusReply *reply,
rpc::SendReplyCallback send_reply_callback) override;
/// Implements gRPC server handler.
void HandleWaitForObjectEviction(const rpc::WaitForObjectEvictionRequest &request,
rpc::WaitForObjectEvictionReply *reply,
rpc::SendReplyCallback send_reply_callback) override;
/// Implements gRPC server handler.
void HandleKillActor(const rpc::KillActorRequest &request, rpc::KillActorReply *reply,
rpc::SendReplyCallback send_reply_callback) override;
/// Get statistics from core worker.
void HandleGetCoreWorkerStats(const rpc::GetCoreWorkerStatsRequest &request,
rpc::GetCoreWorkerStatsReply *reply,
rpc::SendReplyCallback send_reply_callback) override;
///
/// Public methods related to async actor call. This should only be used when
/// the actor is (1) direct actor and (2) using asyncio mode.
///
/// Block current fiber until event is triggered.
void YieldCurrentFiber(FiberEvent &event);
/// The callback expected to be implemented by the client.
using SetResultCallback =
std::function<void(std::shared_ptr<RayObject>, ObjectID object_id, void *)>;
/// Perform async get from in-memory store.
///
/// \param[in] object_id The id to call get on. Assumes object_id.IsDirectCallType().
/// \param[in] success_callback The callback to use the result object.
/// \param[in] fallback_callback The callback to use when failed to get result.
/// \param[in] python_future the void* object to be passed to SetResultCallback
/// \return void
void GetAsync(const ObjectID &object_id, SetResultCallback success_callback,
SetResultCallback fallback_callback, void *python_future);
private:
/// Run the io_service_ event loop. This should be called in a background thread.
void RunIOService();
/// Shut down the worker completely.
/// \return void.
void Shutdown();
/// Send the list of active object IDs to the raylet.
void ReportActiveObjectIDs();
/// Heartbeat for internal bookkeeping.
void InternalHeartbeat();
///
/// Private methods related to task submission.
///
/// Give this worker a handle to an actor.
///
/// This handle will remain as long as the current actor or task is
/// executing, even if the Python handle goes out of scope. Tasks submitted
/// through this handle are guaranteed to execute in the same order in which
/// they are submitted.
///
/// \param actor_handle The handle to the actor.
/// \return True if the handle was added and False if we already had a handle
/// to the same actor.
bool AddActorHandle(std::unique_ptr<ActorHandle> actor_handle);
///
/// Private methods related to task execution. Should not be used by driver processes.
///
/// Execute a task.
///
/// \param[in] task_spec Task specification.
/// \param[in] resource_ids Resource IDs of resources assigned to this worker.
///            If nullptr, reuse the previously assigned resources.
/// \param[out] return_objects Result objects that should be returned by value
///             (not via plasma).
/// \return Status.
Status ExecuteTask(const TaskSpecification &task_spec,
                   const std::shared_ptr<ResourceMappingType> &resource_ids,
                   std::vector<std::shared_ptr<RayObject>> *return_objects);
/// Build arguments for the task executor. This loops through all the
/// arguments in the task spec; for each argument that's passed by reference
/// (ObjectID), its content is fetched from the store, and for arguments that
/// are passed by value, their content is copied directly.
///
/// \param[in] task Task specification.
/// \param[out] args Argument data as RayObjects.
/// \param[out] arg_reference_ids ObjectIDs corresponding to each by-reference
///             argument. The length of this vector will be the same as args,
///             and by-value arguments will have ObjectID::Nil().
///             TODO(edoakes): this is a bit of a hack that's necessary because
///             we have separate serialization paths for by-value and
///             by-reference arguments in Python. This should ideally be
///             handled better there.
/// \return The arguments for passing to task executor.
Status BuildArgsForExecutor(const TaskSpecification &task,
                            std::vector<std::shared_ptr<RayObject>> *args,
                            std::vector<ObjectID> *arg_reference_ids);
/// Returns true if the message was sent to the wrong worker, in which case
/// the appropriate error reply is sent automatically. Messages end up on the
/// wrong worker when a worker dies and a new one takes its place with the
/// same port; the new worker should reject messages meant for the old one.
bool HandleWrongRecipient(const WorkerID &intended_worker_id,
                          rpc::SendReplyCallback send_reply_callback) {
  // Fast path: the message really is for us.
  if (intended_worker_id == worker_context_.GetWorkerID()) {
    return false;
  }
  std::ostringstream ss;
  ss << "Mismatched WorkerID: ignoring RPC for previous worker "
     << intended_worker_id
     << ", current worker ID: " << worker_context_.GetWorkerID();
  const auto msg = ss.str();
  RAY_LOG(ERROR) << msg;
  send_reply_callback(Status::Invalid(msg), nullptr, nullptr);
  return true;
}
/// Type of this worker (i.e., DRIVER or WORKER).
const WorkerType worker_type_;
/// Application language of this worker (i.e., PYTHON or JAVA).
const Language language_;
/// Directory where log files are written.
const std::string log_dir_;
/// Whether local reference counting is enabled.
const bool ref_counting_enabled_;
/// Application-language callback to check for signals that have been received
/// since calling into C++. This will be called periodically (at least every
/// 1s) during long-running operations.
std::function<Status()> check_signals_;
/// Shared state of the worker. Includes process-level and thread-level state.
/// TODO(edoakes): we should move process-level state into this class and make
/// this a ThreadContext.
WorkerContext worker_context_;
/// The ID of the current task being executed by the main thread. If there
/// are multiple threads, they will have a thread-local task ID stored in the
/// worker context.
TaskID main_thread_task_id_;
// Flag indicating whether this worker has been shut down.
bool shutdown_ = false;
/// Event loop where the IO events are handled. e.g. async GCS operations.
boost::asio::io_service io_service_;
/// Keeps the io_service_ alive.
boost::asio::io_service::work io_work_;
/// Shared client call manager.
std::unique_ptr<rpc::ClientCallManager> client_call_manager_;
/// Timer used to periodically send heartbeat containing active object IDs to the
/// raylet.
boost::asio::steady_timer heartbeat_timer_;
/// Timer for internal book-keeping.
boost::asio::steady_timer internal_timer_;
/// RPC server used to receive tasks to execute.
rpc::GrpcServer core_worker_server_;
/// Address of our RPC server.
rpc::Address rpc_address_;
/// Whether or not this worker is connected to the raylet and GCS.
bool connected_ = false;
// Client to the GCS shared by core worker interfaces.
std::shared_ptr<gcs::GcsClient> gcs_client_;
// Client to the raylet shared by core worker interfaces. This needs to be a
// shared_ptr for direct calls because we can lease multiple workers through
// one client, and we need to keep the connection alive until we return all
// of the workers.
std::shared_ptr<raylet::RayletClient> local_raylet_client_;
// Thread that runs a boost::asio service to process IO events.
std::thread io_thread_;
// Keeps track of object ID reference counts.
std::shared_ptr<ReferenceCounter> reference_counter_;
///
/// Fields related to storing and retrieving objects.
///
/// In-memory store for return objects.
std::shared_ptr<CoreWorkerMemoryStore> memory_store_;
/// Plasma store interface.
std::shared_ptr<CoreWorkerPlasmaStoreProvider> plasma_store_provider_;
std::unique_ptr<FutureResolver> future_resolver_;
///
/// Fields related to task submission.
///
// Tracks the currently pending tasks.
std::shared_ptr<TaskManager> task_manager_;
// Interface for publishing actor death event for actor creation failure.
std::shared_ptr<ActorManager> actor_manager_;
// Interface to submit tasks directly to other actors.
std::unique_ptr<CoreWorkerDirectActorTaskSubmitter> direct_actor_submitter_;
// Interface to submit non-actor tasks directly to leased workers.
std::unique_ptr<CoreWorkerDirectTaskSubmitter> direct_task_submitter_;
/// The `actor_handles_` field could be mutated concurrently due to multi-threading, we
/// need a mutex to protect it.
mutable absl::Mutex actor_handles_mutex_;
/// Map from actor ID to a handle to that actor.
absl::flat_hash_map<ActorID, std::unique_ptr<ActorHandle>> actor_handles_
GUARDED_BY(actor_handles_mutex_);
///
/// Fields related to task execution.
///
/// Protects around accesses to fields below. This should only ever be held
/// for short-running periods of time.
mutable absl::Mutex mutex_;
/// Our actor ID. If this is nil, then we execute only stateless tasks.
ActorID actor_id_ GUARDED_BY(mutex_);
/// The currently executing task spec. We have to track this separately since
/// we cannot access the thread-local worker contexts from GetCoreWorkerStats()
TaskSpecification current_task_ GUARDED_BY(mutex_);
/// String to be displayed on Web UI.
std::string webui_display_ GUARDED_BY(mutex_);
/// Number of tasks that have been pushed to the actor but not executed.
std::atomic<int64_t> task_queue_length_;
/// Number of executed tasks.
std::atomic<int64_t> num_executed_tasks_;
/// Event loop where tasks are processed.
boost::asio::io_service task_execution_service_;
/// The asio work to keep task_execution_service_ alive.
boost::asio::io_service::work task_execution_service_work_;
/// Profiler including a background thread that pushes profiling events to the GCS.
std::shared_ptr<worker::Profiler> profiler_;
/// Task execution callback.
TaskExecutionCallback task_execution_callback_;
/// A map from resource name to the resource IDs that are currently reserved
/// for this worker. Each pair consists of the resource ID and the fraction
/// of that resource allocated for this worker. This is set on task assignment.
std::shared_ptr<ResourceMappingType> resource_ids_;
// Interface that receives tasks from the raylet.
std::unique_ptr<CoreWorkerRayletTaskReceiver> raylet_task_receiver_;
/// Common rpc service for all worker modules.
rpc::CoreWorkerGrpcService grpc_service_;
// Interface that receives tasks from direct actor calls.
std::unique_ptr<CoreWorkerDirectTaskReceiver> direct_task_receiver_;
// Queue of tasks to resubmit when the specified time passes.
std::deque<std::pair<int64_t, TaskSpecification>> to_resubmit_ GUARDED_BY(mutex_);
friend class CoreWorkerTest;
};
} // namespace ray
#endif // RAY_CORE_WORKER_CORE_WORKER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/future_resolver.cc
|
C++
|
#include "ray/core_worker/future_resolver.h"
namespace ray {
void FutureResolver::ResolveFutureAsync(const ObjectID &object_id, const TaskID &owner_id,
                                        const rpc::Address &owner_address) {
  RAY_CHECK(object_id.IsDirectCallType());
  absl::MutexLock lock(&mu_);
  // Lazily create and cache an RPC client to this owner.
  auto client_it = owner_clients_.find(owner_id);
  if (client_it == owner_clients_.end()) {
    std::shared_ptr<rpc::CoreWorkerClientInterface> new_client(
        client_factory_(owner_address.ip_address(), owner_address.port()));
    client_it = owner_clients_.emplace(owner_id, std::move(new_client)).first;
  }
  // Ask the owner for the object's status; the reply (or the owner's death)
  // unblocks the future.
  rpc::GetObjectStatusRequest status_request;
  status_request.set_object_id(object_id.Binary());
  status_request.set_owner_id(owner_id.Binary());
  RAY_CHECK_OK(client_it->second->GetObjectStatus(
      status_request,
      [this, object_id](const Status &status, const rpc::GetObjectStatusReply &reply) {
        if (!status.ok()) {
          RAY_LOG(WARNING) << "Error retrieving the value of object ID " << object_id
                           << " that was deserialized: " << status.ToString();
        }
        // Either the owner is gone or the owner replied that the object has
        // been created. In both cases, we can now try to fetch the object via
        // plasma.
        RAY_CHECK_OK(in_memory_store_->Put(RayObject(rpc::ErrorType::OBJECT_IN_PLASMA),
                                           object_id));
      }));
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/future_resolver.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_FUTURE_RESOLVER_H
#define RAY_CORE_WORKER_FUTURE_RESOLVER_H
#include <memory>
#include "ray/common/id.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/protobuf/core_worker.pb.h"
#include "ray/rpc/worker/core_worker_client.h"
namespace ray {
// Resolve values for futures that were given to us before the value
// was available. This class is thread-safe.
class FutureResolver {
 public:
  /// \param store In-memory store into which resolved future values are put.
  /// \param client_factory Factory used to create RPC clients to object owners.
  FutureResolver(std::shared_ptr<CoreWorkerMemoryStore> store,
                 rpc::ClientFactoryFn client_factory)
      : in_memory_store_(store), client_factory_(client_factory) {}
  /// Resolve the value for a future. This will periodically contact the given
  /// owner until the owner dies or the owner has finished creating the object.
  /// In either case, this will put an OBJECT_IN_PLASMA error as the future's
  /// value.
  /// NOTE(review): the current implementation appears to issue a single
  /// GetObjectStatus RPC rather than polling — confirm the retry behavior.
  ///
  /// \param[in] object_id The ID of the future to resolve.
  /// \param[in] owner_id The ID of the task or actor that owns the future.
  /// \param[in] owner_address The address of the task or actor that owns the
  /// future.
  void ResolveFutureAsync(const ObjectID &object_id, const TaskID &owner_id,
                          const rpc::Address &owner_address);
 private:
  /// Used to store values of resolved futures.
  std::shared_ptr<CoreWorkerMemoryStore> in_memory_store_;
  /// Factory for producing new core worker clients.
  const rpc::ClientFactoryFn client_factory_;
  /// Protects against concurrent access to internal state.
  absl::Mutex mu_;
  /// Cache of gRPC clients to the objects' owners, keyed by owner task ID.
  absl::flat_hash_map<TaskID, std::shared_ptr<rpc::CoreWorkerClientInterface>>
      owner_clients_ GUARDED_BY(mu_);
};
} // namespace ray
#endif // RAY_CORE_WORKER_FUTURE_RESOLVER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/jni_init.cc
|
C++
|
#include "ray/core_worker/lib/java/jni_utils.h"
jclass java_boolean_class;
jmethodID java_boolean_init;
jclass java_double_class;
jmethodID java_double_double_value;
jclass java_list_class;
jmethodID java_list_size;
jmethodID java_list_get;
jmethodID java_list_add;
jclass java_array_list_class;
jmethodID java_array_list_init;
jmethodID java_array_list_init_with_capacity;
jclass java_map_class;
jmethodID java_map_entry_set;
jclass java_set_class;
jmethodID java_set_iterator;
jclass java_iterator_class;
jmethodID java_iterator_has_next;
jmethodID java_iterator_next;
jclass java_map_entry_class;
jmethodID java_map_entry_get_key;
jmethodID java_map_entry_get_value;
jclass java_ray_exception_class;
jclass java_jni_exception_util_class;
jmethodID java_jni_exception_util_get_stack_trace;
jclass java_base_id_class;
jmethodID java_base_id_get_bytes;
jclass java_function_descriptor_class;
jmethodID java_function_descriptor_get_language;
jmethodID java_function_descriptor_to_list;
jclass java_language_class;
jmethodID java_language_get_number;
jclass java_function_arg_class;
jfieldID java_function_arg_id;
jfieldID java_function_arg_value;
jclass java_base_task_options_class;
jfieldID java_base_task_options_resources;
jclass java_actor_creation_options_class;
jfieldID java_actor_creation_options_default_use_direct_call;
jfieldID java_actor_creation_options_max_reconstructions;
jfieldID java_actor_creation_options_use_direct_call;
jfieldID java_actor_creation_options_jvm_options;
jclass java_gcs_client_options_class;
jfieldID java_gcs_client_options_ip;
jfieldID java_gcs_client_options_port;
jfieldID java_gcs_client_options_password;
jclass java_native_ray_object_class;
jmethodID java_native_ray_object_init;
jfieldID java_native_ray_object_data;
jfieldID java_native_ray_object_metadata;
jclass java_task_executor_class;
jmethodID java_task_executor_execute;
JavaVM *jvm;
/// Look up the Java class `class_name` and pin it with a global reference so
/// the cached jclass stays valid across JNI calls and threads.
inline jclass LoadClass(JNIEnv *env, const char *class_name) {
  jclass local_ref = env->FindClass(class_name);
  jclass global_ref = (jclass)env->NewGlobalRef(local_ref);
  RAY_CHECK(global_ref) << "Can't load Java class " << class_name;
  // The local reference is no longer needed once the global one exists.
  env->DeleteLocalRef(local_ref);
  return global_ref;
}
/// Load and cache frequently-used Java classes and methods so later JNI calls
/// can avoid repeated (relatively expensive) lookups. Called automatically by
/// the JVM when the native library is loaded.
jint JNI_OnLoad(JavaVM *vm, void *reserved) {
  JNIEnv *env;
  if (vm->GetEnv(reinterpret_cast<void **>(&env), CURRENT_JNI_VERSION) != JNI_OK) {
    return JNI_ERR;
  }
  jvm = vm;
  // java.lang wrapper classes.
  java_boolean_class = LoadClass(env, "java/lang/Boolean");
  java_boolean_init = env->GetMethodID(java_boolean_class, "<init>", "(Z)V");
  java_double_class = LoadClass(env, "java/lang/Double");
  java_double_double_value = env->GetMethodID(java_double_class, "doubleValue", "()D");
  // java.util collection classes and the methods used to traverse them.
  java_list_class = LoadClass(env, "java/util/List");
  java_list_size = env->GetMethodID(java_list_class, "size", "()I");
  java_list_get = env->GetMethodID(java_list_class, "get", "(I)Ljava/lang/Object;");
  java_list_add = env->GetMethodID(java_list_class, "add", "(Ljava/lang/Object;)Z");
  java_array_list_class = LoadClass(env, "java/util/ArrayList");
  java_array_list_init = env->GetMethodID(java_array_list_class, "<init>", "()V");
  java_array_list_init_with_capacity =
      env->GetMethodID(java_array_list_class, "<init>", "(I)V");
  java_map_class = LoadClass(env, "java/util/Map");
  java_map_entry_set = env->GetMethodID(java_map_class, "entrySet", "()Ljava/util/Set;");
  java_set_class = LoadClass(env, "java/util/Set");
  java_set_iterator =
      env->GetMethodID(java_set_class, "iterator", "()Ljava/util/Iterator;");
  java_iterator_class = LoadClass(env, "java/util/Iterator");
  java_iterator_has_next = env->GetMethodID(java_iterator_class, "hasNext", "()Z");
  java_iterator_next =
      env->GetMethodID(java_iterator_class, "next", "()Ljava/lang/Object;");
  java_map_entry_class = LoadClass(env, "java/util/Map$Entry");
  java_map_entry_get_key =
      env->GetMethodID(java_map_entry_class, "getKey", "()Ljava/lang/Object;");
  java_map_entry_get_value =
      env->GetMethodID(java_map_entry_class, "getValue", "()Ljava/lang/Object;");
  // Ray API / runtime classes, fields, and methods.
  java_ray_exception_class = LoadClass(env, "org/ray/api/exception/RayException");
  java_jni_exception_util_class = LoadClass(env, "org/ray/runtime/util/JniExceptionUtil");
  java_jni_exception_util_get_stack_trace = env->GetStaticMethodID(
      java_jni_exception_util_class, "getStackTrace",
      "(Ljava/lang/String;ILjava/lang/String;Ljava/lang/Throwable;)Ljava/lang/String;");
  java_base_id_class = LoadClass(env, "org/ray/api/id/BaseId");
  java_base_id_get_bytes = env->GetMethodID(java_base_id_class, "getBytes", "()[B");
  java_function_descriptor_class =
      LoadClass(env, "org/ray/runtime/functionmanager/FunctionDescriptor");
  java_function_descriptor_get_language =
      env->GetMethodID(java_function_descriptor_class, "getLanguage",
                       "()Lorg/ray/runtime/generated/Common$Language;");
  java_function_descriptor_to_list =
      env->GetMethodID(java_function_descriptor_class, "toList", "()Ljava/util/List;");
  java_language_class = LoadClass(env, "org/ray/runtime/generated/Common$Language");
  java_language_get_number = env->GetMethodID(java_language_class, "getNumber", "()I");
  java_function_arg_class = LoadClass(env, "org/ray/runtime/task/FunctionArg");
  java_function_arg_id =
      env->GetFieldID(java_function_arg_class, "id", "Lorg/ray/api/id/ObjectId;");
  java_function_arg_value = env->GetFieldID(java_function_arg_class, "value",
                                            "Lorg/ray/runtime/object/NativeRayObject;");
  java_base_task_options_class = LoadClass(env, "org/ray/api/options/BaseTaskOptions");
  java_base_task_options_resources =
      env->GetFieldID(java_base_task_options_class, "resources", "Ljava/util/Map;");
  java_actor_creation_options_class =
      LoadClass(env, "org/ray/api/options/ActorCreationOptions");
  java_actor_creation_options_default_use_direct_call = env->GetStaticFieldID(
      java_actor_creation_options_class, "DEFAULT_USE_DIRECT_CALL", "Z");
  java_actor_creation_options_max_reconstructions =
      env->GetFieldID(java_actor_creation_options_class, "maxReconstructions", "I");
  java_actor_creation_options_use_direct_call =
      env->GetFieldID(java_actor_creation_options_class, "useDirectCall", "Z");
  java_actor_creation_options_jvm_options = env->GetFieldID(
      java_actor_creation_options_class, "jvmOptions", "Ljava/lang/String;");
  java_gcs_client_options_class = LoadClass(env, "org/ray/runtime/gcs/GcsClientOptions");
  java_gcs_client_options_ip =
      env->GetFieldID(java_gcs_client_options_class, "ip", "Ljava/lang/String;");
  java_gcs_client_options_port =
      env->GetFieldID(java_gcs_client_options_class, "port", "I");
  java_gcs_client_options_password =
      env->GetFieldID(java_gcs_client_options_class, "password", "Ljava/lang/String;");
  java_native_ray_object_class = LoadClass(env, "org/ray/runtime/object/NativeRayObject");
  java_native_ray_object_init =
      env->GetMethodID(java_native_ray_object_class, "<init>", "([B[B)V");
  java_native_ray_object_data =
      env->GetFieldID(java_native_ray_object_class, "data", "[B");
  java_native_ray_object_metadata =
      env->GetFieldID(java_native_ray_object_class, "metadata", "[B");
  java_task_executor_class = LoadClass(env, "org/ray/runtime/task/TaskExecutor");
  java_task_executor_execute =
      env->GetMethodID(java_task_executor_class, "execute",
                       "(Ljava/util/List;Ljava/util/List;)Ljava/util/List;");
  return CURRENT_JNI_VERSION;
}
/// Unload java classes.
/// Releases every global class reference that JNI_OnLoad created, so the
/// classes become eligible for unloading when the library is detached.
void JNI_OnUnload(JavaVM *vm, void *reserved) {
  JNIEnv *env;
  vm->GetEnv(reinterpret_cast<void **>(&env), CURRENT_JNI_VERSION);
  // Same set (and order) of classes that JNI_OnLoad pinned globally.
  const jclass cached_classes[] = {
      java_boolean_class,
      java_double_class,
      java_list_class,
      java_array_list_class,
      java_map_class,
      java_set_class,
      java_iterator_class,
      java_map_entry_class,
      java_ray_exception_class,
      java_jni_exception_util_class,
      java_base_id_class,
      java_function_descriptor_class,
      java_language_class,
      java_function_arg_class,
      java_base_task_options_class,
      java_actor_creation_options_class,
      java_native_ray_object_class,
      java_task_executor_class,
  };
  for (jclass cached_class : cached_classes) {
    env->DeleteGlobalRef(cached_class);
  }
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/jni_utils.h
|
C/C++ Header
|
#ifndef RAY_COMMON_JAVA_JNI_UTILS_H
#define RAY_COMMON_JAVA_JNI_UTILS_H
#include <jni.h>
#include "ray/common/buffer.h"
#include "ray/common/id.h"
#include "ray/common/ray_object.h"
#include "ray/common/status.h"
/// Boolean class
extern jclass java_boolean_class;
/// Constructor of Boolean class
extern jmethodID java_boolean_init;
/// Double class
extern jclass java_double_class;
/// doubleValue method of Double class
extern jmethodID java_double_double_value;
/// List class
extern jclass java_list_class;
/// size method of List class
extern jmethodID java_list_size;
/// get method of List class
extern jmethodID java_list_get;
/// add method of List class
extern jmethodID java_list_add;
/// ArrayList class
extern jclass java_array_list_class;
/// Constructor of ArrayList class
extern jmethodID java_array_list_init;
/// Constructor of ArrayList class with single parameter capacity
extern jmethodID java_array_list_init_with_capacity;
/// Map interface
extern jclass java_map_class;
/// entrySet method of Map interface
extern jmethodID java_map_entry_set;
/// Set interface
extern jclass java_set_class;
/// iterator method of Set interface
extern jmethodID java_set_iterator;
/// Iterator interface
extern jclass java_iterator_class;
/// hasNext method of Iterator interface
extern jmethodID java_iterator_has_next;
/// next method of Iterator interface
extern jmethodID java_iterator_next;
/// Map.Entry interface
extern jclass java_map_entry_class;
/// getKey method of Map.Entry interface
extern jmethodID java_map_entry_get_key;
/// getValue method of Map.Entry interface
extern jmethodID java_map_entry_get_value;
/// RayException class
extern jclass java_ray_exception_class;
/// JniExceptionUtil class
extern jclass java_jni_exception_util_class;
/// getStackTrace method of JniExceptionUtil class
extern jmethodID java_jni_exception_util_get_stack_trace;
/// BaseId class
extern jclass java_base_id_class;
/// getBytes method of BaseId class
extern jmethodID java_base_id_get_bytes;
/// FunctionDescriptor interface
extern jclass java_function_descriptor_class;
/// getLanguage method of FunctionDescriptor interface
extern jmethodID java_function_descriptor_get_language;
/// toList method of FunctionDescriptor interface
extern jmethodID java_function_descriptor_to_list;
/// Language class
extern jclass java_language_class;
/// getNumber of Language class
extern jmethodID java_language_get_number;
/// FunctionArg class
extern jclass java_function_arg_class;
/// id field of FunctionArg class
extern jfieldID java_function_arg_id;
/// value field of FunctionArg class
extern jfieldID java_function_arg_value;
/// BaseTaskOptions class
extern jclass java_base_task_options_class;
/// resources field of BaseTaskOptions class
extern jfieldID java_base_task_options_resources;
/// ActorCreationOptions class
extern jclass java_actor_creation_options_class;
/// DEFAULT_USE_DIRECT_CALL field of ActorCreationOptions class
extern jfieldID java_actor_creation_options_default_use_direct_call;
/// maxReconstructions field of ActorCreationOptions class
extern jfieldID java_actor_creation_options_max_reconstructions;
/// useDirectCall field of ActorCreationOptions class
extern jfieldID java_actor_creation_options_use_direct_call;
/// jvmOptions field of ActorCreationOptions class
extern jfieldID java_actor_creation_options_jvm_options;
/// GcsClientOptions class
extern jclass java_gcs_client_options_class;
/// ip field of GcsClientOptions class
extern jfieldID java_gcs_client_options_ip;
/// port field of GcsClientOptions class
extern jfieldID java_gcs_client_options_port;
/// password field of GcsClientOptions class
extern jfieldID java_gcs_client_options_password;
/// NativeRayObject class
extern jclass java_native_ray_object_class;
/// Constructor of NativeRayObject class
extern jmethodID java_native_ray_object_init;
/// data field of NativeRayObject class
extern jfieldID java_native_ray_object_data;
/// metadata field of NativeRayObject class
extern jfieldID java_native_ray_object_metadata;
/// TaskExecutor class
extern jclass java_task_executor_class;
/// execute method of TaskExecutor class
extern jmethodID java_task_executor_execute;
#define CURRENT_JNI_VERSION JNI_VERSION_1_8
extern JavaVM *jvm;
/// Throws a Java RayException if the status is not OK.
/// On failure, raises a Java-side RayException carrying the status message,
/// then executes `return ret;` in the *calling* JNI function. For functions
/// returning void, callers pass `(void)0` as `ret`.
#define THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, ret)               \
  {                                                                          \
    if (!(status).ok()) {                                                    \
      (env)->ThrowNew(java_ray_exception_class, (status).message().c_str()); \
      return (ret);                                                          \
    }                                                                        \
  }
/// Aborts the process (via RAY_LOG(FATAL)) if a Java exception is pending on
/// `env`. The Java stack trace is rendered through JniExceptionUtil and
/// included in the fatal log message; all local refs created while building
/// the message are released before logging.
#define RAY_CHECK_JAVA_EXCEPTION(env)                                                  \
  {                                                                                    \
    jthrowable throwable = env->ExceptionOccurred();                                   \
    if (throwable) {                                                                   \
      jstring java_file_name = env->NewStringUTF(__FILE__);                            \
      jstring java_function = env->NewStringUTF(__func__);                             \
      jobject java_error_message = env->CallStaticObjectMethod(                        \
          java_jni_exception_util_class, java_jni_exception_util_get_stack_trace,     \
          java_file_name, __LINE__, java_function, throwable);                         \
      std::string error_message =                                                      \
          JavaStringToNativeString(env, static_cast<jstring>(java_error_message));     \
      env->DeleteLocalRef(throwable);                                                  \
      env->DeleteLocalRef(java_file_name);                                             \
      env->DeleteLocalRef(java_function);                                              \
      env->DeleteLocalRef(java_error_message);                                         \
      RAY_LOG(FATAL) << "An unexpected exception occurred while executing Java code "  \
                        "from JNI ("                                                   \
                     << __FILE__ << ":" << __LINE__ << " " << __func__ << ")."         \
                     << "\n"                                                           \
                     << error_message;                                                 \
    }                                                                                  \
  }
/// Represents a byte buffer of Java byte array.
/// The destructor will automatically call ReleaseByteArrayElements.
/// NOTE: Instances of this class cannot be used across threads.
class JavaByteArrayBuffer : public ray::Buffer {
public:
JavaByteArrayBuffer(JNIEnv *env, jbyteArray java_byte_array)
: env_(env), java_byte_array_(java_byte_array) {
native_bytes_ = env_->GetByteArrayElements(java_byte_array_, nullptr);
}
uint8_t *Data() const override { return reinterpret_cast<uint8_t *>(native_bytes_); }
size_t Size() const override { return env_->GetArrayLength(java_byte_array_); }
bool OwnsData() const override { return true; }
bool IsPlasmaBuffer() const { return false; }
~JavaByteArrayBuffer() {
env_->ReleaseByteArrayElements(java_byte_array_, native_bytes_, JNI_ABORT);
env_->DeleteLocalRef(java_byte_array_);
}
private:
JNIEnv *env_;
jbyteArray java_byte_array_;
jbyte *native_bytes_;
};
/// Convert a Java byte array to a C++ UniqueID.
/// Reads exactly ID::Size() bytes from `bytes` and builds the ID from them.
template <typename ID>
inline ID JavaByteArrayToId(JNIEnv *env, const jbyteArray &bytes) {
  const auto num_bytes = ID::Size();
  std::string binary(num_bytes, 0);
  env->GetByteArrayRegion(bytes, 0, num_bytes,
                          reinterpret_cast<jbyte *>(&binary.front()));
  return ID::FromBinary(binary);
}
/// Convert C++ UniqueID to a Java byte array.
/// Allocates a fresh ID::Size()-byte Java array and copies the ID into it.
template <typename ID>
inline jbyteArray IdToJavaByteArray(JNIEnv *env, const ID &id) {
  jbyteArray byte_array = env->NewByteArray(ID::Size());
  const auto *id_bytes = reinterpret_cast<const jbyte *>(id.Data());
  env->SetByteArrayRegion(byte_array, 0, ID::Size(), id_bytes);
  return byte_array;
}
/// Convert C++ UniqueID to a Java ByteBuffer.
/// NOTE: the returned direct ByteBuffer aliases `id`'s internal bytes (no
/// copy is made); it must not outlive `id`.
template <typename ID>
inline jobject IdToJavaByteBuffer(JNIEnv *env, const ID &id) {
  return env->NewDirectByteBuffer(
      reinterpret_cast<void *>(const_cast<uint8_t *>(id.Data())), id.Size());
}
/// Convert a Java String to C++ std::string.
/// Returns an empty string if the JVM cannot provide the UTF chars
/// (GetStringUTFChars may return nullptr, e.g. on allocation failure).
inline std::string JavaStringToNativeString(JNIEnv *env, jstring jstr) {
  const char *c_str = env->GetStringUTFChars(jstr, nullptr);
  if (c_str == nullptr) {
    // Avoid UB from constructing std::string(nullptr).
    return std::string();
  }
  std::string result(c_str);
  env->ReleaseStringUTFChars(jstr, c_str);
  return result;
}
/// Convert a Java List to C++ std::vector.
///
/// \param java_list The Java list to read elements from.
/// \param native_vector Output vector; cleared before being filled.
/// \param element_converter Converts each Java element to a NativeT.
template <typename NativeT>
inline void JavaListToNativeVector(
    JNIEnv *env, jobject java_list, std::vector<NativeT> *native_vector,
    std::function<NativeT(JNIEnv *, jobject)> element_converter) {
  int size = env->CallIntMethod(java_list, java_list_size);
  RAY_CHECK_JAVA_EXCEPTION(env);
  native_vector->clear();
  // The size is known up front; reserve to avoid repeated reallocation.
  native_vector->reserve(size);
  for (int i = 0; i < size; i++) {
    auto element = env->CallObjectMethod(java_list, java_list_get, (jint)i);
    RAY_CHECK_JAVA_EXCEPTION(env);
    native_vector->emplace_back(element_converter(env, element));
    // Drop the local ref eagerly; lists can exceed the local ref budget.
    env->DeleteLocalRef(element);
  }
}
/// Convert a Java List<String> to C++ std::vector<std::string>.
inline void JavaStringListToNativeStringVector(JNIEnv *env, jobject java_list,
                                               std::vector<std::string> *native_vector) {
  // Each element is a java.lang.String; convert it via the UTF helper.
  auto to_native_string = [](JNIEnv *env, jobject java_string) {
    return JavaStringToNativeString(env, static_cast<jstring>(java_string));
  };
  JavaListToNativeVector<std::string>(env, java_list, native_vector, to_native_string);
}
/// Convert a C++ std::vector to a Java List.
/// Builds an ArrayList pre-sized to the vector's length and appends each
/// converted element, dropping its local ref immediately afterwards.
template <typename NativeT>
inline jobject NativeVectorToJavaList(
    JNIEnv *env, const std::vector<NativeT> &native_vector,
    std::function<jobject(JNIEnv *, const NativeT &)> element_converter) {
  const jint capacity = (jint)native_vector.size();
  jobject result_list = env->NewObject(java_array_list_class,
                                       java_array_list_init_with_capacity, capacity);
  for (const auto &native_element : native_vector) {
    jobject java_element = element_converter(env, native_element);
    env->CallVoidMethod(result_list, java_list_add, java_element);
    RAY_CHECK_JAVA_EXCEPTION(env);
    env->DeleteLocalRef(java_element);
  }
  return result_list;
}
/// Convert a C++ std::vector<std::string> to a Java List<String>
inline jobject NativeStringVectorToJavaStringList(
    JNIEnv *env, const std::vector<std::string> &native_vector) {
  auto to_java_string = [](JNIEnv *env, const std::string &str) {
    return env->NewStringUTF(str.c_str());
  };
  return NativeVectorToJavaList<std::string>(env, native_vector, to_java_string);
}
/// Convert a C++ vector of IDs to a Java List of byte arrays.
template <typename ID>
inline jobject NativeIdVectorToJavaByteArrayList(JNIEnv *env,
                                                 const std::vector<ID> &native_vector) {
  auto id_to_bytes = [](JNIEnv *env, const ID &id) {
    return IdToJavaByteArray<ID>(env, id);
  };
  return NativeVectorToJavaList<ID>(env, native_vector, id_to_bytes);
}
/// Convert a C++ ray::Buffer to a Java byte array.
/// Returns nullptr for a null buffer, and an empty (zero-length) Java array
/// for an empty buffer.
inline jbyteArray NativeBufferToJavaByteArray(JNIEnv *env,
                                              const std::shared_ptr<ray::Buffer> buffer) {
  if (!buffer) {
    return nullptr;
  }
  const auto num_bytes = buffer->Size();
  jbyteArray java_byte_array = env->NewByteArray(num_bytes);
  if (num_bytes > 0) {
    env->SetByteArrayRegion(java_byte_array, 0, num_bytes,
                            reinterpret_cast<const jbyte *>(buffer->Data()));
  }
  return java_byte_array;
}
/// Convert a Java byte[] as a C++ std::shared_ptr<JavaByteArrayBuffer>.
/// Returns nullptr when `javaByteArray` is null. Otherwise the returned
/// buffer pins the array elements and takes ownership of the local reference
/// (both are released in JavaByteArrayBuffer's destructor).
inline std::shared_ptr<JavaByteArrayBuffer> JavaByteArrayToNativeBuffer(
    JNIEnv *env, const jbyteArray &javaByteArray) {
  if (!javaByteArray) {
    return nullptr;
  }
  return std::make_shared<JavaByteArrayBuffer>(env, javaByteArray);
}
/// Convert a Java NativeRayObject to a C++ ray::RayObject.
/// Returns nullptr for a null Java object.
/// NOTE: the returned ray::RayObject cannot be used across threads.
inline std::shared_ptr<ray::RayObject> JavaNativeRayObjectToNativeRayObject(
    JNIEnv *env, const jobject &java_obj) {
  if (!java_obj) {
    return nullptr;
  }
  // The local refs of the data/metadata arrays are handed to the
  // JavaByteArrayBuffer instances below, which delete them on destruction.
  auto java_data = (jbyteArray)env->GetObjectField(java_obj, java_native_ray_object_data);
  auto java_metadata =
      (jbyteArray)env->GetObjectField(java_obj, java_native_ray_object_metadata);
  std::shared_ptr<ray::Buffer> data_buffer = JavaByteArrayToNativeBuffer(env, java_data);
  std::shared_ptr<ray::Buffer> metadata_buffer =
      JavaByteArrayToNativeBuffer(env, java_metadata);
  // Normalize zero-length buffers to nullptr so RayObject sees them as absent.
  if (data_buffer && data_buffer->Size() == 0) {
    data_buffer = nullptr;
  }
  if (metadata_buffer && metadata_buffer->Size() == 0) {
    metadata_buffer = nullptr;
  }
  return std::make_shared<ray::RayObject>(data_buffer, metadata_buffer);
}
/// Convert a C++ ray::RayObject to a Java NativeRayObject.
/// Returns nullptr for a null pointer; otherwise copies data and metadata
/// into fresh Java byte arrays and wraps them in a NativeRayObject.
inline jobject NativeRayObjectToJavaNativeRayObject(
    JNIEnv *env, const std::shared_ptr<ray::RayObject> &rayObject) {
  if (!rayObject) {
    return nullptr;
  }
  jbyteArray java_data = NativeBufferToJavaByteArray(env, rayObject->GetData());
  jbyteArray java_metadata = NativeBufferToJavaByteArray(env, rayObject->GetMetadata());
  jobject java_obj =
      env->NewObject(java_native_ray_object_class, java_native_ray_object_init,
                     java_data, java_metadata);
  // The NativeRayObject instance now references the arrays; drop our locals.
  env->DeleteLocalRef(java_data);
  env->DeleteLocalRef(java_metadata);
  return java_obj;
}
#endif // RAY_COMMON_JAVA_JNI_UTILS_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_RayNativeRuntime.cc
|
C++
|
#include "ray/core_worker/lib/java/org_ray_runtime_RayNativeRuntime.h"
#include <jni.h>
#include <sstream>
#include "ray/common/id.h"
#include "ray/core_worker/core_worker.h"
#include "ray/core_worker/lib/java/jni_utils.h"
thread_local JNIEnv *local_env = nullptr;
thread_local jobject local_java_task_executor = nullptr;
/// Build native GcsClientOptions from the Java GcsClientOptions object.
inline ray::gcs::GcsClientOptions ToGcsClientOptions(JNIEnv *env,
                                                     jobject gcs_client_options) {
  auto java_ip =
      (jstring)env->GetObjectField(gcs_client_options, java_gcs_client_options_ip);
  auto java_password =
      (jstring)env->GetObjectField(gcs_client_options, java_gcs_client_options_password);
  std::string ip = JavaStringToNativeString(env, java_ip);
  std::string password = JavaStringToNativeString(env, java_password);
  int port = env->GetIntField(gcs_client_options, java_gcs_client_options_port);
  return ray::gcs::GcsClientOptions(ip, port, password, /*is_test_client=*/false);
}
#ifdef __cplusplus
extern "C" {
#endif
/// Construct the native CoreWorker and return its address as a jlong handle
/// that Java passes back on every subsequent native call.
JNIEXPORT jlong JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeInitCoreWorker(
    JNIEnv *env, jclass, jint workerMode, jstring storeSocket, jstring rayletSocket,
    jstring nodeIpAddress, jint nodeManagerPort, jbyteArray jobId,
    jobject gcsClientOptions) {
  auto native_store_socket = JavaStringToNativeString(env, storeSocket);
  auto native_raylet_socket = JavaStringToNativeString(env, rayletSocket);
  auto job_id = JavaByteArrayToId<ray::JobID>(env, jobId);
  auto gcs_client_options = ToGcsClientOptions(env, gcsClientOptions);
  auto node_ip_address = JavaStringToNativeString(env, nodeIpAddress);
  // Task execution callback invoked by the CoreWorker for each task. It runs
  // on the thread that called nativeRunTaskExecutor, which sets the
  // thread-local `local_env` / `local_java_task_executor` beforehand — hence
  // the captureless lambda and the RAY_CHECKs below.
  auto task_execution_callback =
      [](ray::TaskType task_type, const ray::RayFunction &ray_function,
         const std::unordered_map<std::string, double> &required_resources,
         const std::vector<std::shared_ptr<ray::RayObject>> &args,
         const std::vector<ObjectID> &arg_reference_ids,
         const std::vector<ObjectID> &return_ids,
         std::vector<std::shared_ptr<ray::RayObject>> *results) {
        JNIEnv *env = local_env;
        RAY_CHECK(env);
        RAY_CHECK(local_java_task_executor);
        // Convert the RayFunction descriptor into a Java List<String>.
        jobject ray_function_array_list =
            NativeStringVectorToJavaStringList(env, ray_function.GetFunctionDescriptor());
        // Convert the task arguments into a Java List<NativeRayObject>.
        // TODO (kfstorm): Avoid copying binary data from Java to C++
        jobject args_array_list = NativeVectorToJavaList<std::shared_ptr<ray::RayObject>>(
            env, args, NativeRayObjectToJavaNativeRayObject);
        // Invoke TaskExecutor.execute(functionDescriptor, args) on the Java side.
        jobject java_return_objects =
            env->CallObjectMethod(local_java_task_executor, java_task_executor_execute,
                                  ray_function_array_list, args_array_list);
        RAY_CHECK_JAVA_EXCEPTION(env);
        // Convert the returned List<NativeRayObject> back to native objects.
        std::vector<std::shared_ptr<ray::RayObject>> return_objects;
        JavaListToNativeVector<std::shared_ptr<ray::RayObject>>(
            env, java_return_objects, &return_objects,
            [](JNIEnv *env, jobject java_native_ray_object) {
              return JavaNativeRayObjectToNativeRayObject(env, java_native_ray_object);
            });
        for (auto &obj : return_objects) {
          results->push_back(obj);
        }
        env->DeleteLocalRef(java_return_objects);
        env->DeleteLocalRef(args_array_list);
        env->DeleteLocalRef(ray_function_array_list);
        return ray::Status::OK();
      };
  try {
    auto core_worker = new ray::CoreWorker(
        static_cast<ray::WorkerType>(workerMode), ::Language::JAVA, native_store_socket,
        native_raylet_socket, job_id, gcs_client_options, /*log_dir=*/"", node_ip_address,
        nodeManagerPort, task_execution_callback);
    // Ownership transfers to Java; nativeDestroyCoreWorker deletes it.
    return reinterpret_cast<jlong>(core_worker);
  } catch (const std::exception &e) {
    std::ostringstream oss;
    oss << "Failed to construct core worker: " << e.what();
    THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, ray::Status::Invalid(oss.str()), 0);
    return 0;  // Unreachable; silences the missing-return warning.
  }
}
/// Run the worker's task-execution loop on the calling thread.
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeRunTaskExecutor(
    JNIEnv *env, jclass o, jlong nativeCoreWorkerPointer, jobject javaTaskExecutor) {
  // Publish this thread's JNI env and the Java executor via thread-locals so
  // the task execution callback (installed in nativeInitCoreWorker) can use
  // them when it fires on this thread.
  local_env = env;
  local_java_task_executor = javaTaskExecutor;
  auto core_worker = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  core_worker->StartExecutingTasks();
  // The loop has returned; clear the thread-locals to avoid dangling refs.
  local_env = nullptr;
  local_java_task_executor = nullptr;
}
/// Disconnect the worker from the cluster, then free the native object that
/// nativeInitCoreWorker allocated.
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeDestroyCoreWorker(
    JNIEnv *env, jclass o, jlong nativeCoreWorkerPointer) {
  auto *worker = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  worker->Disconnect();
  delete worker;
}
/// One-time process setup for a Java worker: starts Ray logging into logDir.
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeSetup(JNIEnv *env,
                                                                         jclass,
                                                                         jstring logDir) {
  std::string log_dir = JavaStringToNativeString(env, logDir);
  ray::RayLog::StartRayLog("java_worker", ray::RayLogLevel::INFO, log_dir);
  // TODO (kfstorm): We can't InstallFailureSignalHandler here, because JVM already
  // installed its own signal handler. It's possible to fix this by chaining signal
  // handlers. But it's not easy. See
  // https://docs.oracle.com/javase/9/troubleshoot/handle-signals-and-exceptions.htm.
}
/// JVM shutdown hook: flush and stop Ray logging (pairs with nativeSetup).
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeShutdownHook(JNIEnv *,
                                                                                jclass) {
  ray::RayLog::ShutDownRayLog();
}
/// Set the capacity of a named resource on the given node via the raylet.
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeSetResource(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer, jstring resourceName,
    jdouble capacity, jbyteArray nodeId) {
  const auto node_id = JavaByteArrayToId<ClientID>(env, nodeId);
  // The second argument is the `jboolean *isCopy` out-parameter; pass nullptr
  // since we don't need it (JNI_FALSE only worked here because it expands to 0).
  const char *native_resource_name = env->GetStringUTFChars(resourceName, nullptr);
  auto &raylet_client =
      reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer)->GetRayletClient();
  auto status = raylet_client.SetResource(native_resource_name,
                                          static_cast<double>(capacity), node_id);
  // Release the UTF chars before the macro may `return`, so they never leak.
  env->ReleaseStringUTFChars(resourceName, native_resource_name);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, (void)0);
}
/// Forcibly kill the actor identified by `actorId`.
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeKillActor(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer, jbyteArray actorId) {
  const auto actor_id = JavaByteArrayToId<ActorID>(env, actorId);
  auto *core_worker = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  auto status = core_worker->KillActor(actor_id);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, (void)0);
}
#ifdef __cplusplus
}
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_RayNativeRuntime.h
|
C/C++ Header
|
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_runtime_RayNativeRuntime */
#ifndef _Included_org_ray_runtime_RayNativeRuntime
#define _Included_org_ray_runtime_RayNativeRuntime
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: org_ray_runtime_RayNativeRuntime
* Method: nativeInitCoreWorker
* Signature:
* (ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;I[BLorg/ray/runtime/gcs/GcsClientOptions;)J
*/
JNIEXPORT jlong JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeInitCoreWorker(
JNIEnv *, jclass, jint, jstring, jstring, jstring, jint, jbyteArray, jobject);
/*
* Class: org_ray_runtime_RayNativeRuntime
* Method: nativeRunTaskExecutor
* Signature: (JLorg/ray/runtime/task/TaskExecutor;)V
*/
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeRunTaskExecutor(
JNIEnv *, jclass, jlong, jobject);
/*
* Class: org_ray_runtime_RayNativeRuntime
* Method: nativeDestroyCoreWorker
* Signature: (J)V
*/
JNIEXPORT void JNICALL
Java_org_ray_runtime_RayNativeRuntime_nativeDestroyCoreWorker(JNIEnv *, jclass, jlong);
/*
* Class: org_ray_runtime_RayNativeRuntime
* Method: nativeSetup
* Signature: (Ljava/lang/String;)V
*/
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeSetup(JNIEnv *, jclass,
jstring);
/*
* Class: org_ray_runtime_RayNativeRuntime
* Method: nativeShutdownHook
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeShutdownHook(JNIEnv *,
jclass);
/*
* Class: org_ray_runtime_RayNativeRuntime
* Method: nativeSetResource
* Signature: (JLjava/lang/String;D[B)V
*/
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeSetResource(
JNIEnv *, jclass, jlong, jstring, jdouble, jbyteArray);
/*
* Class: org_ray_runtime_RayNativeRuntime
* Method: nativeKillActor
* Signature: (J[B)V
*/
JNIEXPORT void JNICALL Java_org_ray_runtime_RayNativeRuntime_nativeKillActor(JNIEnv *,
jclass,
jlong,
jbyteArray);
#ifdef __cplusplus
}
#endif
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_actor_NativeRayActor.cc
|
C++
|
#include "ray/core_worker/lib/java/org_ray_runtime_actor_NativeRayActor.h"
#include <jni.h>
#include "ray/common/id.h"
#include "ray/core_worker/common.h"
#include "ray/core_worker/core_worker.h"
#include "ray/core_worker/lib/java/jni_utils.h"
/// Dereference the CoreWorker handle that Java holds as a jlong.
inline ray::CoreWorker &GetCoreWorker(jlong nativeCoreWorkerPointer) {
  return *reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
}
#ifdef __cplusplus
extern "C" {
#endif
/// Whether the actor was created in direct-call mode. Throws to Java (and
/// returns false) if the actor handle cannot be found.
JNIEXPORT jboolean JNICALL
Java_org_ray_runtime_actor_NativeRayActor_nativeIsDirectCallActor(
    JNIEnv *env, jclass o, jlong nativeCoreWorkerPointer, jbyteArray actorId) {
  const auto actor_id = JavaByteArrayToId<ray::ActorID>(env, actorId);
  ray::ActorHandle *handle = nullptr;
  auto status =
      GetCoreWorker(nativeCoreWorkerPointer).GetActorHandle(actor_id, &handle);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, false);
  return handle->IsDirectCallActor();
}
/// Return the actor's creation-task function descriptor as a Java List<String>.
/// Throws to Java (and returns null) if the actor handle cannot be found.
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_actor_NativeRayActor_nativeGetActorCreationTaskFunctionDescriptor(
    JNIEnv *env, jclass o, jlong nativeCoreWorkerPointer, jbyteArray actorId) {
  const auto actor_id = JavaByteArrayToId<ray::ActorID>(env, actorId);
  ray::ActorHandle *handle = nullptr;
  auto status =
      GetCoreWorker(nativeCoreWorkerPointer).GetActorHandle(actor_id, &handle);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  return NativeStringVectorToJavaStringList(
      env, handle->ActorCreationTaskFunctionDescriptor());
}
/// Serialize the actor handle identified by `actorId` into a Java byte array.
JNIEXPORT jbyteArray JNICALL Java_org_ray_runtime_actor_NativeRayActor_nativeSerialize(
    JNIEnv *env, jclass o, jlong nativeCoreWorkerPointer, jbyteArray actorId) {
  auto actor_id = JavaByteArrayToId<ray::ActorID>(env, actorId);
  std::string output;
  ray::Status status =
      GetCoreWorker(nativeCoreWorkerPointer).SerializeActorHandle(actor_id, &output);
  // Bug fix: the status was previously ignored, so a failed serialization
  // silently returned an empty/garbage byte array. Surface it to Java instead.
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  jbyteArray bytes = env->NewByteArray(output.size());
  env->SetByteArrayRegion(bytes, 0, output.size(),
                          reinterpret_cast<const jbyte *>(output.c_str()));
  return bytes;
}
/// Deserialize a byte array into an actor handle, register it with the core
/// worker, and return the resulting actor ID as a Java byte array.
JNIEXPORT jbyteArray JNICALL Java_org_ray_runtime_actor_NativeRayActor_nativeDeserialize(
    JNIEnv *env, jclass o, jlong nativeCoreWorkerPointer, jbyteArray data) {
  auto buffer = JavaByteArrayToNativeBuffer(env, data);
  // Bug fix: a null `data` makes JavaByteArrayToNativeBuffer return nullptr,
  // and the old code dereferenced it (buffer->Size()) before any check.
  RAY_CHECK(buffer && buffer->Size() > 0);
  auto binary = std::string(reinterpret_cast<char *>(buffer->Data()), buffer->Size());
  auto actor_id =
      GetCoreWorker(nativeCoreWorkerPointer).DeserializeAndRegisterActorHandle(binary);
  return IdToJavaByteArray<ray::ActorID>(env, actor_id);
}
#ifdef __cplusplus
}
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_actor_NativeRayActor.h
|
C/C++ Header
|
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_runtime_actor_NativeRayActor */
#ifndef _Included_org_ray_runtime_actor_NativeRayActor
#define _Included_org_ray_runtime_actor_NativeRayActor
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: org_ray_runtime_actor_NativeRayActor
* Method: nativeIsDirectCallActor
* Signature: (J[B)Z
*/
JNIEXPORT jboolean JNICALL
Java_org_ray_runtime_actor_NativeRayActor_nativeIsDirectCallActor(JNIEnv *, jclass, jlong,
jbyteArray);
/*
* Class: org_ray_runtime_actor_NativeRayActor
* Method: nativeGetActorCreationTaskFunctionDescriptor
* Signature: (J[B)Ljava/util/List;
*/
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_actor_NativeRayActor_nativeGetActorCreationTaskFunctionDescriptor(
JNIEnv *, jclass, jlong, jbyteArray);
/*
* Class: org_ray_runtime_actor_NativeRayActor
* Method: nativeSerialize
* Signature: (J[B)[B
*/
JNIEXPORT jbyteArray JNICALL Java_org_ray_runtime_actor_NativeRayActor_nativeSerialize(
JNIEnv *, jclass, jlong, jbyteArray);
/*
* Class: org_ray_runtime_actor_NativeRayActor
* Method: nativeDeserialize
* Signature: (J[B)[B
*/
JNIEXPORT jbyteArray JNICALL Java_org_ray_runtime_actor_NativeRayActor_nativeDeserialize(
JNIEnv *, jclass, jlong, jbyteArray);
#ifdef __cplusplus
}
#endif
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_context_NativeWorkerContext.cc
|
C++
|
#include "ray/core_worker/lib/java/org_ray_runtime_context_NativeWorkerContext.h"
#include <jni.h>
#include "ray/common/id.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/core_worker.h"
#include "ray/core_worker/lib/java/jni_utils.h"
/// Fetch the WorkerContext owned by the CoreWorker behind the Java-held handle.
inline ray::WorkerContext &GetWorkerContextFromPointer(jlong nativeCoreWorkerPointer) {
  return reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer)->GetWorkerContext();
}
#ifdef __cplusplus
extern "C" {
#endif
/// Return the type of the currently executing task; aborts if none is set.
JNIEXPORT jint JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentTaskType(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer) {
  const auto current_task =
      GetWorkerContextFromPointer(nativeCoreWorkerPointer).GetCurrentTask();
  RAY_CHECK(current_task) << "Current task is not set.";
  return static_cast<int>(current_task->GetMessage().type());
}
/// Return the current task ID as a direct ByteBuffer viewing the native ID.
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentTaskId(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer) {
  auto &worker_context = GetWorkerContextFromPointer(nativeCoreWorkerPointer);
  return IdToJavaByteBuffer<ray::TaskID>(env, worker_context.GetCurrentTaskID());
}
/// Return the current job ID as a direct ByteBuffer viewing the native ID.
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentJobId(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer) {
  auto &worker_context = GetWorkerContextFromPointer(nativeCoreWorkerPointer);
  return IdToJavaByteBuffer<ray::JobID>(env, worker_context.GetCurrentJobID());
}
/// Return this worker's ID as a direct ByteBuffer viewing the native ID.
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentWorkerId(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer) {
  auto &worker_context = GetWorkerContextFromPointer(nativeCoreWorkerPointer);
  return IdToJavaByteBuffer<ray::WorkerID>(env, worker_context.GetWorkerID());
}
/// Return the current actor ID as a direct ByteBuffer viewing the native ID.
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentActorId(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer) {
  auto &worker_context = GetWorkerContextFromPointer(nativeCoreWorkerPointer);
  return IdToJavaByteBuffer<ray::ActorID>(env, worker_context.GetCurrentActorID());
}
#ifdef __cplusplus
}
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_context_NativeWorkerContext.h
|
C/C++ Header
|
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_runtime_context_NativeWorkerContext */
#ifndef _Included_org_ray_runtime_context_NativeWorkerContext
#define _Included_org_ray_runtime_context_NativeWorkerContext
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: org_ray_runtime_context_NativeWorkerContext
* Method: nativeGetCurrentTaskType
* Signature: (J)I
*/
JNIEXPORT jint JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentTaskType(JNIEnv *,
jclass, jlong);
/*
* Class: org_ray_runtime_context_NativeWorkerContext
* Method: nativeGetCurrentTaskId
* Signature: (J)Ljava/nio/ByteBuffer;
*/
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentTaskId(JNIEnv *, jclass,
jlong);
/*
* Class: org_ray_runtime_context_NativeWorkerContext
* Method: nativeGetCurrentJobId
* Signature: (J)Ljava/nio/ByteBuffer;
*/
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentJobId(JNIEnv *, jclass,
jlong);
/*
* Class: org_ray_runtime_context_NativeWorkerContext
* Method: nativeGetCurrentWorkerId
* Signature: (J)Ljava/nio/ByteBuffer;
*/
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentWorkerId(JNIEnv *,
jclass, jlong);
/*
* Class: org_ray_runtime_context_NativeWorkerContext
* Method: nativeGetCurrentActorId
* Signature: (J)Ljava/nio/ByteBuffer;
*/
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_context_NativeWorkerContext_nativeGetCurrentActorId(JNIEnv *, jclass,
jlong);
#ifdef __cplusplus
}
#endif
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_object_NativeObjectStore.cc
|
C++
|
#include "ray/core_worker/lib/java/org_ray_runtime_object_NativeObjectStore.h"
#include <jni.h>
#include "ray/common/id.h"
#include "ray/core_worker/common.h"
#include "ray/core_worker/core_worker.h"
#include "ray/core_worker/lib/java/jni_utils.h"
#ifdef __cplusplus
extern "C" {
#endif
/// Put an object into the store under a freshly generated ID; returns the ID.
JNIEXPORT jbyteArray JNICALL
Java_org_ray_runtime_object_NativeObjectStore_nativePut__JLorg_ray_runtime_object_NativeRayObject_2(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer, jobject obj) {
  auto ray_object = JavaNativeRayObjectToNativeRayObject(env, obj);
  RAY_CHECK(ray_object != nullptr);
  auto *core_worker = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  ray::ObjectID object_id;
  auto status = core_worker->Put(*ray_object, &object_id);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  return IdToJavaByteArray<ray::ObjectID>(env, object_id);
}
/// Put an object into the store under a caller-supplied object ID.
JNIEXPORT void JNICALL
Java_org_ray_runtime_object_NativeObjectStore_nativePut__J_3BLorg_ray_runtime_object_NativeRayObject_2(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer, jbyteArray objectId,
    jobject obj) {
  const auto object_id = JavaByteArrayToId<ray::ObjectID>(env, objectId);
  auto ray_object = JavaNativeRayObjectToNativeRayObject(env, obj);
  RAY_CHECK(ray_object != nullptr);
  auto *core_worker = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  auto status = core_worker->Put(*ray_object, object_id);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, (void)0);
}
/// Fetch objects by ID, blocking up to `timeoutMs`; returns a Java list of
/// NativeRayObject.
JNIEXPORT jobject JNICALL Java_org_ray_runtime_object_NativeObjectStore_nativeGet(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer, jobject ids, jlong timeoutMs) {
  auto byte_array_to_object_id = [](JNIEnv *env, jobject id) {
    return JavaByteArrayToId<ray::ObjectID>(env, static_cast<jbyteArray>(id));
  };
  std::vector<ray::ObjectID> object_ids;
  JavaListToNativeVector<ray::ObjectID>(env, ids, &object_ids, byte_array_to_object_id);
  auto *core_worker = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  std::vector<std::shared_ptr<ray::RayObject>> results;
  auto status = core_worker->Get(object_ids, (int64_t)timeoutMs, &results);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  return NativeVectorToJavaList<std::shared_ptr<ray::RayObject>>(
      env, results, NativeRayObjectToJavaNativeRayObject);
}
/// Wait until `numObjects` of the given IDs are ready or the timeout expires;
/// returns a Java List<Boolean> of per-object readiness flags.
JNIEXPORT jobject JNICALL Java_org_ray_runtime_object_NativeObjectStore_nativeWait(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer, jobject objectIds,
    jint numObjects, jlong timeoutMs) {
  auto byte_array_to_object_id = [](JNIEnv *env, jobject id) {
    return JavaByteArrayToId<ray::ObjectID>(env, static_cast<jbyteArray>(id));
  };
  std::vector<ray::ObjectID> object_ids;
  JavaListToNativeVector<ray::ObjectID>(env, objectIds, &object_ids,
                                        byte_array_to_object_id);
  auto *core_worker = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  std::vector<bool> results;
  auto status =
      core_worker->Wait(object_ids, (int)numObjects, (int64_t)timeoutMs, &results);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  auto to_java_boolean = [](JNIEnv *env, const bool &ready) {
    return env->NewObject(java_boolean_class, java_boolean_init, (jboolean)ready);
  };
  return NativeVectorToJavaList<bool>(env, results, to_java_boolean);
}
/// JNI binding for NativeObjectStore.nativeDelete(long, List, boolean, boolean).
/// Deletes the given objects from the object store. `localOnly` presumably
/// restricts deletion to this node and `deleteCreatingTasks` also removes the
/// creating tasks' entries — semantics live in CoreWorker::Delete; confirm there.
JNIEXPORT void JNICALL Java_org_ray_runtime_object_NativeObjectStore_nativeDelete(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer, jobject objectIds,
    jboolean localOnly, jboolean deleteCreatingTasks) {
  std::vector<ray::ObjectID> object_ids;
  JavaListToNativeVector<ray::ObjectID>(
      env, objectIds, &object_ids, [](JNIEnv *env, jobject id) {
        return JavaByteArrayToId<ray::ObjectID>(env, static_cast<jbyteArray>(id));
      });
  auto status = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer)
                    ->Delete(object_ids, (bool)localOnly, (bool)deleteCreatingTasks);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, (void)0);
}
#ifdef __cplusplus
}
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_object_NativeObjectStore.h
|
C/C++ Header
|
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_runtime_object_NativeObjectStore */
#ifndef _Included_org_ray_runtime_object_NativeObjectStore
#define _Included_org_ray_runtime_object_NativeObjectStore
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: org_ray_runtime_object_NativeObjectStore
* Method: nativePut
* Signature: (JLorg/ray/runtime/object/NativeRayObject;)[B
*/
JNIEXPORT jbyteArray JNICALL
Java_org_ray_runtime_object_NativeObjectStore_nativePut__JLorg_ray_runtime_object_NativeRayObject_2(
JNIEnv *, jclass, jlong, jobject);
/*
* Class: org_ray_runtime_object_NativeObjectStore
* Method: nativePut
* Signature: (J[BLorg/ray/runtime/object/NativeRayObject;)V
*/
JNIEXPORT void JNICALL
Java_org_ray_runtime_object_NativeObjectStore_nativePut__J_3BLorg_ray_runtime_object_NativeRayObject_2(
JNIEnv *, jclass, jlong, jbyteArray, jobject);
/*
* Class: org_ray_runtime_object_NativeObjectStore
* Method: nativeGet
* Signature: (JLjava/util/List;J)Ljava/util/List;
*/
JNIEXPORT jobject JNICALL Java_org_ray_runtime_object_NativeObjectStore_nativeGet(
JNIEnv *, jclass, jlong, jobject, jlong);
/*
* Class: org_ray_runtime_object_NativeObjectStore
* Method: nativeWait
* Signature: (JLjava/util/List;IJ)Ljava/util/List;
*/
JNIEXPORT jobject JNICALL Java_org_ray_runtime_object_NativeObjectStore_nativeWait(
JNIEnv *, jclass, jlong, jobject, jint, jlong);
/*
* Class: org_ray_runtime_object_NativeObjectStore
* Method: nativeDelete
* Signature: (JLjava/util/List;ZZ)V
*/
JNIEXPORT void JNICALL Java_org_ray_runtime_object_NativeObjectStore_nativeDelete(
JNIEnv *, jclass, jlong, jobject, jboolean, jboolean);
#ifdef __cplusplus
}
#endif
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_task_NativeTaskExecutor.cc
|
C++
|
#include "ray/core_worker/lib/java/org_ray_runtime_task_NativeTaskExecutor.h"
#include <jni.h>
#include "ray/common/id.h"
#include "ray/core_worker/common.h"
#include "ray/core_worker/core_worker.h"
#include "ray/core_worker/lib/java/jni_utils.h"
#include "ray/raylet/raylet_client.h"
#ifdef __cplusplus
extern "C" {
#endif
using ray::ClientID;
/// JNI binding for NativeTaskExecutor.nativePrepareCheckpoint(long).
/// Asks the raylet to prepare a checkpoint for the currently-executing actor
/// and returns the new checkpoint ID as a Java byte[].
JNIEXPORT jbyteArray JNICALL
Java_org_ray_runtime_task_NativeTaskExecutor_nativePrepareCheckpoint(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer) {
  auto &core_worker = *reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  const auto &actor_id = core_worker.GetWorkerContext().GetCurrentActorID();
  const auto &task_spec = core_worker.GetWorkerContext().GetCurrentTask();
  // Checkpointing only makes sense while executing an actor task.
  RAY_CHECK(task_spec->IsActorTask());
  ActorCheckpointID checkpoint_id;
  auto status =
      core_worker.GetRayletClient().PrepareActorCheckpoint(actor_id, checkpoint_id);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  // Copy the raw checkpoint ID bytes into a freshly-allocated Java byte array.
  jbyteArray result = env->NewByteArray(checkpoint_id.Size());
  env->SetByteArrayRegion(result, 0, checkpoint_id.Size(),
                          reinterpret_cast<const jbyte *>(checkpoint_id.Data()));
  return result;
}
/// JNI binding for NativeTaskExecutor.nativeNotifyActorResumedFromCheckpoint.
/// Tells the raylet that the current actor has been restored from the given
/// checkpoint ID.
JNIEXPORT void JNICALL
Java_org_ray_runtime_task_NativeTaskExecutor_nativeNotifyActorResumedFromCheckpoint(
    JNIEnv *env, jclass, jlong nativeCoreWorkerPointer, jbyteArray checkpointId) {
  auto &core_worker = *reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  const auto &actor_id = core_worker.GetWorkerContext().GetCurrentActorID();
  const auto checkpoint_id = JavaByteArrayToId<ActorCheckpointID>(env, checkpointId);
  auto status = core_worker.GetRayletClient().NotifyActorResumedFromCheckpoint(
      actor_id, checkpoint_id);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, (void)0);
}
#ifdef __cplusplus
}
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_task_NativeTaskExecutor.h
|
C/C++ Header
|
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_runtime_task_NativeTaskExecutor */
#ifndef _Included_org_ray_runtime_task_NativeTaskExecutor
#define _Included_org_ray_runtime_task_NativeTaskExecutor
#ifdef __cplusplus
extern "C" {
#endif
#undef org_ray_runtime_task_NativeTaskExecutor_NUM_ACTOR_CHECKPOINTS_TO_KEEP
#define org_ray_runtime_task_NativeTaskExecutor_NUM_ACTOR_CHECKPOINTS_TO_KEEP 20L
/*
* Class: org_ray_runtime_task_NativeTaskExecutor
* Method: nativePrepareCheckpoint
* Signature: (J)[B
*/
JNIEXPORT jbyteArray JNICALL
Java_org_ray_runtime_task_NativeTaskExecutor_nativePrepareCheckpoint(JNIEnv *, jclass,
jlong);
/*
* Class: org_ray_runtime_task_NativeTaskExecutor
* Method: nativeNotifyActorResumedFromCheckpoint
* Signature: (J[B)V
*/
JNIEXPORT void JNICALL
Java_org_ray_runtime_task_NativeTaskExecutor_nativeNotifyActorResumedFromCheckpoint(
JNIEnv *, jclass, jlong, jbyteArray);
#ifdef __cplusplus
}
#endif
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_task_NativeTaskSubmitter.cc
|
C++
|
#include "ray/core_worker/lib/java/org_ray_runtime_task_NativeTaskSubmitter.h"
#include <jni.h>
#include "ray/common/id.h"
#include "ray/core_worker/common.h"
#include "ray/core_worker/core_worker.h"
#include "ray/core_worker/lib/java/jni_utils.h"
/// Recover the CoreWorker reference from the opaque handle the Java side
/// carries around as a jlong.
inline ray::CoreWorker &GetCoreWorker(jlong nativeCoreWorkerPointer) {
  auto *core_worker = reinterpret_cast<ray::CoreWorker *>(nativeCoreWorkerPointer);
  return *core_worker;
}
/// Build a ray::RayFunction from a Java FunctionDescriptor by reading its
/// string-list form and its Language enum through cached JNI method IDs.
inline ray::RayFunction ToRayFunction(JNIEnv *env, jobject functionDescriptor) {
  std::vector<std::string> function_descriptor;
  jobject list =
      env->CallObjectMethod(functionDescriptor, java_function_descriptor_to_list);
  RAY_CHECK_JAVA_EXCEPTION(env);
  JavaStringListToNativeStringVector(env, list, &function_descriptor);
  jobject java_language =
      env->CallObjectMethod(functionDescriptor, java_function_descriptor_get_language);
  RAY_CHECK_JAVA_EXCEPTION(env);
  // The Java Language enum is converted to its protobuf number, then cast to
  // the native ::Language enum.
  int language = env->CallIntMethod(java_language, java_language_get_number);
  RAY_CHECK_JAVA_EXCEPTION(env);
  ray::RayFunction ray_function{static_cast<::Language>(language), function_descriptor};
  return ray_function;
}
/// Convert a Java List of FunctionArg into native TaskArgs. Each arg is
/// either pass-by-reference (its `id` field is set) or pass-by-value (its
/// `value` field is set); at least one of the two must be non-null.
inline std::vector<ray::TaskArg> ToTaskArgs(JNIEnv *env, jobject args) {
  std::vector<ray::TaskArg> task_args;
  JavaListToNativeVector<ray::TaskArg>(
      env, args, &task_args, [](JNIEnv *env, jobject arg) {
        auto java_id = env->GetObjectField(arg, java_function_arg_id);
        if (java_id) {
          // Reference argument: extract the ObjectID bytes.
          auto java_id_bytes = static_cast<jbyteArray>(
              env->CallObjectMethod(java_id, java_base_id_get_bytes));
          RAY_CHECK_JAVA_EXCEPTION(env);
          return ray::TaskArg::PassByReference(
              JavaByteArrayToId<ray::ObjectID>(env, java_id_bytes));
        }
        // Value argument: the serialized payload is carried inline.
        auto java_value =
            static_cast<jbyteArray>(env->GetObjectField(arg, java_function_arg_value));
        RAY_CHECK(java_value) << "Both id and value of FunctionArg are null.";
        auto value = JavaNativeRayObjectToNativeRayObject(env, java_value);
        return ray::TaskArg::PassByValue(value);
      });
  return task_args;
}
/// Convert a Java Map<String, Double> of resource demands into a native map.
/// A null Java map yields an empty result.
inline std::unordered_map<std::string, double> ToResources(JNIEnv *env,
                                                           jobject java_resources) {
  std::unordered_map<std::string, double> resources;
  if (java_resources) {
    // Walk the map via entrySet().iterator(), checking for a pending Java
    // exception after every JNI call.
    jobject entry_set = env->CallObjectMethod(java_resources, java_map_entry_set);
    RAY_CHECK_JAVA_EXCEPTION(env);
    jobject iterator = env->CallObjectMethod(entry_set, java_set_iterator);
    RAY_CHECK_JAVA_EXCEPTION(env);
    while (env->CallBooleanMethod(iterator, java_iterator_has_next)) {
      RAY_CHECK_JAVA_EXCEPTION(env);
      jobject map_entry = env->CallObjectMethod(iterator, java_iterator_next);
      RAY_CHECK_JAVA_EXCEPTION(env);
      auto java_key = (jstring)env->CallObjectMethod(map_entry, java_map_entry_get_key);
      RAY_CHECK_JAVA_EXCEPTION(env);
      std::string key = JavaStringToNativeString(env, java_key);
      auto java_value = env->CallObjectMethod(map_entry, java_map_entry_get_value);
      RAY_CHECK_JAVA_EXCEPTION(env);
      // Unbox the java.lang.Double.
      double value = env->CallDoubleMethod(java_value, java_double_double_value);
      RAY_CHECK_JAVA_EXCEPTION(env);
      resources.emplace(key, value);
    }
    // Catch an exception thrown by the final hasNext() that ended the loop.
    RAY_CHECK_JAVA_EXCEPTION(env);
  }
  return resources;
}
/// Translate a (possibly null) Java CallOptions object plus the declared
/// number of return values into native TaskOptions. Direct calls are always
/// disabled for tasks submitted through this path.
inline ray::TaskOptions ToTaskOptions(JNIEnv *env, jint numReturns, jobject callOptions) {
  std::unordered_map<std::string, double> resources;
  if (callOptions) {
    resources = ToResources(
        env, env->GetObjectField(callOptions, java_base_task_options_resources));
  }
  return ray::TaskOptions{numReturns, /*is_direct_call=*/false, resources};
}
/// Translate a (possibly null) Java ActorCreationOptions object into the
/// native ray::ActorCreationOptions. When the Java object is null, every
/// field keeps its default except use_direct_call, which is read from the
/// Java-side static default.
inline ray::ActorCreationOptions ToActorCreationOptions(JNIEnv *env,
                                                        jobject actorCreationOptions) {
  uint64_t max_reconstructions = 0;
  bool use_direct_call;
  std::unordered_map<std::string, double> resources;
  std::vector<std::string> dynamic_worker_options;
  if (actorCreationOptions) {
    max_reconstructions = static_cast<uint64_t>(env->GetIntField(
        actorCreationOptions, java_actor_creation_options_max_reconstructions));
    use_direct_call = env->GetBooleanField(actorCreationOptions,
                                           java_actor_creation_options_use_direct_call);
    jobject java_resources =
        env->GetObjectField(actorCreationOptions, java_base_task_options_resources);
    resources = ToResources(env, java_resources);
    jstring java_jvm_options = (jstring)env->GetObjectField(
        actorCreationOptions, java_actor_creation_options_jvm_options);
    if (java_jvm_options) {
      // The whole JVM options string is forwarded as a single worker option.
      std::string jvm_options = JavaStringToNativeString(env, java_jvm_options);
      dynamic_worker_options.emplace_back(jvm_options);
    }
  } else {
    use_direct_call =
        env->GetStaticBooleanField(java_actor_creation_options_class,
                                   java_actor_creation_options_default_use_direct_call);
  }
  // NOTE(review): `resources` is passed twice — presumably once as required
  // resources and once as placement resources; confirm against the
  // ActorCreationOptions constructor signature.
  ray::ActorCreationOptions actor_creation_options{
      static_cast<uint64_t>(max_reconstructions),
      use_direct_call,
      /*max_concurrency=*/1,
      resources,
      resources,
      dynamic_worker_options,
      /*is_detached=*/false,
      /*is_asyncio=*/false};
  return actor_creation_options;
}
#ifdef __cplusplus
extern "C" {
#endif
/// JNI binding for NativeTaskSubmitter.nativeSubmitTask.
/// Submits a normal (non-actor) task and returns its return ObjectIDs as a
/// Java List of byte[].
JNIEXPORT jobject JNICALL Java_org_ray_runtime_task_NativeTaskSubmitter_nativeSubmitTask(
    JNIEnv *env, jclass p, jlong nativeCoreWorkerPointer, jobject functionDescriptor,
    jobject args, jint numReturns, jobject callOptions) {
  auto ray_function = ToRayFunction(env, functionDescriptor);
  auto task_args = ToTaskArgs(env, args);
  auto task_options = ToTaskOptions(env, numReturns, callOptions);
  std::vector<ObjectID> return_ids;
  // NOTE(review): max_retries is hard-coded to 1 and not exposed through the
  // Java API — confirm this matches the intended retry semantics.
  auto status = GetCoreWorker(nativeCoreWorkerPointer)
                    .SubmitTask(ray_function, task_args, task_options, &return_ids, /*max_retries=*/1);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  return NativeIdVectorToJavaByteArrayList(env, return_ids);
}
/// JNI binding for NativeTaskSubmitter.nativeCreateActor.
/// Creates an actor from the descriptor/args/options and returns the new
/// ActorID as a Java byte[].
JNIEXPORT jbyteArray JNICALL
Java_org_ray_runtime_task_NativeTaskSubmitter_nativeCreateActor(
    JNIEnv *env, jclass p, jlong nativeCoreWorkerPointer, jobject functionDescriptor,
    jobject args, jobject actorCreationOptions) {
  auto ray_function = ToRayFunction(env, functionDescriptor);
  auto task_args = ToTaskArgs(env, args);
  auto actor_creation_options = ToActorCreationOptions(env, actorCreationOptions);
  ray::ActorID actor_id;
  auto status =
      GetCoreWorker(nativeCoreWorkerPointer)
          .CreateActor(ray_function, task_args, actor_creation_options, &actor_id);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  return IdToJavaByteArray<ray::ActorID>(env, actor_id);
}
/// JNI binding for NativeTaskSubmitter.nativeSubmitActorTask.
/// Submits a task on an existing actor and returns its return ObjectIDs as a
/// Java List of byte[].
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_task_NativeTaskSubmitter_nativeSubmitActorTask(
    JNIEnv *env, jclass p, jlong nativeCoreWorkerPointer, jbyteArray actorId,
    jobject functionDescriptor, jobject args, jint numReturns, jobject callOptions) {
  auto actor_id = JavaByteArrayToId<ray::ActorID>(env, actorId);
  auto ray_function = ToRayFunction(env, functionDescriptor);
  auto task_args = ToTaskArgs(env, args);
  auto task_options = ToTaskOptions(env, numReturns, callOptions);
  std::vector<ObjectID> return_ids;
  auto status =
      GetCoreWorker(nativeCoreWorkerPointer)
          .SubmitActorTask(actor_id, ray_function, task_args, task_options, &return_ids);
  THROW_EXCEPTION_AND_RETURN_IF_NOT_OK(env, status, nullptr);
  return NativeIdVectorToJavaByteArrayList(env, return_ids);
}
#ifdef __cplusplus
}
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/lib/java/org_ray_runtime_task_NativeTaskSubmitter.h
|
C/C++ Header
|
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_runtime_task_NativeTaskSubmitter */
#ifndef _Included_org_ray_runtime_task_NativeTaskSubmitter
#define _Included_org_ray_runtime_task_NativeTaskSubmitter
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: org_ray_runtime_task_NativeTaskSubmitter
* Method: nativeSubmitTask
* Signature:
* (JLorg/ray/runtime/functionmanager/FunctionDescriptor;Ljava/util/List;ILorg/ray/api/options/CallOptions;)Ljava/util/List;
*/
JNIEXPORT jobject JNICALL Java_org_ray_runtime_task_NativeTaskSubmitter_nativeSubmitTask(
JNIEnv *, jclass, jlong, jobject, jobject, jint, jobject);
/*
* Class: org_ray_runtime_task_NativeTaskSubmitter
* Method: nativeCreateActor
* Signature:
* (JLorg/ray/runtime/functionmanager/FunctionDescriptor;Ljava/util/List;Lorg/ray/api/options/ActorCreationOptions;)[B
*/
JNIEXPORT jbyteArray JNICALL
Java_org_ray_runtime_task_NativeTaskSubmitter_nativeCreateActor(JNIEnv *, jclass, jlong,
jobject, jobject,
jobject);
/*
* Class: org_ray_runtime_task_NativeTaskSubmitter
* Method: nativeSubmitActorTask
* Signature:
* (J[BLorg/ray/runtime/functionmanager/FunctionDescriptor;Ljava/util/List;ILorg/ray/api/options/CallOptions;)Ljava/util/List;
*/
JNIEXPORT jobject JNICALL
Java_org_ray_runtime_task_NativeTaskSubmitter_nativeSubmitActorTask(JNIEnv *, jclass,
jlong, jbyteArray,
jobject, jobject,
jint, jobject);
#ifdef __cplusplus
}
#endif
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/profiling.cc
|
C++
|
#include "ray/core_worker/profiling.h"
#include <chrono>
namespace ray {
namespace worker {
/// Record the start of a profile event. The end time is filled in and the
/// completed event handed to `profiler` when this object is destroyed (see
/// ~ProfileEvent in profiling.h).
ProfileEvent::ProfileEvent(const std::shared_ptr<Profiler> &profiler,
                           const std::string &event_type)
    : profiler_(profiler) {
  rpc_event_.set_event_type(event_type);
  // Nanoseconds -> fractional seconds.
  rpc_event_.set_start_time(absl::GetCurrentTimeNanos() / 1e9);
}
/// Construct a profiler that batches events in memory and flushes them to the
/// GCS once per second on the given io_service.
Profiler::Profiler(WorkerContext &worker_context, const std::string &node_ip_address,
                   boost::asio::io_service &io_service,
                   const std::shared_ptr<gcs::GcsClient> &gcs_client)
    : io_service_(io_service),
      timer_(io_service_, boost::asio::chrono::seconds(1)),
      rpc_profile_data_(new rpc::ProfileTableData()),
      gcs_client_(gcs_client) {
  // Stamp the component identity (worker type, worker ID, node IP) once; it
  // is copied into each flushed batch by FlushEvents.
  rpc_profile_data_->set_component_type(WorkerTypeString(worker_context.GetWorkerType()));
  rpc_profile_data_->set_component_id(worker_context.GetWorkerID().Binary());
  rpc_profile_data_->set_node_ip_address(node_ip_address);
  // Arm the first flush; FlushEvents re-arms the timer on every tick.
  timer_.async_wait(boost::bind(&Profiler::FlushEvents, this));
}
/// Queue one profile event; it is sent to the GCS on the next periodic flush.
void Profiler::AddEvent(const rpc::ProfileTableData::ProfileEvent &event) {
  absl::MutexLock lock(&mutex_);
  rpc_profile_data_->add_profile_events()->CopyFrom(event);
}
/// Periodic timer callback: swap out the accumulated batch under the lock,
/// push it to the GCS outside the lock, then re-arm the timer.
void Profiler::FlushEvents() {
  auto cur_profile_data = std::make_shared<rpc::ProfileTableData>();
  {
    absl::MutexLock lock(&mutex_);
    if (rpc_profile_data_->profile_events_size() != 0) {
      // Carry the component identity over to the fresh (empty) batch, then
      // swap so cur_profile_data holds the events to send and
      // rpc_profile_data_ becomes the new accumulation buffer.
      cur_profile_data->set_component_type(rpc_profile_data_->component_type());
      cur_profile_data->set_component_id(rpc_profile_data_->component_id());
      cur_profile_data->set_node_ip_address(rpc_profile_data_->node_ip_address());
      rpc_profile_data_.swap(cur_profile_data);
    }
  }
  // The RPC is issued without holding mutex_ so AddEvent is never blocked on
  // network I/O.
  if (cur_profile_data->profile_events_size() != 0) {
    if (!gcs_client_->Stats().AsyncAddProfileData(cur_profile_data, nullptr).ok()) {
      RAY_LOG(WARNING) << "Failed to push profile events to GCS.";
    } else {
      RAY_LOG(DEBUG) << "Pushed " << cur_profile_data->profile_events_size()
                     << " events to GCS.";
    }
  }
  // Reset the timer to 1 second from the previous expiration time to avoid drift.
  timer_.expires_at(timer_.expiry() + boost::asio::chrono::seconds(1));
  timer_.async_wait(boost::bind(&Profiler::FlushEvents, this));
}
} // namespace worker
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/profiling.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_PROFILING_H
#define RAY_CORE_WORKER_PROFILING_H
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "ray/core_worker/context.h"
#include "ray/gcs/redis_gcs_client.h"
namespace ray {
namespace worker {
/// Buffers profile events in memory and periodically (once per second)
/// flushes them to the GCS. AddEvent may be called from any thread; the
/// flush loop runs on the supplied io_service.
class Profiler {
 public:
  Profiler(WorkerContext &worker_context, const std::string &node_ip_address,
           boost::asio::io_service &io_service,
           const std::shared_ptr<gcs::GcsClient> &gcs_client);

  // Add an event to the queue to be flushed periodically.
  void AddEvent(const rpc::ProfileTableData::ProfileEvent &event) LOCKS_EXCLUDED(mutex_);

 private:
  // Flush all of the events that have been added since last flush to the GCS.
  void FlushEvents() LOCKS_EXCLUDED(mutex_);

  // Mutex guarding rpc_profile_data_.
  absl::Mutex mutex_;

  // ASIO IO service event loop. Must be started by the caller.
  boost::asio::io_service &io_service_;

  // Timer used to periodically flush events to the GCS.
  boost::asio::steady_timer timer_;

  // RPC message containing profiling data. Holds the queue of profile events
  // until they are flushed.
  std::shared_ptr<rpc::ProfileTableData> rpc_profile_data_ GUARDED_BY(mutex_);

  // Client to the GCS used to push profile events to it.
  std::shared_ptr<gcs::GcsClient> gcs_client_;
};
/// RAII helper that measures the wall-clock duration of a scope: the start
/// time is recorded at construction (see profiling.cc) and the end time at
/// destruction, when the completed event is handed to the Profiler.
class ProfileEvent {
 public:
  ProfileEvent(const std::shared_ptr<Profiler> &profiler, const std::string &event_type);

  // Set the end time for the event and add it to the profiler.
  ~ProfileEvent() {
    // Nanoseconds -> fractional seconds, matching the start_time units.
    rpc_event_.set_end_time(absl::GetCurrentTimeNanos() / 1e9);
    profiler_->AddEvent(rpc_event_);
  }

  // Set extra metadata for the event, which could change during the event.
  void SetExtraData(const std::string &extra_data) {
    rpc_event_.set_extra_data(extra_data);
  }

 private:
  // shared_ptr to the profiler that this event will be added to when it is destructed.
  std::shared_ptr<Profiler> profiler_;

  // Underlying proto data structure that holds the event data.
  rpc::ProfileTableData::ProfileEvent rpc_event_;
};
} // namespace worker
} // namespace ray
#endif // RAY_CORE_WORKER_PROFILING_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/reference_count.cc
|
C++
|
#include "ray/core_worker/reference_count.h"
namespace ray {
/// Record ownership information for an object we borrowed. The reference
/// entry must already exist (RAY_CHECK otherwise); the owner is only set the
/// first time, so later calls with a different owner are ignored.
void ReferenceCounter::AddBorrowedObject(const ObjectID &object_id,
                                         const TaskID &owner_id,
                                         const rpc::Address &owner_address) {
  absl::MutexLock lock(&mutex_);
  auto it = object_id_refs_.find(object_id);
  RAY_CHECK(it != object_id_refs_.end());
  if (!it->second.owner.has_value()) {
    it->second.owner = {owner_id, owner_address};
  }
}
/// Register an object that this worker owns. Must be called at most once per
/// ObjectID (RAY_CHECK on duplicates).
void ReferenceCounter::AddOwnedObject(const ObjectID &object_id, const TaskID &owner_id,
                                      const rpc::Address &owner_address) {
  absl::MutexLock lock(&mutex_);
  RAY_CHECK(object_id_refs_.count(object_id) == 0)
      << "Tried to create an owned object that already exists: " << object_id;
  // If the entry doesn't exist, we initialize the direct reference count to zero
  // because this corresponds to a submitted task whose return ObjectID will be created
  // in the frontend language, incrementing the reference count.
  object_id_refs_.emplace(object_id, Reference(owner_id, owner_address));
}
/// Bump the frontend (local) reference count for `object_id`, creating a
/// fresh entry on first sight. Ownership info for such entries is filled in
/// later via AddBorrowedObject.
void ReferenceCounter::AddLocalReference(const ObjectID &object_id) {
  absl::MutexLock lock(&mutex_);
  // emplace is a no-op when the key already exists, so this either creates a
  // default Reference or returns the existing one.
  auto entry = object_id_refs_.emplace(object_id, Reference()).first;
  entry->second.local_ref_count++;
}
/// Drop one frontend (local) reference. When both the local and the
/// submitted-task counts reach zero, the entry is erased and the ID is
/// appended to `deleted` (if non-null).
void ReferenceCounter::RemoveLocalReference(const ObjectID &object_id,
                                            std::vector<ObjectID> *deleted) {
  absl::MutexLock lock(&mutex_);
  auto entry = object_id_refs_.find(object_id);
  if (entry == object_id_refs_.end()) {
    RAY_LOG(WARNING) << "Tried to decrease ref count for nonexistent object ID: "
                     << object_id;
    return;
  }
  entry->second.local_ref_count--;
  if (entry->second.local_ref_count == 0 &&
      entry->second.submitted_task_ref_count == 0) {
    DeleteReferenceInternal(entry, deleted);
  }
}
/// Bump the submitted-task reference count for every ID in `object_ids`.
/// Entries are created on demand: a large argument passed transparently by
/// reference may have no frontend reference (and thus no entry) yet.
void ReferenceCounter::AddSubmittedTaskReferences(
    const std::vector<ObjectID> &object_ids) {
  absl::MutexLock lock(&mutex_);
  for (const ObjectID &object_id : object_ids) {
    // emplace is a no-op for existing keys, so this handles both cases.
    auto entry = object_id_refs_.emplace(object_id, Reference()).first;
    entry->second.submitted_task_ref_count++;
  }
}
/// Drop one submitted-task reference for every ID in `object_ids`. IDs with
/// no entry are logged and skipped; entries whose both counts reach zero are
/// erased and reported via `deleted`.
///
/// \param[in] object_ids The object IDs to remove references for.
/// \param[out] deleted The object IDs whose reference counts reached zero.
void ReferenceCounter::RemoveSubmittedTaskReferences(
    const std::vector<ObjectID> &object_ids, std::vector<ObjectID> *deleted) {
  absl::MutexLock lock(&mutex_);
  for (const ObjectID &object_id : object_ids) {
    auto it = object_id_refs_.find(object_id);
    if (it == object_id_refs_.end()) {
      RAY_LOG(WARNING) << "Tried to decrease ref count for nonexistent object ID: "
                       << object_id;
      // Bug fix: this used to be `return`, which silently dropped the
      // decrements for every remaining ID in the batch, leaking references.
      continue;
    }
    if (--it->second.submitted_task_ref_count == 0 && it->second.local_ref_count == 0) {
      DeleteReferenceInternal(it, deleted);
    }
  }
}
/// Look up the owner of `object_id`. Returns true and fills the out-params
/// only when the object is tracked AND its owner is known; returns false
/// otherwise, leaving the out-params untouched.
bool ReferenceCounter::GetOwner(const ObjectID &object_id, TaskID *owner_id,
                                rpc::Address *owner_address) const {
  absl::MutexLock lock(&mutex_);
  auto entry = object_id_refs_.find(object_id);
  if (entry == object_id_refs_.end()) {
    return false;
  }
  const auto &owner = entry->second.owner;
  if (!owner.has_value()) {
    return false;
  }
  *owner_id = owner->first;
  *owner_address = owner->second;
  return true;
}
/// Force-delete the reference entries for the given object IDs, running any
/// registered delete callbacks. IDs with no entry are skipped.
///
/// \param[in] object_ids The object IDs whose entries should be removed.
void ReferenceCounter::DeleteReferences(const std::vector<ObjectID> &object_ids) {
  absl::MutexLock lock(&mutex_);
  for (const ObjectID &object_id : object_ids) {
    auto it = object_id_refs_.find(object_id);
    if (it == object_id_refs_.end()) {
      // Bug fix: this used to be `return`, which aborted the whole batch on
      // the first missing ID and left the remaining IDs undeleted.
      continue;
    }
    DeleteReferenceInternal(it, nullptr);
  }
}
/// Erase one reference entry, firing its delete callback (if registered)
/// before removal and appending the ID to `deleted` (if non-null).
/// Caller must hold mutex_; `it` is invalidated by the erase.
void ReferenceCounter::DeleteReferenceInternal(
    absl::flat_hash_map<ObjectID, Reference>::iterator it,
    std::vector<ObjectID> *deleted) {
  // Run the out-of-scope callback while the entry is still alive.
  if (it->second.on_delete) {
    it->second.on_delete(it->first);
  }
  if (deleted) {
    deleted->push_back(it->first);
  }
  object_id_refs_.erase(it);
}
/// Register the callback to run when `object_id` goes out of scope.
/// Returns false when the object is not tracked; at most one callback may be
/// registered per object (RAY_CHECK on a second registration).
bool ReferenceCounter::SetDeleteCallback(
    const ObjectID &object_id, const std::function<void(const ObjectID &)> callback) {
  absl::MutexLock lock(&mutex_);
  auto entry = object_id_refs_.find(object_id);
  if (entry == object_id_refs_.end()) {
    return false;
  }
  RAY_CHECK(!entry->second.on_delete);
  entry->second.on_delete = callback;
  return true;
}
/// True iff `object_id` currently has a tracked reference entry.
bool ReferenceCounter::HasReference(const ObjectID &object_id) const {
  absl::MutexLock lock(&mutex_);
  return object_id_refs_.count(object_id) > 0;
}
/// Number of ObjectIDs currently tracked (i.e., with a reference entry).
size_t ReferenceCounter::NumObjectIDsInScope() const {
  absl::MutexLock lock(&mutex_);
  return object_id_refs_.size();
}
/// Snapshot the set of all ObjectIDs currently in scope.
std::unordered_set<ObjectID> ReferenceCounter::GetAllInScopeObjectIDs() const {
  absl::MutexLock lock(&mutex_);
  std::unordered_set<ObjectID> in_scope_object_ids;
  in_scope_object_ids.reserve(object_id_refs_.size());
  // Iterate by const reference: the original `for (auto it : ...)` copied the
  // whole map entry (ObjectID + Reference, including its std::function) on
  // every iteration just to read the key.
  for (const auto &entry : object_id_refs_) {
    in_scope_object_ids.insert(entry.first);
  }
  return in_scope_object_ids;
}
/// Snapshot all tracked ObjectIDs with their (local, submitted_task)
/// reference counts. Intended for debugging.
std::unordered_map<ObjectID, std::pair<size_t, size_t>>
ReferenceCounter::GetAllReferenceCounts() const {
  absl::MutexLock lock(&mutex_);
  std::unordered_map<ObjectID, std::pair<size_t, size_t>> all_ref_counts;
  all_ref_counts.reserve(object_id_refs_.size());
  // Iterate by const reference: the original `for (auto it : ...)` copied
  // each map entry (including the Reference's std::function) per iteration.
  for (const auto &entry : object_id_refs_) {
    all_ref_counts.emplace(
        entry.first, std::pair<size_t, size_t>(entry.second.local_ref_count,
                                               entry.second.submitted_task_ref_count));
  }
  return all_ref_counts;
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/reference_count.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_REF_COUNT_H
#define RAY_CORE_WORKER_REF_COUNT_H
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/synchronization/mutex.h"
#include "ray/common/id.h"
#include "ray/protobuf/common.pb.h"
#include "ray/util/logging.h"
namespace ray {
/// Class used by the core worker to keep track of ObjectID reference counts for garbage
/// collection. This class is thread safe.
class ReferenceCounter {
 public:
  ReferenceCounter() {}

  ~ReferenceCounter() {}

  /// Increase the reference count for the ObjectID by one. If there is no
  /// entry for the ObjectID, one will be created. The object ID will not have
  /// any owner information, since we don't know how it was created.
  ///
  /// \param[in] object_id The object to increment the count for.
  void AddLocalReference(const ObjectID &object_id) LOCKS_EXCLUDED(mutex_);

  /// Decrease the local reference count for the ObjectID by one.
  ///
  /// \param[in] object_id The object to decrement the count for.
  /// \param[out] deleted List to store objects that hit zero ref count.
  void RemoveLocalReference(const ObjectID &object_id, std::vector<ObjectID> *deleted)
      LOCKS_EXCLUDED(mutex_);

  /// Add references for the provided object IDs that correspond to them being
  /// dependencies to a submitted task.
  ///
  /// \param[in] object_ids The object IDs to add references for.
  void AddSubmittedTaskReferences(const std::vector<ObjectID> &object_ids)
      LOCKS_EXCLUDED(mutex_);

  /// Remove references for the provided object IDs that correspond to them being
  /// dependencies to a submitted task. This should be called when the
  /// dependencies' values have been inlined or when the task finishes for
  /// plasma dependencies.
  ///
  /// \param[in] object_ids The object IDs to remove references for.
  /// \param[out] deleted The object IDs whose reference counts reached zero.
  void RemoveSubmittedTaskReferences(const std::vector<ObjectID> &object_ids,
                                     std::vector<ObjectID> *deleted)
      LOCKS_EXCLUDED(mutex_);

  /// Add an object that we own. The object may depend on other objects.
  /// Dependencies for each ObjectID must be set at most once. The local
  /// reference count for the ObjectID is set to zero, which assumes that an
  /// ObjectID for it will be created in the language frontend after this call.
  ///
  /// TODO(swang): We could avoid copying the owner_id and owner_address since
  /// we are the owner, but it is easier to store a copy for now, since the
  /// owner ID will change for workers executing normal tasks and it is
  /// possible to have leftover references after a task has finished.
  ///
  /// \param[in] object_id The ID of the object that we own.
  /// \param[in] owner_id The ID of the object's owner.
  /// \param[in] owner_address The address of the object's owner.
  void AddOwnedObject(const ObjectID &object_id, const TaskID &owner_id,
                      const rpc::Address &owner_address) LOCKS_EXCLUDED(mutex_);

  /// Add an object that we are borrowing.
  ///
  /// \param[in] object_id The ID of the object that we are borrowing.
  /// \param[in] owner_id The ID of the owner of the object. This is either the
  /// task ID (for non-actors) or the actor ID of the owner.
  /// \param[in] owner_address The owner's address.
  void AddBorrowedObject(const ObjectID &object_id, const TaskID &owner_id,
                         const rpc::Address &owner_address) LOCKS_EXCLUDED(mutex_);

  /// Get the owner ID and address of the given object.
  ///
  /// \param[in] object_id The ID of the object to look up.
  /// \param[out] owner_id The TaskID of the object owner.
  /// \param[out] owner_address The address of the object owner.
  /// \return true if the owner is known, false otherwise.
  bool GetOwner(const ObjectID &object_id, TaskID *owner_id,
                rpc::Address *owner_address) const LOCKS_EXCLUDED(mutex_);

  /// Manually delete the objects from the reference counter.
  void DeleteReferences(const std::vector<ObjectID> &object_ids) LOCKS_EXCLUDED(mutex_);

  /// Sets the callback that will be run when the object goes out of scope.
  /// Returns true if the object was in scope and the callback was added, else false.
  bool SetDeleteCallback(const ObjectID &object_id,
                         const std::function<void(const ObjectID &)> callback)
      LOCKS_EXCLUDED(mutex_);

  /// Returns the total number of ObjectIDs currently in scope.
  size_t NumObjectIDsInScope() const LOCKS_EXCLUDED(mutex_);

  /// Returns whether this object has an active reference.
  bool HasReference(const ObjectID &object_id) const LOCKS_EXCLUDED(mutex_);

  /// Returns a set of all ObjectIDs currently in scope (i.e., nonzero reference count).
  std::unordered_set<ObjectID> GetAllInScopeObjectIDs() const LOCKS_EXCLUDED(mutex_);

  /// Returns a map of all ObjectIDs currently in scope with a pair of their
  /// (local, submitted_task) reference counts. For debugging purposes.
  std::unordered_map<ObjectID, std::pair<size_t, size_t>> GetAllReferenceCounts() const
      LOCKS_EXCLUDED(mutex_);

 private:
  /// Metadata for an ObjectID reference in the language frontend.
  struct Reference {
    /// Constructor for a reference whose origin is unknown.
    Reference() : owned_by_us(false) {}
    /// Constructor for a reference that we created.
    Reference(const TaskID &owner_id, const rpc::Address &owner_address)
        : owned_by_us(true), owner({owner_id, owner_address}) {}

    /// The local ref count for the ObjectID in the language frontend.
    size_t local_ref_count = 0;
    /// The ref count for submitted tasks that depend on the ObjectID.
    size_t submitted_task_ref_count = 0;
    /// Whether we own the object. If we own the object, then we are
    /// responsible for tracking the state of the task that creates the object
    /// (see task_manager.h).
    bool owned_by_us;
    /// The object's owner, if we know it. This has no value if we do not know
    /// the object's owner (because distributed ref counting is not yet
    /// implemented).
    absl::optional<std::pair<TaskID, rpc::Address>> owner;
    /// Callback that will be called when this ObjectID no longer has references.
    std::function<void(const ObjectID &)> on_delete;
  };

  /// Helper method to delete an entry from the reference map and run any necessary
  /// callbacks. Assumes that the entry is in object_id_refs_ and invalidates the
  /// iterator.
  void DeleteReferenceInternal(absl::flat_hash_map<ObjectID, Reference>::iterator entry,
                               std::vector<ObjectID> *deleted)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  /// Protects access to the reference counting state.
  mutable absl::Mutex mutex_;

  /// Holds all reference counts and dependency information for tracked ObjectIDs.
  absl::flat_hash_map<ObjectID, Reference> object_id_refs_ GUARDED_BY(mutex_);
};
} // namespace ray
#endif // RAY_CORE_WORKER_REF_COUNT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/reference_count_test.cc
|
C++
|
#include "ray/core_worker/reference_count.h"
#include <vector>
#include "gtest/gtest.h"
#include "ray/common/ray_object.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
namespace ray {
// Test fixture that gives every test case a fresh ReferenceCounter instance.
class ReferenceCountTest : public ::testing::Test {
 protected:
  std::unique_ptr<ReferenceCounter> rc;
  virtual void SetUp() { rc = std::unique_ptr<ReferenceCounter>(new ReferenceCounter); }
  virtual void TearDown() {}
};
// Tests basic incrementing/decrementing of direct/submitted task reference counts. An
// entry should only be removed once both of its reference counts reach zero.
TEST_F(ReferenceCountTest, TestBasic) {
  std::vector<ObjectID> out;
  ObjectID id1 = ObjectID::FromRandom();
  ObjectID id2 = ObjectID::FromRandom();

  // Phase 1: local references only. id1 is referenced twice, so the first
  // decrement must not delete it; `out` accumulates deleted IDs.
  rc->AddLocalReference(id1);
  rc->AddLocalReference(id1);
  rc->AddLocalReference(id2);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 2);
  rc->RemoveLocalReference(id1, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 2);
  ASSERT_EQ(out.size(), 0);
  rc->RemoveLocalReference(id2, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 1);
  ASSERT_EQ(out.size(), 1);
  rc->RemoveLocalReference(id1, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 0);
  ASSERT_EQ(out.size(), 2);
  out.clear();

  // Phase 2: submitted-task references only, mirroring phase 1.
  rc->AddSubmittedTaskReferences({id1});
  rc->AddSubmittedTaskReferences({id1, id2});
  ASSERT_EQ(rc->NumObjectIDsInScope(), 2);
  rc->RemoveSubmittedTaskReferences({id1}, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 2);
  ASSERT_EQ(out.size(), 0);
  rc->RemoveSubmittedTaskReferences({id2}, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 1);
  ASSERT_EQ(out.size(), 1);
  rc->RemoveSubmittedTaskReferences({id1}, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 0);
  ASSERT_EQ(out.size(), 2);
  out.clear();

  // Phase 3: mixed local and submitted-task references; an entry must survive
  // until BOTH counts hit zero.
  rc->AddLocalReference(id1);
  rc->AddSubmittedTaskReferences({id1, id2});
  rc->AddLocalReference(id2);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 2);
  rc->RemoveLocalReference(id1, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 2);
  ASSERT_EQ(out.size(), 0);
  rc->RemoveSubmittedTaskReferences({id2}, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 2);
  ASSERT_EQ(out.size(), 0);
  rc->RemoveSubmittedTaskReferences({id1}, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 1);
  ASSERT_EQ(out.size(), 1);
  rc->RemoveLocalReference(id2, &out);
  ASSERT_EQ(rc->NumObjectIDsInScope(), 0);
  ASSERT_EQ(out.size(), 2);
  out.clear();
}
// Tests that we can get the owner address correctly for objects that we own,
// objects that we borrowed via a serialized object ID, and objects whose
// origin we do not know.
TEST_F(ReferenceCountTest, TestOwnerAddress) {
  // Case 1: an object we own; owner info should round-trip through GetOwner.
  auto object_id = ObjectID::FromRandom();
  TaskID task_id = TaskID::ForFakeTask();
  rpc::Address address;
  address.set_ip_address("1234");
  rc->AddOwnedObject(object_id, task_id, address);

  TaskID added_id;
  rpc::Address added_address;
  ASSERT_TRUE(rc->GetOwner(object_id, &added_id, &added_address));
  ASSERT_EQ(task_id, added_id);
  ASSERT_EQ(address.ip_address(), added_address.ip_address());

  // Case 2: a second owned object with a different owner address; each entry
  // must keep its own owner info.
  auto object_id2 = ObjectID::FromRandom();
  task_id = TaskID::ForFakeTask();
  address.set_ip_address("5678");
  rc->AddOwnedObject(object_id2, task_id, address);
  ASSERT_TRUE(rc->GetOwner(object_id2, &added_id, &added_address));
  ASSERT_EQ(task_id, added_id);
  ASSERT_EQ(address.ip_address(), added_address.ip_address());

  // Case 3: an object of unknown origin. GetOwner fails both before and after
  // adding a plain local reference, since no owner was ever recorded.
  auto object_id3 = ObjectID::FromRandom();
  ASSERT_FALSE(rc->GetOwner(object_id3, &added_id, &added_address));
  rc->AddLocalReference(object_id3);
  ASSERT_FALSE(rc->GetOwner(object_id3, &added_id, &added_address));
}
// Tests that the ref counts are properly integrated into the local
// object memory store.
TEST(MemoryStoreIntegrationTest, TestSimple) {
  ObjectID id1 = ObjectID::FromRandom().WithDirectTransportType();
  ObjectID id2 = ObjectID::FromRandom().WithDirectTransportType();
  uint8_t data[] = {1, 2, 3, 4, 5, 6, 7, 8};
  RayObject buffer(std::make_shared<LocalMemoryBuffer>(data, sizeof(data)), nullptr);

  auto rc = std::shared_ptr<ReferenceCounter>(new ReferenceCounter());
  CoreWorkerMemoryStore store(nullptr, rc);

  // Putting an object with no references is ignored: the store refuses to
  // hold values nothing could ever retrieve (see Put in memory_store.cc).
  RAY_CHECK_OK(store.Put(buffer, id2));
  ASSERT_EQ(store.Size(), 0);

  // With ref counting enabled, `remove_after_get` is overridden: the object
  // must still be in the store after a successful Get.
  rc->AddLocalReference(id1);
  RAY_CHECK_OK(store.Put(buffer, id1));
  ASSERT_EQ(store.Size(), 1);
  std::vector<std::shared_ptr<RayObject>> results;
  WorkerContext ctx(WorkerType::WORKER, JobID::Nil());
  RAY_CHECK_OK(store.Get({id1}, /*num_objects*/ 1, /*timeout_ms*/ -1, ctx,
                         /*remove_after_get*/ true, &results));
  ASSERT_EQ(results.size(), 1);
  ASSERT_EQ(store.Size(), 1);
}
} // namespace ray
// Standard googletest entry point: parse gtest flags, then run all tests.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/store_provider/memory_store/memory_store.cc
|
C++
|
#include <condition_variable>
#include "ray/common/ray_config.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/core_worker.h"
namespace ray {
/// A class that represents a `Get` request.
/// A class that represents a `Get` request.
///
/// Tracks one blocking Get() call: the set of object IDs it is waiting on,
/// how many of them must arrive before the caller is woken, and the values
/// received so far. Thread-safe: Set(), Get() and Wait() may race.
class GetRequest {
 public:
  GetRequest(absl::flat_hash_set<ObjectID> object_ids, size_t num_objects,
             bool remove_after_get);

  const absl::flat_hash_set<ObjectID> &ObjectIds() const;

  /// Wait until all requested objects are available, or timeout happens.
  ///
  /// \param timeout_ms The maximum time in milliseconds to wait for, or -1
  ///        to wait forever.
  /// \return Whether all requested objects are available.
  bool Wait(int64_t timeout_ms);
  /// Set the object content for the specific object id.
  void Set(const ObjectID &object_id, std::shared_ptr<RayObject> buffer);
  /// Get the object content for the specific object id.
  std::shared_ptr<RayObject> Get(const ObjectID &object_id) const;
  /// Whether this is a `get` request whose objects should be removed from
  /// the store once the request finishes.
  bool ShouldRemoveObjects() const;

 private:
  /// Wait until all requested objects are available.
  void Wait();

  /// The object IDs involved in this request.
  const absl::flat_hash_set<ObjectID> object_ids_;
  /// The object information for the objects in this request.
  absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> objects_;
  /// Number of objects required before the request is considered ready.
  const size_t num_objects_;
  // Whether the requested objects should be removed from store
  // after `get` returns.
  const bool remove_after_get_;
  // Whether all the requested objects are available.
  bool is_ready_;
  // Guards objects_ and is_ready_; cv_ is signalled when is_ready_ flips.
  mutable std::mutex mutex_;
  std::condition_variable cv_;
};
/// Construct a request waiting for `required` of the given `ids`.
GetRequest::GetRequest(absl::flat_hash_set<ObjectID> ids, size_t required,
                       bool remove_after_get)
    : object_ids_(std::move(ids)),
      num_objects_(required),
      remove_after_get_(remove_after_get),
      is_ready_(false) {
  // A request cannot require more objects than it is waiting on.
  RAY_CHECK(num_objects_ <= object_ids_.size());
}
// Accessor for the set of IDs this request is waiting on.
const absl::flat_hash_set<ObjectID> &GetRequest::ObjectIds() const {
  return object_ids_;
}
// Whether satisfied objects should be erased from the store afterwards.
bool GetRequest::ShouldRemoveObjects() const {
  return remove_after_get_;
}
bool GetRequest::Wait(int64_t timeout_ms) {
RAY_CHECK(timeout_ms >= 0 || timeout_ms == -1);
if (timeout_ms == -1) {
// Wait forever until all objects are ready.
Wait();
return true;
}
// Wait until all objects are ready, or the timeout expires.
std::unique_lock<std::mutex> lock(mutex_);
while (!is_ready_) {
auto status = cv_.wait_for(lock, std::chrono::milliseconds(timeout_ms));
if (status == std::cv_status::timeout) {
return false;
}
}
return true;
}
void GetRequest::Wait() {
std::unique_lock<std::mutex> lock(mutex_);
while (!is_ready_) {
cv_.wait(lock);
}
}
void GetRequest::Set(const ObjectID &object_id, std::shared_ptr<RayObject> object) {
std::unique_lock<std::mutex> lock(mutex_);
if (is_ready_) {
return; // We have already hit the number of objects to return limit.
}
objects_.emplace(object_id, object);
if (objects_.size() == num_objects_) {
is_ready_ = true;
cv_.notify_all();
}
}
/// Return the stored value for `object_id`, or nullptr if it has not been
/// delivered (yet).
std::shared_ptr<RayObject> GetRequest::Get(const ObjectID &object_id) const {
  std::unique_lock<std::mutex> lock(mutex_);
  const auto it = objects_.find(object_id);
  return it == objects_.end() ? nullptr : it->second;
}
/// Construct the in-memory store.
/// \param store_in_plasma Optional callback used to spill objects to plasma.
/// \param counter Optional reference counter; when set, ref counts control
///        object lifetime and `remove_after_get` is ignored.
/// \param raylet_client Optional client used for blocked/unblocked IPCs.
CoreWorkerMemoryStore::CoreWorkerMemoryStore(
    std::function<void(const RayObject &, const ObjectID &)> store_in_plasma,
    std::shared_ptr<ReferenceCounter> counter,
    std::shared_ptr<raylet::RayletClient> raylet_client)
    : store_in_plasma_(store_in_plasma),
      ref_counter_(counter),
      raylet_client_(raylet_client) {}
/// Invoke `callback` with the value of `object_id` — immediately if present,
/// otherwise once Put() delivers it.
void CoreWorkerMemoryStore::GetAsync(
    const ObjectID &object_id, std::function<void(std::shared_ptr<RayObject>)> callback) {
  std::shared_ptr<RayObject> ptr;
  {
    absl::MutexLock lock(&mu_);
    auto iter = objects_.find(object_id);
    if (iter != objects_.end()) {
      // Value already present: run the callback after releasing the lock.
      ptr = iter->second;
    } else {
      // Not here yet: queue the callback; Put() will fire it on arrival.
      object_async_get_requests_[object_id].push_back(callback);
    }
  }
  // It's important for performance to run the callback outside the lock.
  if (ptr != nullptr) {
    callback(ptr);
  }
}
/// Return the local value for `object_id` if it is in memory; otherwise mark
/// the ID for promotion to plasma and return nullptr. Objects already spilled
/// to plasma (IsInPlasmaError) also yield nullptr.
std::shared_ptr<RayObject> CoreWorkerMemoryStore::GetOrPromoteToPlasma(
    const ObjectID &object_id) {
  absl::MutexLock lock(&mu_);
  const auto it = objects_.find(object_id);
  if (it == objects_.end()) {
    // Put() will spill the value to plasma when it eventually arrives.
    RAY_CHECK(store_in_plasma_ != nullptr)
        << "Cannot promote object without plasma provider callback.";
    promoted_to_plasma_.insert(object_id);
    return nullptr;
  }
  return it->second->IsInPlasmaError() ? nullptr : it->second;
}
/// Insert `object` under `object_id`, waking any pending sync/async getters.
/// Only direct-call objects may be stored here. Idempotent: re-putting an
/// existing ID is a no-op.
Status CoreWorkerMemoryStore::Put(const RayObject &object, const ObjectID &object_id) {
  RAY_CHECK(object_id.IsDirectCallType());
  std::vector<std::function<void(std::shared_ptr<RayObject>)>> async_callbacks;
  // Shared copy of the value that getters can hold onto independently.
  auto object_entry =
      std::make_shared<RayObject>(object.GetData(), object.GetMetadata(), true);
  {
    absl::MutexLock lock(&mu_);

    auto iter = objects_.find(object_id);
    if (iter != objects_.end()) {
      return Status::OK();  // Object already exists in the store, which is fine.
    }

    // Collect queued GetAsync callbacks; they are run after the lock drops.
    auto async_callback_it = object_async_get_requests_.find(object_id);
    if (async_callback_it != object_async_get_requests_.end()) {
      auto &callbacks = async_callback_it->second;
      async_callbacks = std::move(callbacks);
      object_async_get_requests_.erase(async_callback_it);
    }

    // If GetOrPromoteToPlasma flagged this ID, spill the value to plasma now.
    auto promoted_it = promoted_to_plasma_.find(object_id);
    if (promoted_it != promoted_to_plasma_.end()) {
      RAY_CHECK(store_in_plasma_ != nullptr);
      if (!object.IsInPlasmaError()) {
        // Only need to promote to plasma if it wasn't already put into plasma
        // by the task that created the object.
        store_in_plasma_(object, object_id);
      }
      promoted_to_plasma_.erase(promoted_it);
    }

    bool should_add_entry = true;
    // Hand the value to every blocked Get() waiting on this ID.
    auto object_request_iter = object_get_requests_.find(object_id);
    if (object_request_iter != object_get_requests_.end()) {
      auto &get_requests = object_request_iter->second;
      for (auto &get_request : get_requests) {
        get_request->Set(object_id, object_entry);
        // If ref counting is enabled, override the removal behaviour.
        if (get_request->ShouldRemoveObjects() && ref_counter_ == nullptr) {
          should_add_entry = false;
        }
      }
    }
    // Don't put it in the store, since we won't get a callback for deletion.
    if (ref_counter_ != nullptr && !ref_counter_->HasReference(object_id)) {
      should_add_entry = false;
    }

    if (should_add_entry) {
      // If there is no existing get request, then add the `RayObject` to map.
      objects_.emplace(object_id, object_entry);
    }
  }
  // It's important for performance to run the callbacks outside the lock.
  for (const auto &cb : async_callbacks) {
    cb(object_entry);
  }
  return Status::OK();
}
/// Blocking batch get: fill `results[i]` for each `object_ids[i]`, waiting
/// (up to `timeout_ms`, -1 = forever) until at least `num_objects` of them
/// are available. With ref counting enabled, `remove_after_get` is ignored.
Status CoreWorkerMemoryStore::Get(const std::vector<ObjectID> &object_ids,
                                  int num_objects, int64_t timeout_ms,
                                  const WorkerContext &ctx, bool remove_after_get,
                                  std::vector<std::shared_ptr<RayObject>> *results) {
  (*results).resize(object_ids.size(), nullptr);

  std::shared_ptr<GetRequest> get_request;
  int count = 0;

  {
    absl::flat_hash_set<ObjectID> remaining_ids;
    absl::flat_hash_set<ObjectID> ids_to_remove;

    absl::MutexLock lock(&mu_);
    // Check for existing objects and see if this get request can be fullfilled.
    for (size_t i = 0; i < object_ids.size() && count < num_objects; i++) {
      const auto &object_id = object_ids[i];
      auto iter = objects_.find(object_id);
      if (iter != objects_.end()) {
        (*results)[i] = iter->second;
        if (remove_after_get) {
          // Note that we cannot remove the object_id from `objects_` now,
          // because `object_ids` might have duplicate ids.
          ids_to_remove.insert(object_id);
        }
        count += 1;
      } else {
        remaining_ids.insert(object_id);
      }
    }
    RAY_CHECK(count <= num_objects);

    // Clean up the objects if ref counting is off.
    if (ref_counter_ == nullptr) {
      for (const auto &object_id : ids_to_remove) {
        objects_.erase(object_id);
      }
    }

    // Return if all the objects are obtained.
    if (remaining_ids.empty() || count >= num_objects) {
      return Status::OK();
    }

    // Number of objects that must still arrive to reach `num_objects` total.
    size_t required_objects = num_objects - (object_ids.size() - remaining_ids.size());

    // Otherwise, create a GetRequest to track remaining objects.
    get_request = std::make_shared<GetRequest>(std::move(remaining_ids), required_objects,
                                               remove_after_get);
    for (const auto &object_id : get_request->ObjectIds()) {
      object_get_requests_[object_id].push_back(get_request);
    }
  }

  // Only send block/unblock IPCs for non-actor tasks on the main thread.
  bool should_notify_raylet =
      (raylet_client_ != nullptr && ctx.ShouldReleaseResourcesOnBlockingCalls());

  // Wait for remaining objects (or timeout). Must not hold mu_ here, since
  // Put() needs it to deliver values to the request.
  if (should_notify_raylet) {
    RAY_CHECK_OK(raylet_client_->NotifyDirectCallTaskBlocked());
  }
  bool done = get_request->Wait(timeout_ms);
  if (should_notify_raylet) {
    RAY_CHECK_OK(raylet_client_->NotifyDirectCallTaskUnblocked());
  }

  {
    absl::MutexLock lock(&mu_);
    // Populate results.
    for (size_t i = 0; i < object_ids.size(); i++) {
      const auto &object_id = object_ids[i];
      if ((*results)[i] == nullptr) {
        (*results)[i] = get_request->Get(object_id);
      }
    }
    // Remove get request.
    for (const auto &object_id : get_request->ObjectIds()) {
      auto object_request_iter = object_get_requests_.find(object_id);
      if (object_request_iter != object_get_requests_.end()) {
        auto &get_requests = object_request_iter->second;
        // Erase get_request from the vector.
        auto it = std::find(get_requests.begin(), get_requests.end(), get_request);
        if (it != get_requests.end()) {
          get_requests.erase(it);
          // If the vector is empty, remove the object ID from the map.
          if (get_requests.empty()) {
            object_get_requests_.erase(object_request_iter);
          }
        }
      }
    }
  }

  if (done) {
    return Status::OK();
  } else {
    return Status::TimedOut("Get timed out: some object(s) not ready.");
  }
}
/// Convenience wrapper: get every ID in the set, stash found values into the
/// result map, and flag whether any value is an exception.
Status CoreWorkerMemoryStore::Get(
    const absl::flat_hash_set<ObjectID> &object_ids, int64_t timeout_ms,
    const WorkerContext &ctx,
    absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> *results,
    bool *got_exception) {
  const std::vector<ObjectID> ids(object_ids.begin(), object_ids.end());
  std::vector<std::shared_ptr<RayObject>> values;
  RAY_RETURN_NOT_OK(
      Get(ids, ids.size(), timeout_ms, ctx, /*remove_after_get=*/true, &values));
  for (size_t i = 0; i < ids.size(); i++) {
    const auto &value = values[i];
    if (value == nullptr) {
      continue;
    }
    (*results)[ids[i]] = value;
    // In-plasma markers do not count as exceptions: the real value should
    // then be found in plasma instead.
    if (value->IsException() && !value->IsInPlasmaError()) {
      *got_exception = true;
    }
  }
  return Status::OK();
}
/// Wait until at least `num_objects` of `object_ids` are available (or the
/// timeout elapses) and report the ready ones; objects are NOT removed.
Status CoreWorkerMemoryStore::Wait(const absl::flat_hash_set<ObjectID> &object_ids,
                                   int num_objects, int64_t timeout_ms,
                                   const WorkerContext &ctx,
                                   absl::flat_hash_set<ObjectID> *ready) {
  std::vector<ObjectID> ids(object_ids.begin(), object_ids.end());
  RAY_CHECK(object_ids.size() == ids.size());
  std::vector<std::shared_ptr<RayObject>> values;
  const auto status =
      Get(ids, num_objects, timeout_ms, ctx, /*remove_after_get=*/false, &values);
  // A timeout is not an error here; whatever became ready is still reported.
  if (!status.IsTimedOut()) {
    RAY_RETURN_NOT_OK(status);
  }
  for (size_t i = 0; i < ids.size(); i++) {
    if (values[i] != nullptr) {
      ready->insert(ids[i]);
    }
  }
  return Status::OK();
}
/// Delete the given objects from memory. IDs whose value is an in-plasma
/// marker are NOT erased here; they are reported via `plasma_ids_to_delete`
/// so the caller can free them from plasma.
void CoreWorkerMemoryStore::Delete(const absl::flat_hash_set<ObjectID> &object_ids,
                                   absl::flat_hash_set<ObjectID> *plasma_ids_to_delete) {
  absl::MutexLock lock(&mu_);
  for (const auto &object_id : object_ids) {
    const auto it = objects_.find(object_id);
    if (it == objects_.end()) {
      continue;
    }
    if (it->second->IsInPlasmaError()) {
      // The value lives in plasma; tell the caller to free it there.
      plasma_ids_to_delete->insert(object_id);
    } else {
      objects_.erase(it);
    }
  }
}
/// Unconditionally erase the given object IDs from the in-memory map.
void CoreWorkerMemoryStore::Delete(const std::vector<ObjectID> &object_ids) {
  absl::MutexLock lock(&mu_);
  for (const auto &id : object_ids) {
    objects_.erase(id);
  }
}
/// Whether the store holds `object_id` in memory. If the stored value is an
/// in-plasma marker, returns false and sets *in_plasma (which is otherwise
/// left untouched).
bool CoreWorkerMemoryStore::Contains(const ObjectID &object_id, bool *in_plasma) {
  absl::MutexLock lock(&mu_);
  const auto it = objects_.find(object_id);
  if (it == objects_.end()) {
    return false;
  }
  if (it->second->IsInPlasmaError()) {
    *in_plasma = true;
    return false;
  }
  return true;
}
/// Count the locally held objects and their total byte size.
MemoryStoreStats CoreWorkerMemoryStore::GetMemoryStoreStatisticalData() {
  absl::MutexLock lock(&mu_);
  MemoryStoreStats stats;
  for (const auto &entry : objects_) {
    if (entry.second->IsInPlasmaError()) {
      continue;  // Plasma-resident markers don't consume local memory.
    }
    stats.num_local_objects += 1;
    stats.used_object_store_memory += entry.second->GetSize();
  }
  return stats;
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/store_provider/memory_store/memory_store.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_MEMORY_STORE_H
#define RAY_CORE_WORKER_MEMORY_STORE_H
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/core_worker/common.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/reference_count.h"
namespace ray {
/// Aggregate statistics about the objects held in the local memory store.
struct MemoryStoreStats {
  // Number of objects stored in local memory.
  int32_t num_local_objects = 0;
  // Total bytes consumed by those objects.
  int64_t used_object_store_memory = 0;
};
class GetRequest;
class CoreWorkerMemoryStore;
/// The class provides implementations for local process memory store.
/// An example usage for this is to retrieve the returned objects from direct
/// actor call (see direct_actor_transport.cc).
class CoreWorkerMemoryStore {
 public:
  /// Create a memory store.
  ///
  /// \param[in] store_in_plasma If not null, this is used to spill to plasma.
  /// \param[in] counter If not null, this enables ref counting for local objects,
  ///            and the `remove_after_get` flag for Get() will be ignored.
  /// \param[in] raylet_client If not null, used to notify tasks blocked / unblocked.
  CoreWorkerMemoryStore(
      std::function<void(const RayObject &, const ObjectID &)> store_in_plasma = nullptr,
      std::shared_ptr<ReferenceCounter> counter = nullptr,
      std::shared_ptr<raylet::RayletClient> raylet_client = nullptr);
  ~CoreWorkerMemoryStore(){};

  /// Put an object with specified ID into object store.
  ///
  /// \param[in] object The ray object.
  /// \param[in] object_id Object ID specified by user.
  /// \return Status.
  Status Put(const RayObject &object, const ObjectID &object_id);

  /// Get a list of objects from the object store.
  ///
  /// \param[in] object_ids IDs of the objects to get. Duplicates are not allowed.
  /// \param[in] num_objects Number of objects that should appear.
  /// \param[in] timeout_ms Timeout in milliseconds, wait infinitely if it's negative.
  /// \param[in] ctx The current worker context.
  /// \param[in] remove_after_get When to remove the objects from store after `Get`
  /// finishes. This has no effect if ref counting is enabled.
  /// \param[out] results Result list of objects data.
  /// \return Status.
  Status Get(const std::vector<ObjectID> &object_ids, int num_objects, int64_t timeout_ms,
             const WorkerContext &ctx, bool remove_after_get,
             std::vector<std::shared_ptr<RayObject>> *results);

  /// Convenience wrapper around Get() that stores results in a given result map.
  Status Get(const absl::flat_hash_set<ObjectID> &object_ids, int64_t timeout_ms,
             const WorkerContext &ctx,
             absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> *results,
             bool *got_exception);

  /// Convenience wrapper around Get() that stores ready objects in a given result set.
  Status Wait(const absl::flat_hash_set<ObjectID> &object_ids, int num_objects,
              int64_t timeout_ms, const WorkerContext &ctx,
              absl::flat_hash_set<ObjectID> *ready);

  /// Asynchronously get an object from the object store. The object will not be removed
  /// from storage after GetAsync (TODO(ekl): integrate this with object GC).
  ///
  /// \param[in] object_id The object id to get.
  /// \param[in] callback The callback to run with the reference to the retrieved
  ///            object value once available.
  void GetAsync(const ObjectID &object_id,
                std::function<void(std::shared_ptr<RayObject>)> callback);

  /// Get a single object if available. If the object is not local yet, or if the object
  /// is local but is ErrorType::OBJECT_IN_PLASMA, then nullptr will be returned, and
  /// the store will ensure the object is promoted to plasma once available.
  ///
  /// \param[in] object_id The object id to get.
  /// \return pointer to the local object, or nullptr if promoted to plasma.
  std::shared_ptr<RayObject> GetOrPromoteToPlasma(const ObjectID &object_id);

  /// Delete a list of objects from the object store.
  /// NOTE(swang): Objects that contain IsInPlasmaError will not be
  /// deleted from the in-memory store. Instead, any future Get
  /// calls should check with plasma to see whether the object has
  /// been deleted.
  ///
  /// \param[in] object_ids IDs of the objects to delete.
  /// \param[out] plasma_ids_to_delete This will be extended to
  /// include the IDs of the plasma objects to delete, based on the
  /// in-memory objects that contained InPlasmaError.
  /// \return Void.
  void Delete(const absl::flat_hash_set<ObjectID> &object_ids,
              absl::flat_hash_set<ObjectID> *plasma_ids_to_delete);

  /// Delete a list of objects from the object store.
  ///
  /// \param[in] object_ids IDs of the objects to delete.
  /// \return Void.
  void Delete(const std::vector<ObjectID> &object_ids);

  /// Check whether this store contains the object.
  ///
  /// \param[in] object_id The object to check.
  /// \param[out] in_plasma Set to true if the object was spilled to plasma.
  /// If this is set to true, Contains() will return false.
  /// \return Whether the store has the object.
  bool Contains(const ObjectID &object_id, bool *in_plasma);

  /// Returns the number of objects in this store.
  ///
  /// \return Count of objects in the store.
  int Size() {
    absl::MutexLock lock(&mu_);
    return objects_.size();
  }

  /// Returns stats data of memory usage.
  ///
  /// \return number of local objects and used memory size.
  MemoryStoreStats GetMemoryStoreStatisticalData();

  /// Returns the memory usage of this store.
  ///
  /// \return Total size of objects in the store.
  uint64_t UsedMemory();

 private:
  /// Optional callback for putting objects into the plasma store.
  std::function<void(const RayObject &, const ObjectID &)> store_in_plasma_;

  /// If enabled, holds a reference to local worker ref counter. TODO(ekl) make this
  /// mandatory once Java is supported.
  std::shared_ptr<ReferenceCounter> ref_counter_ = nullptr;

  // If set, this will be used to notify worker blocked / unblocked on get calls.
  std::shared_ptr<raylet::RayletClient> raylet_client_ = nullptr;

  /// Protects the data structures below.
  absl::Mutex mu_;

  /// Set of objects that should be promoted to plasma once available.
  absl::flat_hash_set<ObjectID> promoted_to_plasma_ GUARDED_BY(mu_);

  /// Map from object ID to `RayObject`.
  absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> objects_ GUARDED_BY(mu_);

  /// Map from object ID to its get requests (blocking Get() calls waiting on it).
  absl::flat_hash_map<ObjectID, std::vector<std::shared_ptr<GetRequest>>>
      object_get_requests_ GUARDED_BY(mu_);

  /// Map from object ID to its async get requests (queued GetAsync callbacks).
  absl::flat_hash_map<ObjectID,
                      std::vector<std::function<void(std::shared_ptr<RayObject>)>>>
      object_async_get_requests_ GUARDED_BY(mu_);
};
} // namespace ray
#endif // RAY_CORE_WORKER_MEMORY_STORE_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/store_provider/plasma_store_provider.cc
|
C++
|
#include "ray/core_worker/store_provider/plasma_store_provider.h"
#include "ray/common/ray_config.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/core_worker.h"
#include "ray/protobuf/gcs.pb.h"
namespace ray {
/// Connect to the plasma store at `store_socket`. The raylet client is used
/// for fetch/wait/free IPCs; `check_signals` (may be null) is polled during
/// long-running operations so the worker can abort on pending signals.
CoreWorkerPlasmaStoreProvider::CoreWorkerPlasmaStoreProvider(
    const std::string &store_socket,
    const std::shared_ptr<raylet::RayletClient> raylet_client,
    std::function<Status()> check_signals)
    : raylet_client_(raylet_client) {
  check_signals_ = check_signals;
  // A failed connection to the local plasma store is fatal for the worker.
  RAY_ARROW_CHECK_OK(store_client_.Connect(store_socket));
}
CoreWorkerPlasmaStoreProvider::~CoreWorkerPlasmaStoreProvider() {
  // Best-effort disconnect; errors during teardown are deliberately ignored.
  RAY_IGNORE_EXPR(store_client_.Disconnect());
}
/// Forward client options (name and memory quota in bytes) to the plasma store.
Status CoreWorkerPlasmaStoreProvider::SetClientOptions(std::string name,
                                                       int64_t limit_bytes) {
  std::lock_guard<std::mutex> guard(store_client_mutex_);
  RAY_ARROW_RETURN_NOT_OK(store_client_.SetClientOptions(name, limit_bytes));
  return Status::OK();
}
/// Store `object` in plasma under `object_id`: create the buffer, copy the
/// payload, then seal. In-plasma error markers must never be re-put here.
Status CoreWorkerPlasmaStoreProvider::Put(const RayObject &object,
                                          const ObjectID &object_id) {
  RAY_CHECK(!object.IsInPlasmaError()) << object_id;
  std::shared_ptr<Buffer> data;
  RAY_RETURN_NOT_OK(Create(object.GetMetadata(),
                           object.HasData() ? object.GetData()->Size() : 0, object_id,
                           &data));
  // data could be a nullptr if the ObjectID already existed, but this does
  // not throw an error.
  if (data != nullptr) {
    if (object.HasData()) {
      memcpy(data->Data(), object.GetData()->Data(), object.GetData()->Size());
    }
    RAY_RETURN_NOT_OK(Seal(object_id));
  }
  return Status::OK();
}
/// Allocate a plasma buffer of `data_size` bytes (plus optional metadata) for
/// `object_id`, returned through `data`. If the object already exists, `data`
/// is left untouched and OK is returned; a full store yields ObjectStoreFull.
Status CoreWorkerPlasmaStoreProvider::Create(const std::shared_ptr<Buffer> &metadata,
                                             const size_t data_size,
                                             const ObjectID &object_id,
                                             std::shared_ptr<Buffer> *data) {
  auto plasma_id = object_id.ToPlasmaId();
  std::shared_ptr<arrow::Buffer> arrow_buffer;
  {
    std::lock_guard<std::mutex> guard(store_client_mutex_);
    arrow::Status status =
        store_client_.Create(plasma_id, data_size, metadata ? metadata->Data() : nullptr,
                             metadata ? metadata->Size() : 0, &arrow_buffer);
    // An existing object is not an error: warn and report success.
    if (plasma::IsPlasmaObjectExists(status)) {
      RAY_LOG(WARNING) << "Trying to put an object that already existed in plasma: "
                       << object_id << ".";
      return Status::OK();
    }
    if (plasma::IsPlasmaStoreFull(status)) {
      std::ostringstream message;
      message << "Failed to put object " << object_id << " in object store because it "
              << "is full. Object size is " << data_size << " bytes.";
      return Status::ObjectStoreFull(message.str());
    }
    RAY_ARROW_RETURN_NOT_OK(status);
  }
  // Wrap the arrow buffer so callers only deal with Ray's Buffer interface.
  *data = std::make_shared<PlasmaBuffer>(PlasmaBuffer(arrow_buffer));
  return Status::OK();
}
/// Seal the object so other clients can read it, then release our handle on
/// the create-buffer.
Status CoreWorkerPlasmaStoreProvider::Seal(const ObjectID &object_id) {
  const auto id = object_id.ToPlasmaId();
  std::lock_guard<std::mutex> guard(store_client_mutex_);
  RAY_ARROW_RETURN_NOT_OK(store_client_.Seal(id));
  RAY_ARROW_RETURN_NOT_OK(store_client_.Release(id));
  return Status::OK();
}
/// Issue one FetchOrReconstruct IPC for `batch_ids`, then read them from the
/// local plasma store (blocking up to `timeout_ms`). Objects that arrive are
/// added to `results` and erased from `remaining`; `got_exception` is set if
/// any retrieved value is an error marker.
Status CoreWorkerPlasmaStoreProvider::FetchAndGetFromPlasmaStore(
    absl::flat_hash_set<ObjectID> &remaining, const std::vector<ObjectID> &batch_ids,
    int64_t timeout_ms, bool fetch_only, bool in_direct_call, const TaskID &task_id,
    absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> *results,
    bool *got_exception) {
  RAY_RETURN_NOT_OK(raylet_client_->FetchOrReconstruct(
      batch_ids, fetch_only, /*mark_worker_blocked*/ !in_direct_call, task_id));

  // Translate to plasma IDs for the store client.
  std::vector<plasma::ObjectID> plasma_batch_ids;
  plasma_batch_ids.reserve(batch_ids.size());
  for (size_t i = 0; i < batch_ids.size(); i++) {
    plasma_batch_ids.push_back(batch_ids[i].ToPlasmaId());
  }
  std::vector<plasma::ObjectBuffer> plasma_results;
  {
    std::lock_guard<std::mutex> guard(store_client_mutex_);
    RAY_ARROW_RETURN_NOT_OK(
        store_client_.Get(plasma_batch_ids, timeout_ms, &plasma_results));
  }

  // Add successfully retrieved objects to the result map and remove them from
  // the set of IDs to get.
  for (size_t i = 0; i < plasma_results.size(); i++) {
    if (plasma_results[i].data != nullptr || plasma_results[i].metadata != nullptr) {
      const auto &object_id = batch_ids[i];
      std::shared_ptr<PlasmaBuffer> data = nullptr;
      std::shared_ptr<PlasmaBuffer> metadata = nullptr;
      // Zero-length buffers are represented as null rather than empty buffers.
      if (plasma_results[i].data && plasma_results[i].data->size()) {
        data = std::make_shared<PlasmaBuffer>(plasma_results[i].data);
      }
      if (plasma_results[i].metadata && plasma_results[i].metadata->size()) {
        metadata = std::make_shared<PlasmaBuffer>(plasma_results[i].metadata);
      }
      const auto result_object = std::make_shared<RayObject>(data, metadata);
      (*results)[object_id] = result_object;
      remaining.erase(object_id);
      if (result_object->IsException()) {
        // An in-plasma marker should never appear as a plasma value itself.
        RAY_CHECK(!result_object->IsInPlasmaError());
        *got_exception = true;
      }
    }
  }
  return Status::OK();
}
/// Tell the raylet this worker is no longer blocked, using the IPC that
/// matches the current task type. A no-op for direct calls that never
/// released resources in the first place.
Status UnblockIfNeeded(const std::shared_ptr<raylet::RayletClient> &client,
                       const WorkerContext &ctx) {
  if (!ctx.CurrentTaskIsDirectCall()) {
    return client->NotifyUnblocked(ctx.GetCurrentTaskID());
  }
  if (ctx.ShouldReleaseResourcesOnBlockingCalls()) {
    return client->NotifyDirectCallTaskUnblocked();
  }
  return Status::OK();  // We don't need to release resources.
}
/// Get objects from plasma, triggering reconstruction/fetches via the raylet
/// as needed and blocking (up to `timeout_ms`, -1 = forever) until all are
/// local or an exception value is seen.
Status CoreWorkerPlasmaStoreProvider::Get(
    const absl::flat_hash_set<ObjectID> &object_ids, int64_t timeout_ms,
    const WorkerContext &ctx,
    absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> *results,
    bool *got_exception) {
  int64_t batch_size = RayConfig::instance().worker_fetch_request_size();
  std::vector<ObjectID> batch_ids;
  absl::flat_hash_set<ObjectID> remaining(object_ids.begin(), object_ids.end());

  // First, attempt to fetch all of the required objects once without reconstructing.
  std::vector<ObjectID> id_vector(object_ids.begin(), object_ids.end());
  int64_t total_size = static_cast<int64_t>(object_ids.size());
  for (int64_t start = 0; start < total_size; start += batch_size) {
    batch_ids.clear();
    // BUGFIX: the previous bounds (`i = start; i < batch_size; ... id_vector[start+i]`)
    // produced an EMPTY batch for every chunk after the first, so objects beyond
    // the first `batch_size` were never requested in this fetch-only pass.
    for (int64_t i = start; i < start + batch_size && i < total_size; i++) {
      batch_ids.push_back(id_vector[i]);
    }
    RAY_RETURN_NOT_OK(
        FetchAndGetFromPlasmaStore(remaining, batch_ids, /*timeout_ms=*/0,
                                   /*fetch_only=*/true, ctx.CurrentTaskIsDirectCall(),
                                   ctx.GetCurrentTaskID(), results, got_exception));
  }

  // If all objects were fetched already, return.
  if (remaining.empty() || *got_exception) {
    return Status::OK();
  }

  // If not all objects were successfully fetched, repeatedly call FetchOrReconstruct
  // and Get from the local object store in batches. This loop will run indefinitely
  // until the objects are all fetched if timeout is -1.
  int unsuccessful_attempts = 0;
  bool should_break = false;
  bool timed_out = false;
  int64_t remaining_timeout = timeout_ms;
  while (!remaining.empty() && !should_break) {
    batch_ids.clear();
    for (const auto &id : remaining) {
      if (int64_t(batch_ids.size()) == batch_size) {
        break;
      }
      batch_ids.push_back(id);
    }

    // Give each object at least ~10ms so small batches don't busy-spin.
    int64_t batch_timeout = std::max(RayConfig::instance().get_timeout_milliseconds(),
                                     int64_t(10 * batch_ids.size()));
    if (remaining_timeout >= 0) {
      batch_timeout = std::min(remaining_timeout, batch_timeout);
      remaining_timeout -= batch_timeout;
      timed_out = remaining_timeout <= 0;
    }

    size_t previous_size = remaining.size();
    // This is a separate IPC from the FetchAndGet in direct call mode.
    if (ctx.CurrentTaskIsDirectCall() && ctx.ShouldReleaseResourcesOnBlockingCalls()) {
      RAY_RETURN_NOT_OK(raylet_client_->NotifyDirectCallTaskBlocked());
    }
    RAY_RETURN_NOT_OK(
        FetchAndGetFromPlasmaStore(remaining, batch_ids, batch_timeout,
                                   /*fetch_only=*/false, ctx.CurrentTaskIsDirectCall(),
                                   ctx.GetCurrentTaskID(), results, got_exception));
    should_break = timed_out || *got_exception;

    // Warn periodically if we keep looping without making full progress.
    if ((previous_size - remaining.size()) < batch_ids.size()) {
      unsuccessful_attempts++;
      WarnIfAttemptedTooManyTimes(unsuccessful_attempts, remaining);
    }
    if (check_signals_) {
      Status status = check_signals_();
      if (!status.ok()) {
        // TODO(edoakes): in this case which status should we return?
        RAY_RETURN_NOT_OK(UnblockIfNeeded(raylet_client_, ctx));
        return status;
      }
    }
  }

  if (!remaining.empty() && timed_out) {
    RAY_RETURN_NOT_OK(UnblockIfNeeded(raylet_client_, ctx));
    return Status::TimedOut("Get timed out: some object(s) not ready.");
  }

  // Notify unblocked because we blocked when calling FetchOrReconstruct with
  // fetch_only=false.
  return UnblockIfNeeded(raylet_client_, ctx);
}
/// Ask the plasma store whether `object_id` is present locally.
Status CoreWorkerPlasmaStoreProvider::Contains(const ObjectID &object_id,
                                               bool *has_object) {
  std::lock_guard<std::mutex> guard(store_client_mutex_);
  RAY_ARROW_RETURN_NOT_OK(store_client_.Contains(object_id.ToPlasmaId(), has_object));
  return Status::OK();
}
/// Wait until at least `num_objects` of `object_ids` are local (or the
/// timeout elapses), inserting ready IDs into `ready`. Polls the raylet in
/// short rounds so pending signals can be checked between rounds.
Status CoreWorkerPlasmaStoreProvider::Wait(
    const absl::flat_hash_set<ObjectID> &object_ids, int num_objects, int64_t timeout_ms,
    const WorkerContext &ctx, absl::flat_hash_set<ObjectID> *ready) {
  std::vector<ObjectID> id_vector(object_ids.begin(), object_ids.end());

  bool should_break = false;
  int64_t remaining_timeout = timeout_ms;
  while (!should_break) {
    WaitResultPair result_pair;
    // Bound each round by the configured poll interval and the remaining
    // overall timeout (negative timeout_ms means wait forever).
    int64_t call_timeout = RayConfig::instance().get_timeout_milliseconds();
    if (remaining_timeout >= 0) {
      call_timeout = std::min(remaining_timeout, call_timeout);
      remaining_timeout -= call_timeout;
      should_break = remaining_timeout <= 0;
    }

    // This is a separate IPC from the Wait in direct call mode.
    if (ctx.CurrentTaskIsDirectCall() && ctx.ShouldReleaseResourcesOnBlockingCalls()) {
      RAY_RETURN_NOT_OK(raylet_client_->NotifyDirectCallTaskBlocked());
    }
    RAY_RETURN_NOT_OK(
        raylet_client_->Wait(id_vector, num_objects, call_timeout, /*wait_local*/ true,
                             /*mark_worker_blocked*/ !ctx.CurrentTaskIsDirectCall(),
                             ctx.GetCurrentTaskID(), &result_pair));

    // result_pair.first holds the ready IDs; stop once enough are ready.
    if (result_pair.first.size() >= static_cast<size_t>(num_objects)) {
      should_break = true;
    }
    for (const auto &entry : result_pair.first) {
      ready->insert(entry);
    }
    if (check_signals_) {
      RAY_RETURN_NOT_OK(check_signals_());
    }
  }
  if (ctx.CurrentTaskIsDirectCall() && ctx.ShouldReleaseResourcesOnBlockingCalls()) {
    RAY_RETURN_NOT_OK(raylet_client_->NotifyDirectCallTaskUnblocked());
  }
  return Status::OK();
}
/// Ask the raylet to free the given objects. `local_only` restricts deletion
/// to this node; `delete_creating_tasks` also removes the creating task
/// entries.
Status CoreWorkerPlasmaStoreProvider::Delete(
    const absl::flat_hash_set<ObjectID> &object_ids, bool local_only,
    bool delete_creating_tasks) {
  // FreeObjects takes a vector, so materialize the set first.
  std::vector<ObjectID> ids(object_ids.begin(), object_ids.end());
  return raylet_client_->FreeObjects(ids, local_only, delete_creating_tasks);
}
/// Return the plasma client's debug string describing store memory usage.
std::string CoreWorkerPlasmaStoreProvider::MemoryUsageString() {
  // Serialize access to the plasma client.
  std::lock_guard<std::mutex> store_lock(store_client_mutex_);
  return store_client_.DebugString();
}
/// Log a throttled warning listing (a bounded number of) the object IDs that
/// are still unavailable after `num_attempts` fetch attempts.
///
/// \param num_attempts The number of attempts made so far; a warning fires
///        every object_store_get_warn_per_num_attempts attempts.
/// \param remaining The objects that are still not available locally.
void CoreWorkerPlasmaStoreProvider::WarnIfAttemptedTooManyTimes(
    int num_attempts, const absl::flat_hash_set<ObjectID> &remaining) {
  if (num_attempts % RayConfig::instance().object_store_get_warn_per_num_attempts() ==
      0) {
    std::ostringstream oss;
    size_t printed = 0;
    for (auto &id : remaining) {
      if (printed >=
          RayConfig::instance().object_store_get_max_ids_to_print_in_warning()) {
        break;
      }
      if (printed > 0) {
        oss << ", ";
      }
      oss << id.Hex();
      // Count the ID we just appended. Without this increment the separator,
      // the print cap above, and the "etc" check below never take effect.
      printed++;
    }
    if (printed < remaining.size()) {
      oss << ", etc";
    }
    RAY_LOG(WARNING)
        << "Attempted " << num_attempts << " times to reconstruct objects, but "
        << "some objects are still unavailable. If this message continues to print,"
        << " it may indicate that object's creating task is hanging, or something wrong"
        << " happened in raylet backend. " << remaining.size()
        << " object(s) pending: " << oss.str() << ".";
  }
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/store_provider/plasma_store_provider.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_PLASMA_STORE_PROVIDER_H
#define RAY_CORE_WORKER_PLASMA_STORE_PROVIDER_H
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "plasma/client.h"
#include "ray/common/buffer.h"
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/core_worker/common.h"
#include "ray/core_worker/context.h"
#include "ray/raylet/raylet_client.h"
namespace ray {
/// The class provides implementations for accessing plasma store, which includes both
/// local and remote stores. Local access goes is done via a
/// CoreWorkerLocalPlasmaStoreProvider and remote access goes through the raylet.
/// See `CoreWorkerStoreProvider` for the semantics of public methods.
class CoreWorkerPlasmaStoreProvider {
 public:
  /// Connect to the plasma store at `store_socket` and keep a handle to the
  /// raylet for fetch/wait/free requests. `check_signals` is polled during
  /// long-running Get/Wait loops.
  CoreWorkerPlasmaStoreProvider(const std::string &store_socket,
                                const std::shared_ptr<raylet::RayletClient> raylet_client,
                                std::function<Status()> check_signals);

  ~CoreWorkerPlasmaStoreProvider();

  /// Set client options (name and memory quota) on the plasma store client.
  Status SetClientOptions(std::string name, int64_t limit_bytes);

  /// Store `object` in plasma under `object_id`.
  Status Put(const RayObject &object, const ObjectID &object_id);

  /// Reserve a writable plasma buffer of `data_size` bytes for `object_id`;
  /// the caller fills `*data` and then calls Seal().
  Status Create(const std::shared_ptr<Buffer> &metadata, const size_t data_size,
                const ObjectID &object_id, std::shared_ptr<Buffer> *data);

  /// Make a created object immutable and visible to other workers.
  Status Seal(const ObjectID &object_id);

  /// Fetch all of `object_ids`, blocking up to `timeout_ms` (< 0 waits
  /// forever). Results are added to `*results`; `*got_exception` is set if
  /// any fetched object holds an error.
  Status Get(const absl::flat_hash_set<ObjectID> &object_ids, int64_t timeout_ms,
             const WorkerContext &ctx,
             absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> *results,
             bool *got_exception);

  /// Check whether the local store holds `object_id`.
  Status Contains(const ObjectID &object_id, bool *has_object);

  /// Wait for at least `num_objects` of `object_ids` to become ready,
  /// inserting ready IDs into `*ready`.
  Status Wait(const absl::flat_hash_set<ObjectID> &object_ids, int num_objects,
              int64_t timeout_ms, const WorkerContext &ctx,
              absl::flat_hash_set<ObjectID> *ready);

  /// Free the given objects via the raylet.
  Status Delete(const absl::flat_hash_set<ObjectID> &object_ids, bool local_only,
                bool delete_creating_tasks);

  /// Human-readable plasma store memory usage (client debug string).
  std::string MemoryUsageString();

 private:
  /// Ask the raylet to fetch a set of objects and then attempt to get them
  /// from the local plasma store. Successfully fetched objects will be removed
  /// from the input set of remaining IDs and added to the results map.
  ///
  /// \param[in/out] remaining IDs of the remaining objects to get.
  /// \param[in] batch_ids IDs of the objects to get.
  /// \param[in] timeout_ms Timeout in milliseconds.
  /// \param[in] fetch_only Whether the raylet should only fetch or also attempt to
  /// reconstruct objects.
  /// \param[in] in_direct_call_task Whether the current task is direct call.
  /// \param[in] task_id The current TaskID.
  /// \param[out] results Map of objects to write results into. This method will only
  /// add to this map, not clear or remove from it, so the caller can pass in a non-empty
  /// map.
  /// \param[out] got_exception Set to true if any of the fetched objects contained an
  /// exception.
  /// \return Status.
  Status FetchAndGetFromPlasmaStore(
      absl::flat_hash_set<ObjectID> &remaining, const std::vector<ObjectID> &batch_ids,
      int64_t timeout_ms, bool fetch_only, bool in_direct_call_task,
      const TaskID &task_id,
      absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> *results,
      bool *got_exception);

  /// Print a warning if we've attempted too many times, but some objects are still
  /// unavailable. Only the keys in the 'remaining' map are used.
  ///
  /// \param[in] num_attempts The number of attempted times.
  /// \param[in] remaining The remaining objects.
  static void WarnIfAttemptedTooManyTimes(int num_attempts,
                                          const absl::flat_hash_set<ObjectID> &remaining);

  /// Raylet handle used for fetch/wait/free IPCs.
  const std::shared_ptr<raylet::RayletClient> raylet_client_;
  /// Plasma client; guarded by store_client_mutex_.
  plasma::PlasmaClient store_client_;
  std::mutex store_client_mutex_;
  /// Optional callback polled during blocking loops (e.g. for interrupts).
  std::function<Status()> check_signals_;
};
} // namespace ray
#endif // RAY_CORE_WORKER_PLASMA_STORE_PROVIDER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/task_manager.cc
|
C++
|
#include "ray/core_worker/task_manager.h"
#include "ray/util/util.h"
namespace ray {
// Start throttling task failure logs once this many failures have been
// logged in total.
const int64_t kTaskFailureThrottlingThreshold = 50;
// Once throttled, log task failures at most once per this interval (ms).
const int64_t kTaskFailureLoggingFrequencyMillis = 5000;
/// Register a newly submitted task: remember its spec and retry budget, add
/// reference-counter entries for its by-reference arguments, and record
/// ownership of its return objects.
void TaskManager::AddPendingTask(const TaskID &caller_id,
                                 const rpc::Address &caller_address,
                                 const TaskSpecification &spec, int max_retries) {
  RAY_LOG(DEBUG) << "Adding pending task " << spec.TaskId();
  absl::MutexLock lock(&mu_);
  auto inserted =
      pending_tasks_.emplace(spec.TaskId(), std::make_pair(spec, max_retries));
  // The task must not already be tracked.
  RAY_CHECK(inserted.second);

  // Add references for the dependencies to the task.
  std::vector<ObjectID> dependencies;
  for (size_t arg = 0; arg < spec.NumArgs(); arg++) {
    if (!spec.ArgByRef(arg)) {
      continue;
    }
    for (size_t idx = 0; idx < spec.ArgIdCount(arg); idx++) {
      dependencies.push_back(spec.ArgId(arg, idx));
    }
  }
  reference_counter_->AddSubmittedTaskReferences(dependencies);

  // Add new owned objects for the return values of the task. The last return
  // slot of actor creation/actor tasks is skipped — presumably the actor
  // dummy object; confirm against TaskSpecification.
  size_t num_returns = spec.NumReturns();
  if (spec.IsActorCreationTask() || spec.IsActorTask()) {
    num_returns--;
  }
  for (size_t ret = 0; ret < num_returns; ret++) {
    reference_counter_->AddOwnedObject(spec.ReturnId(ret, TaskTransportType::DIRECT),
                                       caller_id, caller_address);
  }
}
/// Schedule `shutdown` to run once all pending tasks finish; if none are
/// pending it runs immediately (while holding the lock).
void TaskManager::DrainAndShutdown(std::function<void()> shutdown) {
  absl::MutexLock lock(&mu_);
  const bool nothing_in_flight = pending_tasks_.empty();
  if (nothing_in_flight) {
    shutdown();
  } else {
    RAY_LOG(WARNING)
        << "This worker is still managing " << pending_tasks_.size()
        << " in flight tasks, waiting for them to finish before shutting down.";
  }
  // Remember the hook either way; ShutdownIfNeeded() fires it when the
  // pending map drains.
  shutdown_hook_ = shutdown;
}
/// Return whether `task_id` is currently tracked as pending.
bool TaskManager::IsTaskPending(const TaskID &task_id) const {
  absl::MutexLock lock(&mu_);
  return pending_tasks_.find(task_id) != pending_tasks_.end();
}
/// Record the successful completion of a pending task: drop it from the
/// pending map, release its plasma argument references, and publish each
/// return object into the in-memory store.
void TaskManager::CompletePendingTask(const TaskID &task_id,
                                      const rpc::PushTaskReply &reply,
                                      const rpc::Address *actor_addr) {
  RAY_LOG(DEBUG) << "Completing task " << task_id;
  TaskSpecification spec;
  {
    // Copy the spec out under the lock; the reference-counter and store
    // calls below run unlocked.
    absl::MutexLock lock(&mu_);
    auto it = pending_tasks_.find(task_id);
    RAY_CHECK(it != pending_tasks_.end())
        << "Tried to complete task that was not pending " << task_id;
    spec = it->second.first;
    pending_tasks_.erase(it);
  }
  RemovePlasmaSubmittedTaskReferences(spec);
  for (int i = 0; i < reply.return_objects_size(); i++) {
    const auto &return_object = reply.return_objects(i);
    ObjectID object_id = ObjectID::FromBinary(return_object.object_id());
    if (return_object.in_plasma()) {
      // Mark it as in plasma with a dummy object.
      RAY_CHECK_OK(
          in_memory_store_->Put(RayObject(rpc::ErrorType::OBJECT_IN_PLASMA), object_id));
    } else {
      // NOTE(review): these buffers alias the protobuf reply's memory via
      // const_cast rather than copying — presumably Put() copies the bytes
      // before `reply` is destroyed; confirm against LocalMemoryBuffer/Put.
      std::shared_ptr<LocalMemoryBuffer> data_buffer;
      if (return_object.data().size() > 0) {
        data_buffer = std::make_shared<LocalMemoryBuffer>(
            const_cast<uint8_t *>(
                reinterpret_cast<const uint8_t *>(return_object.data().data())),
            return_object.data().size());
      }
      std::shared_ptr<LocalMemoryBuffer> metadata_buffer;
      if (return_object.metadata().size() > 0) {
        metadata_buffer = std::make_shared<LocalMemoryBuffer>(
            const_cast<uint8_t *>(
                reinterpret_cast<const uint8_t *>(return_object.metadata().data())),
            return_object.metadata().size());
      }
      RAY_CHECK_OK(
          in_memory_store_->Put(RayObject(data_buffer, metadata_buffer), object_id));
    }
  }
  // If a drain was requested and this was the last task, run the hook.
  ShutdownIfNeeded();
}
/// Handle a failed pending task: either resubmit it (if retries remain) or
/// mark all of its return objects as failed and stop tracking it.
void TaskManager::PendingTaskFailed(const TaskID &task_id, rpc::ErrorType error_type,
                                    Status *status) {
  // Note that this might be the __ray_terminate__ task, so we don't log
  // loudly with ERROR here.
  RAY_LOG(DEBUG) << "Task " << task_id << " failed with error "
                 << rpc::ErrorType_Name(error_type);
  int num_retries_left = 0;
  TaskSpecification spec;
  {
    // Under the lock: look up the task, and either consume one retry or
    // remove the entry when the budget is exhausted.
    absl::MutexLock lock(&mu_);
    auto it = pending_tasks_.find(task_id);
    RAY_CHECK(it != pending_tasks_.end())
        << "Tried to complete task that was not pending " << task_id;
    spec = it->second.first;
    num_retries_left = it->second.second;
    if (num_retries_left == 0) {
      pending_tasks_.erase(it);
    } else {
      RAY_CHECK(num_retries_left > 0);
      it->second.second--;
    }
  }
  // We should not hold the lock during these calls because they may trigger
  // callbacks in this or other classes.
  if (num_retries_left > 0) {
    RAY_LOG(ERROR) << num_retries_left << " retries left for task " << spec.TaskId()
                   << ", attempting to resubmit.";
    retry_task_callback_(spec);
  } else {
    // Throttled logging of task failure errors.
    {
      // The lock protects num_failure_logs_ and last_log_time_ms_.
      absl::MutexLock lock(&mu_);
      auto debug_str = spec.DebugString();
      if (debug_str.find("__ray_terminate__") == std::string::npos &&
          (num_failure_logs_ < kTaskFailureThrottlingThreshold ||
           (current_time_ms() - last_log_time_ms_) >
               kTaskFailureLoggingFrequencyMillis)) {
        if (num_failure_logs_++ == kTaskFailureThrottlingThreshold) {
          RAY_LOG(ERROR) << "Too many failure logs, throttling to once every "
                         << kTaskFailureLoggingFrequencyMillis << " millis.";
        }
        last_log_time_ms_ = current_time_ms();
        if (status != nullptr) {
          RAY_LOG(ERROR) << "Task failed: " << *status << ": " << spec.DebugString();
        } else {
          RAY_LOG(ERROR) << "Task failed: " << spec.DebugString();
        }
      }
    }
    RemovePlasmaSubmittedTaskReferences(spec);
    MarkPendingTaskFailed(task_id, spec, error_type);
  }
  // If a drain was requested and this was the last task, run the hook.
  ShutdownIfNeeded();
}
/// Invoke the stored shutdown hook if one was registered and no tasks remain
/// pending.
void TaskManager::ShutdownIfNeeded() {
  absl::MutexLock lock(&mu_);
  const bool drained = pending_tasks_.empty();
  if (shutdown_hook_ && drained) {
    RAY_LOG(WARNING) << "All in flight tasks finished, shutting down worker.";
    shutdown_hook_();
  }
}
/// Drop submitted-task references for `object_ids`; any objects whose count
/// reaches zero are also evicted from the in-memory store.
void TaskManager::RemoveSubmittedTaskReferences(const std::vector<ObjectID> &object_ids) {
  std::vector<ObjectID> zero_ref_ids;
  reference_counter_->RemoveSubmittedTaskReferences(object_ids, &zero_ref_ids);
  in_memory_store_->Delete(zero_ref_ids);
}
/// Dependencies that were inlined into the task spec no longer need to be
/// tracked; release their submitted-task references.
void TaskManager::OnTaskDependenciesInlined(const std::vector<ObjectID> &object_ids) {
  RemoveSubmittedTaskReferences(object_ids);
}
/// Release submitted-task references for the first ObjectID of every
/// by-reference argument of `spec` (by-value args report an ID count of 0
/// and are skipped).
void TaskManager::RemovePlasmaSubmittedTaskReferences(TaskSpecification &spec) {
  std::vector<ObjectID> plasma_deps;
  const size_t num_args = spec.NumArgs();
  for (size_t arg = 0; arg < num_args; arg++) {
    if (spec.ArgIdCount(arg) > 0) {
      plasma_deps.push_back(spec.ArgId(arg, 0));
    }
  }
  RemoveSubmittedTaskReferences(plasma_deps);
}
/// Write an error marker into the in-memory store for every return object of
/// a task that has permanently failed, and publish actor death if the failed
/// task was an actor creation task.
///
/// \param task_id The failed task.
/// \param spec The failed task's spec (used for NumReturns and actor info).
/// \param error_type The error recorded in each return object.
void TaskManager::MarkPendingTaskFailed(const TaskID &task_id,
                                        const TaskSpecification &spec,
                                        rpc::ErrorType error_type) {
  RAY_LOG(DEBUG) << "Treat task as failed. task_id: " << task_id
                 << ", error_type: " << ErrorType_Name(error_type);
  const int64_t num_returns = spec.NumReturns();
  // Match the width of NumReturns() to avoid a signed-width mismatch in the
  // loop comparison.
  for (int64_t i = 0; i < num_returns; i++) {
    // Return indices are 1-based in ForTaskReturn.
    const auto object_id = ObjectID::ForTaskReturn(
        task_id, /*index=*/i + 1,
        /*transport_type=*/static_cast<int>(TaskTransportType::DIRECT));
    RAY_CHECK_OK(in_memory_store_->Put(RayObject(error_type), object_id));
  }
  if (spec.IsActorCreationTask()) {
    // Publish actor death if actor creation task failed after
    // a number of retries.
    actor_manager_->PublishTerminatedActor(spec);
  }
}
/// Return a copy of the spec for a task that must currently be pending.
TaskSpecification TaskManager::GetTaskSpec(const TaskID &task_id) const {
  absl::MutexLock lock(&mu_);
  auto entry = pending_tasks_.find(task_id);
  // The caller must only ask about tasks that are still pending.
  RAY_CHECK(entry != pending_tasks_.end());
  return entry->second.first;
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/task_manager.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_TASK_MANAGER_H
#define RAY_CORE_WORKER_TASK_MANAGER_H
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/synchronization/mutex.h"
#include "ray/common/id.h"
#include "ray/common/task/task.h"
#include "ray/core_worker/actor_manager.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/protobuf/core_worker.pb.h"
#include "ray/protobuf/gcs.pb.h"
namespace ray {
/// Interface through which task completion/failure events are delivered to
/// the task manager (kept abstract so transports can be tested with mocks).
class TaskFinisherInterface {
 public:
  /// Record a task's successful completion and publish its return objects.
  virtual void CompletePendingTask(const TaskID &task_id, const rpc::PushTaskReply &reply,
                                   const rpc::Address *actor_addr) = 0;

  /// Record a task failure; implementations may retry or mark returns failed.
  virtual void PendingTaskFailed(const TaskID &task_id, rpc::ErrorType error_type,
                                 Status *status = nullptr) = 0;

  /// Called when argument objects were inlined and no longer need tracking.
  virtual void OnTaskDependenciesInlined(const std::vector<ObjectID> &object_ids) = 0;

  virtual ~TaskFinisherInterface() {}
};
using RetryTaskCallback = std::function<void(const TaskSpecification &spec)>;
/// Tracks tasks submitted by this worker: their specs, retry budgets,
/// argument references, and return objects. Thread-safe; internal state is
/// protected by mu_.
class TaskManager : public TaskFinisherInterface {
 public:
  TaskManager(std::shared_ptr<CoreWorkerMemoryStore> in_memory_store,
              std::shared_ptr<ReferenceCounter> reference_counter,
              std::shared_ptr<ActorManagerInterface> actor_manager,
              RetryTaskCallback retry_task_callback)
      : in_memory_store_(in_memory_store),
        reference_counter_(reference_counter),
        actor_manager_(actor_manager),
        retry_task_callback_(retry_task_callback) {}

  /// Add a task that is pending execution.
  ///
  /// \param[in] caller_id The TaskID of the calling task.
  /// \param[in] caller_address The rpc address of the calling task.
  /// \param[in] spec The spec of the pending task.
  /// \param[in] max_retries Number of times this task may be retried
  /// on failure.
  /// \return Void.
  void AddPendingTask(const TaskID &caller_id, const rpc::Address &caller_address,
                      const TaskSpecification &spec, int max_retries = 0);

  /// Wait for all pending tasks to finish, and then shutdown.
  ///
  /// \param shutdown The shutdown callback to call.
  void DrainAndShutdown(std::function<void()> shutdown);

  /// Return whether the task is pending.
  ///
  /// \param[in] task_id ID of the task to query.
  /// \return Whether the task is pending.
  bool IsTaskPending(const TaskID &task_id) const;

  /// Write return objects for a pending task to the memory store.
  ///
  /// \param[in] task_id ID of the pending task.
  /// \param[in] reply Proto response to a direct actor or task call.
  /// \param[in] actor_addr Address of the created actor, or nullptr.
  /// \return Void.
  void CompletePendingTask(const TaskID &task_id, const rpc::PushTaskReply &reply,
                           const rpc::Address *actor_addr) override;

  /// A pending task failed. This will either retry the task or mark the task
  /// as failed if there are no retries left.
  ///
  /// \param[in] task_id ID of the pending task.
  /// \param[in] error_type The type of the specific error.
  /// \param[in] status Optional status message.
  void PendingTaskFailed(const TaskID &task_id, rpc::ErrorType error_type,
                         Status *status = nullptr) override;

  void OnTaskDependenciesInlined(const std::vector<ObjectID> &object_id) override;

  /// Return the spec for a pending task.
  TaskSpecification GetTaskSpec(const TaskID &task_id) const;

  /// Return the number of pending tasks.
  int NumPendingTasks() const {
    // pending_tasks_ is GUARDED_BY(mu_); the previous implementation read it
    // without the lock, which was a data race.
    absl::MutexLock lock(&mu_);
    return pending_tasks_.size();
  }

 private:
  /// Treat a pending task as failed. The lock should not be held when calling
  /// this method because it may trigger callbacks in this or other classes.
  void MarkPendingTaskFailed(const TaskID &task_id, const TaskSpecification &spec,
                             rpc::ErrorType error_type) LOCKS_EXCLUDED(mu_);

  /// Remove submittted task references in the reference counter for the object IDs.
  /// If their reference counts reach zero, they are deleted from the in-memory store.
  void RemoveSubmittedTaskReferences(const std::vector<ObjectID> &object_ids);

  /// Helper function to call RemoveSubmittedTaskReferences on the plasma dependencies
  /// of the given task spec.
  void RemovePlasmaSubmittedTaskReferences(TaskSpecification &spec);

  /// Shutdown if all tasks are finished and shutdown is scheduled.
  void ShutdownIfNeeded() LOCKS_EXCLUDED(mu_);

  /// Used to store task results.
  std::shared_ptr<CoreWorkerMemoryStore> in_memory_store_;

  /// Used for reference counting objects.
  /// The task manager is responsible for managing all references related to
  /// submitted tasks (dependencies and return objects).
  std::shared_ptr<ReferenceCounter> reference_counter_;

  // Interface for publishing actor creation.
  std::shared_ptr<ActorManagerInterface> actor_manager_;

  /// Called when a task should be retried.
  const RetryTaskCallback retry_task_callback_;

  // The number of task failures we have logged total.
  int64_t num_failure_logs_ GUARDED_BY(mu_) = 0;

  // The last time we logged a task failure.
  int64_t last_log_time_ms_ GUARDED_BY(mu_) = 0;

  /// Protects below fields.
  mutable absl::Mutex mu_;

  /// Map from task ID to a pair of:
  /// {task spec, number of allowed retries left}
  /// This map contains one entry per pending task that we submitted.
  /// TODO(swang): The TaskSpec protobuf must be copied into the
  /// PushTaskRequest protobuf when sent to a worker so that we can retry it if
  /// the worker fails. We could avoid this by either not caching the full
  /// TaskSpec for tasks that cannot be retried (e.g., actor tasks), or by
  /// storing a shared_ptr to a PushTaskRequest protobuf for all tasks.
  absl::flat_hash_map<TaskID, std::pair<TaskSpecification, int>> pending_tasks_
      GUARDED_BY(mu_);

  /// Optional shutdown hook to call when pending tasks all finish.
  std::function<void()> shutdown_hook_ GUARDED_BY(mu_) = nullptr;
};
} // namespace ray
#endif // RAY_CORE_WORKER_TASK_MANAGER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/test/core_worker_test.cc
|
C++
|
#include "ray/core_worker/core_worker.h"
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/bind.hpp>
#include <thread>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "hiredis/async.h"
#include "hiredis/hiredis.h"
#include "ray/common/buffer.h"
#include "ray/common/ray_object.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/core_worker/transport/direct_actor_transport.h"
#include "ray/raylet/raylet_client.h"
#include "ray/util/test_util.h"
#include "src/ray/protobuf/core_worker.pb.h"
#include "src/ray/protobuf/gcs.pb.h"
#include "src/ray/util/test_util.h"
namespace {
// Paths/ports for the external processes the tests launch. NOTE(review):
// presumably populated from command-line arguments in a main() outside this
// chunk — confirm before relying on defaults.
std::string store_executable;
std::string raylet_executable;
int node_manager_port = 0;
std::string raylet_monitor_executable;
std::string mock_worker_executable;
}  // namespace
namespace ray {
/// Reset the local redis instance used by the tests: flush all keys, then
/// re-seed the shard bookkeeping entries that the GCS expects.
/// NOTE(review): the result of redisConnect is not checked for nullptr or
/// context->err, and a failed command returns NULL replies — confirm that
/// freeReplyObject(NULL) is safe with the hiredis version in use.
static void flushall_redis(void) {
  redisContext *context = redisConnect("127.0.0.1", 6379);
  freeReplyObject(redisCommand(context, "FLUSHALL"));
  freeReplyObject(redisCommand(context, "SET NumRedisShards 1"));
  freeReplyObject(redisCommand(context, "LPUSH RedisShards 127.0.0.1:6380"));
  redisFree(context);
}
/// Create a test actor through `worker` with a small by-value argument and
/// the given resource requirements; returns the new actor's ID.
///
/// \param worker The core worker to submit the creation task through.
/// \param resources Resource requirements (used for both required and
///        placement resources).
/// \param is_direct_call Whether to use the direct-call transport.
/// \param max_reconstructions How many times the actor may be reconstructed.
ActorID CreateActorHelper(CoreWorker &worker,
                          std::unordered_map<std::string, double> &resources,
                          bool is_direct_call, uint64_t max_reconstructions) {
  // (Removed an unused `std::unique_ptr<ActorHandle> actor_handle` local.)
  uint8_t array[] = {1, 2, 3};
  auto buffer = std::make_shared<LocalMemoryBuffer>(array, sizeof(array));

  RayFunction func(ray::Language::PYTHON, {"actor creation task"});
  std::vector<TaskArg> args;
  args.emplace_back(TaskArg::PassByValue(std::make_shared<RayObject>(buffer, nullptr)));

  ActorCreationOptions actor_options{
      max_reconstructions,   is_direct_call,
      /*max_concurrency*/ 1, resources, resources, {},
      /*is_detached*/ false, /*is_asyncio*/ false};

  // Create an actor.
  ActorID actor_id;
  RAY_CHECK_OK(worker.CreateActor(func, args, actor_options, &actor_id));
  return actor_id;
}
/// Render an object's metadata buffer as a std::string (for test logging).
std::string MetadataToString(std::shared_ptr<RayObject> obj) {
  const auto meta = obj->GetMetadata();
  return std::string(reinterpret_cast<const char *>(meta->Data()), meta->Size());
}
/// End-to-end test fixture: spawns real plasma stores, raylets, and a raylet
/// monitor as external processes (via system()), and tears them down with
/// `kill -9` on the recorded pids.
class CoreWorkerTest : public ::testing::Test {
 public:
  CoreWorkerTest(int num_nodes) : gcs_options_("127.0.0.1", 6379, "") {
    // flush redis first.
    flushall_redis();

    RAY_CHECK(num_nodes >= 0);
    if (num_nodes > 0) {
      raylet_socket_names_.resize(num_nodes);
      raylet_store_socket_names_.resize(num_nodes);
    }

    // start plasma store.
    for (auto &store_socket : raylet_store_socket_names_) {
      store_socket = StartStore();
    }

    // core worker test relies on node resources. It's important that one raylet can
    // receive the heartbeat from another. So starting raylet monitor is required here.
    raylet_monitor_pid_ = StartRayletMonitor("127.0.0.1");

    // start raylet on each node. Assign each node with different resources so that
    // a task can be scheduled to the desired node.
    for (int i = 0; i < num_nodes; i++) {
      raylet_socket_names_[i] =
          StartRaylet(raylet_store_socket_names_[i], "127.0.0.1", node_manager_port + i,
                      "127.0.0.1", "\"CPU,4.0,resource" + std::to_string(i) + ",10\"");
    }
  }

  ~CoreWorkerTest() {
    // Tear down in reverse: raylets, stores, then the monitor.
    for (const auto &raylet_socket : raylet_socket_names_) {
      StopRaylet(raylet_socket);
    }
    for (const auto &store_socket : raylet_store_socket_names_) {
      StopStore(store_socket);
    }
    if (!raylet_monitor_pid_.empty()) {
      StopRayletMonitor(raylet_monitor_pid_);
    }
  }

  // Each test driver gets a fresh job ID from a process-wide counter.
  JobID NextJobId() const {
    static uint32_t job_counter = 1;
    return JobID::FromInt(job_counter++);
  }

  // Launch a plasma store process; its pid is written to <socket>.pid so the
  // matching Stop* helper can kill it later. Returns the socket path.
  std::string StartStore() {
    std::string store_socket_name = "/tmp/store" + ObjectID::FromRandom().Hex();
    std::string store_pid = store_socket_name + ".pid";
    std::string plasma_command = store_executable + " -m 10000000 -s " +
                                 store_socket_name +
                                 " 1> /dev/null 2> /dev/null & echo $! > " + store_pid;
    RAY_LOG(DEBUG) << plasma_command;
    RAY_CHECK(system(plasma_command.c_str()) == 0);
    // Give the process time to come up before tests connect to it.
    usleep(200 * 1000);
    return store_socket_name;
  }

  void StopStore(std::string store_socket_name) {
    std::string store_pid = store_socket_name + ".pid";
    std::string kill_9 = "kill -9 `cat " + store_pid + "`";
    RAY_LOG(DEBUG) << kill_9;
    ASSERT_EQ(system(kill_9.c_str()), 0);
    ASSERT_EQ(system(("rm -rf " + store_socket_name).c_str()), 0);
    ASSERT_EQ(system(("rm -rf " + store_socket_name + ".pid").c_str()), 0);
  }

  // Launch a raylet process bound to the given store/ports/resources; pid is
  // recorded next to the socket. Returns the raylet socket path.
  std::string StartRaylet(std::string store_socket_name, std::string node_ip_address,
                          int port, std::string redis_address, std::string resource) {
    std::string raylet_socket_name = "/tmp/raylet" + ObjectID::FromRandom().Hex();
    std::string ray_start_cmd = raylet_executable;
    ray_start_cmd.append(" --raylet_socket_name=" + raylet_socket_name)
        .append(" --store_socket_name=" + store_socket_name)
        .append(" --object_manager_port=0 --node_manager_port=" + std::to_string(port))
        .append(" --node_ip_address=" + node_ip_address)
        .append(" --redis_address=" + redis_address)
        .append(" --redis_port=6379")
        .append(" --num_initial_workers=1")
        .append(" --maximum_startup_concurrency=10")
        .append(" --static_resource_list=" + resource)
        .append(" --python_worker_command=\"" + mock_worker_executable + " " +
                store_socket_name + " " + raylet_socket_name + " " +
                std::to_string(port) + "\"")
        .append(" --config_list=initial_reconstruction_timeout_milliseconds,2000")
        .append(" & echo $! > " + raylet_socket_name + ".pid");

    RAY_LOG(DEBUG) << "Ray Start command: " << ray_start_cmd;
    RAY_CHECK(system(ray_start_cmd.c_str()) == 0);
    usleep(200 * 1000);
    return raylet_socket_name;
  }

  void StopRaylet(std::string raylet_socket_name) {
    std::string raylet_pid = raylet_socket_name + ".pid";
    std::string kill_9 = "kill -9 `cat " + raylet_pid + "`";
    RAY_LOG(DEBUG) << kill_9;
    ASSERT_TRUE(system(kill_9.c_str()) == 0);
    ASSERT_TRUE(system(("rm -rf " + raylet_socket_name).c_str()) == 0);
    ASSERT_TRUE(system(("rm -rf " + raylet_socket_name + ".pid").c_str()) == 0);
  }

  // Launch the raylet monitor (needed for inter-raylet heartbeats); returns
  // the path of the file holding its pid.
  std::string StartRayletMonitor(std::string redis_address) {
    std::string raylet_monitor_pid =
        "/tmp/raylet_monitor" + ObjectID::FromRandom().Hex() + ".pid";
    std::string raylet_monitor_start_cmd = raylet_monitor_executable;
    raylet_monitor_start_cmd.append(" --redis_address=" + redis_address)
        .append(" --redis_port=6379")
        .append(" & echo $! > " + raylet_monitor_pid);

    RAY_LOG(DEBUG) << "Raylet monitor Start command: " << raylet_monitor_start_cmd;
    RAY_CHECK(system(raylet_monitor_start_cmd.c_str()) == 0);
    usleep(200 * 1000);
    return raylet_monitor_pid;
  }

  void StopRayletMonitor(std::string raylet_monitor_pid) {
    std::string kill_9 = "kill -9 `cat " + raylet_monitor_pid + "`";
    RAY_LOG(DEBUG) << kill_9;
    ASSERT_TRUE(system(kill_9.c_str()) == 0);
  }

  void SetUp() {}

  void TearDown() {}

  // Test normal tasks.
  void TestNormalTask(std::unordered_map<std::string, double> &resources);

  // Test actor tasks.
  void TestActorTask(std::unordered_map<std::string, double> &resources,
                     bool is_direct_call);

  // Test actor failure case, verify that the tasks would either succeed or
  // fail with exceptions, in that case the return objects fetched from `Get`
  // contain errors.
  void TestActorFailure(std::unordered_map<std::string, double> &resources,
                        bool is_direct_call);

  // Test actor failover case. Verify that actor can be reconstructed successfully,
  // and as long as we wait for actor reconstruction before submitting new tasks,
  // it is guaranteed that all tasks are successfully completed.
  void TestActorReconstruction(std::unordered_map<std::string, double> &resources,
                               bool is_direct_call);

 protected:
  bool WaitForDirectCallActorState(CoreWorker &worker, const ActorID &actor_id,
                                   bool wait_alive, int timeout_ms);

  // Get the pid for the worker process that runs the actor.
  int GetActorPid(CoreWorker &worker, const ActorID &actor_id,
                  std::unordered_map<std::string, double> &resources,
                  bool is_direct_call);

  std::vector<std::string> raylet_socket_names_;
  std::vector<std::string> raylet_store_socket_names_;
  std::string raylet_monitor_pid_;
  gcs::GcsClientOptions gcs_options_;
};
/// Poll the direct-actor submitter's view of `actor_id` until its liveness
/// matches `wait_alive`, or until `timeout_ms` elapses. Returns whether the
/// desired state was reached.
bool CoreWorkerTest::WaitForDirectCallActorState(CoreWorker &worker,
                                                 const ActorID &actor_id, bool wait_alive,
                                                 int timeout_ms) {
  auto reached_desired_state = [&worker, actor_id, wait_alive]() -> bool {
    const bool alive = worker.direct_actor_submitter_->IsActorAlive(actor_id);
    return alive == wait_alive;
  };
  return WaitForCondition(reached_desired_state, timeout_ms);
}
/// Ask the actor for its worker process pid by submitting a "GetWorkerPid"
/// task and parsing the returned bytes. Returns -1 if no data came back.
int CoreWorkerTest::GetActorPid(CoreWorker &worker, const ActorID &actor_id,
                                std::unordered_map<std::string, double> &resources,
                                bool is_direct_call) {
  std::vector<TaskArg> args;
  TaskOptions options{1, is_direct_call, resources};
  std::vector<ObjectID> return_ids;
  RayFunction func{Language::PYTHON, {"GetWorkerPid"}};

  RAY_CHECK_OK(worker.SubmitActorTask(actor_id, func, args, options, &return_ids));

  std::vector<std::shared_ptr<ray::RayObject>> results;
  RAY_CHECK_OK(worker.Get(return_ids, -1, &results));

  const auto &data_buf = results[0]->GetData();
  if (data_buf == nullptr) {
    // If failed to get actor process pid, return -1
    return -1;
  }
  std::string pid_string(reinterpret_cast<char *>(data_buf->Data()), data_buf->Size());
  return std::stoi(pid_string);
}
/// Submit many normal (non-actor) direct-call tasks, each with one by-value
/// and one by-ref argument, and verify the merged output equals the
/// concatenation of the two input buffers.
void CoreWorkerTest::TestNormalTask(std::unordered_map<std::string, double> &resources) {
  CoreWorker driver(WorkerType::DRIVER, Language::PYTHON, raylet_store_socket_names_[0],
                    raylet_socket_names_[0], NextJobId(), gcs_options_, "", "127.0.0.1",
                    node_manager_port, nullptr);

  // Test for tasks with by-value and by-ref args.
  {
    const int num_tasks = 100;
    for (int i = 0; i < num_tasks; i++) {
      auto buffer1 = GenerateRandomBuffer();
      auto buffer2 = GenerateRandomBuffer();

      // buffer2 is Put into the store so it can be passed by reference.
      ObjectID object_id;
      RAY_CHECK_OK(driver.Put(RayObject(buffer2, nullptr), &object_id));

      std::vector<TaskArg> args;
      args.emplace_back(
          TaskArg::PassByValue(std::make_shared<RayObject>(buffer1, nullptr)));
      args.emplace_back(TaskArg::PassByReference(object_id));

      RayFunction func(ray::Language::PYTHON, {"MergeInputArgsAsOutput"});
      TaskOptions options;
      options.is_direct_call = true;

      std::vector<ObjectID> return_ids;
      RAY_CHECK_OK(
          driver.SubmitTask(func, args, options, &return_ids, /*max_retries=*/0));

      ASSERT_EQ(return_ids.size(), 1);

      std::vector<std::shared_ptr<ray::RayObject>> results;
      RAY_CHECK_OK(driver.Get(return_ids, -1, &results));

      // The mock worker's MergeInputArgsAsOutput concatenates its inputs, so
      // the result must be buffer1 followed by buffer2.
      ASSERT_EQ(results.size(), 1);
      ASSERT_EQ(results[0]->GetData()->Size(), buffer1->Size() + buffer2->Size());
      ASSERT_EQ(memcmp(results[0]->GetData()->Data(), buffer1->Data(), buffer1->Size()),
                0);
      ASSERT_EQ(memcmp(results[0]->GetData()->Data() + buffer1->Size(), buffer2->Data(),
                       buffer2->Size()),
                0);
    }
  }
}
/// Create an actor and exercise it with (a) many tasks using only by-value
/// args, and (b) one task mixing by-ref and by-value args. In both cases the
/// result must be the concatenation of the input buffers.
void CoreWorkerTest::TestActorTask(std::unordered_map<std::string, double> &resources,
                                   bool is_direct_call) {
  CoreWorker driver(WorkerType::DRIVER, Language::PYTHON, raylet_store_socket_names_[0],
                    raylet_socket_names_[0], NextJobId(), gcs_options_, "", "127.0.0.1",
                    node_manager_port, nullptr);

  auto actor_id = CreateActorHelper(driver, resources, is_direct_call, 1000);

  // Test submitting some tasks with by-value args for that actor.
  {
    const int num_tasks = 100;
    for (int i = 0; i < num_tasks; i++) {
      auto buffer1 = GenerateRandomBuffer();
      auto buffer2 = GenerateRandomBuffer();

      // Create arguments with PassByRef and PassByValue.
      std::vector<TaskArg> args;
      args.emplace_back(
          TaskArg::PassByValue(std::make_shared<RayObject>(buffer1, nullptr)));
      args.emplace_back(
          TaskArg::PassByValue(std::make_shared<RayObject>(buffer2, nullptr)));

      TaskOptions options{1, false, resources};
      std::vector<ObjectID> return_ids;
      RayFunction func(ray::Language::PYTHON, {"MergeInputArgsAsOutput"});

      RAY_CHECK_OK(driver.SubmitActorTask(actor_id, func, args, options, &return_ids));
      ASSERT_EQ(return_ids.size(), 1);
      // The return ID must be tagged as a return object on the transport the
      // actor was created with.
      ASSERT_TRUE(return_ids[0].IsReturnObject());
      ASSERT_EQ(static_cast<TaskTransportType>(return_ids[0].GetTransportType()),
                is_direct_call ? TaskTransportType::DIRECT : TaskTransportType::RAYLET);

      std::vector<std::shared_ptr<ray::RayObject>> results;
      RAY_CHECK_OK(driver.Get(return_ids, -1, &results));

      ASSERT_EQ(results.size(), 1);
      // Metadata on the result would indicate an error object.
      ASSERT_TRUE(!results[0]->HasMetadata())
          << "metadata: " << MetadataToString(results[0])
          << ", object ID: " << return_ids[0];
      ASSERT_EQ(results[0]->GetData()->Size(), buffer1->Size() + buffer2->Size());
      ASSERT_EQ(memcmp(results[0]->GetData()->Data(), buffer1->Data(), buffer1->Size()),
                0);
      ASSERT_EQ(memcmp(results[0]->GetData()->Data() + buffer1->Size(), buffer2->Data(),
                       buffer2->Size()),
                0);
    }
  }

  // Test submitting a task with both by-value and by-ref args for that actor.
  {
    uint8_t array1[] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint8_t array2[] = {10, 11, 12, 13, 14, 15};

    auto buffer1 = std::make_shared<LocalMemoryBuffer>(array1, sizeof(array1));
    auto buffer2 = std::make_shared<LocalMemoryBuffer>(array2, sizeof(array2));

    // buffer1 goes through the object store so it can be passed by reference.
    ObjectID object_id;
    RAY_CHECK_OK(driver.Put(RayObject(buffer1, nullptr), &object_id));

    // Create arguments with PassByRef and PassByValue.
    std::vector<TaskArg> args;
    args.emplace_back(TaskArg::PassByReference(object_id));
    args.emplace_back(
        TaskArg::PassByValue(std::make_shared<RayObject>(buffer2, nullptr)));

    TaskOptions options{1, false, resources};
    std::vector<ObjectID> return_ids;
    RayFunction func(ray::Language::PYTHON, {"MergeInputArgsAsOutput"});

    auto status = driver.SubmitActorTask(actor_id, func, args, options, &return_ids);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(return_ids.size(), 1);

    std::vector<std::shared_ptr<ray::RayObject>> results;
    RAY_CHECK_OK(driver.Get(return_ids, -1, &results));

    ASSERT_EQ(results.size(), 1);
    ASSERT_EQ(results[0]->GetData()->Size(), buffer1->Size() + buffer2->Size());
    ASSERT_EQ(memcmp(results[0]->GetData()->Data(), buffer1->Data(), buffer1->Size()), 0);
    ASSERT_EQ(memcmp(results[0]->GetData()->Data() + buffer1->Size(), buffer2->Data(),
                     buffer2->Size()),
              0);
  }
}
// Verifies that a direct-call actor is reconstructed after its worker process
// is killed mid-stream, and that tasks submitted across the restart still
// return their expected (echoed) payloads.
void CoreWorkerTest::TestActorReconstruction(
    std::unordered_map<std::string, double> &resources, bool is_direct_call) {
  CoreWorker driver(WorkerType::DRIVER, Language::PYTHON, raylet_store_socket_names_[0],
                    raylet_socket_names_[0], NextJobId(), gcs_options_, "", "127.0.0.1",
                    node_manager_port, nullptr);

  // Create the actor with max_reconstructions=1000 so it can be restarted
  // after the worker process is killed below.
  auto actor_id = CreateActorHelper(driver, resources, is_direct_call, 1000);
  // Wait for actor alive event.
  ASSERT_TRUE(WaitForDirectCallActorState(driver, actor_id, true, 30 * 1000 /* 30s */));
  RAY_LOG(INFO) << "actor has been created";

  // Remember the original worker pid so the restart can be detected by a pid
  // change.
  auto pid = GetActorPid(driver, actor_id, resources, is_direct_call);
  RAY_CHECK(pid != -1);

  // Test submitting some tasks with by-value args for that actor.
  {
    const int num_tasks = 100;
    const int task_index_to_kill_worker = (num_tasks + 1) / 2;
    std::vector<std::pair<ObjectID, std::vector<uint8_t>>> all_results;
    for (int i = 0; i < num_tasks; i++) {
      if (i == task_index_to_kill_worker) {
        RAY_LOG(INFO) << "killing worker";
        ASSERT_EQ(system("pkill mock_worker"), 0);

        // Wait for the actor reconstruction event, and then for the alive
        // event: the actor counts as reconstructed once its worker pid is
        // valid and differs from the original.
        auto check_actor_restart_func = [this, pid, &driver, &actor_id, &resources,
                                         is_direct_call]() -> bool {
          auto new_pid = GetActorPid(driver, actor_id, resources, is_direct_call);
          return new_pid != -1 && new_pid != pid;
        };
        ASSERT_TRUE(WaitForCondition(check_actor_restart_func, 30 * 1000 /* 30s */));
        RAY_LOG(INFO) << "actor has been reconstructed";
      }

      // wait for actor being reconstructed.
      auto buffer1 = GenerateRandomBuffer();

      // Create arguments with PassByValue.
      std::vector<TaskArg> args;
      args.emplace_back(
          TaskArg::PassByValue(std::make_shared<RayObject>(buffer1, nullptr)));

      TaskOptions options{1, false, resources};
      std::vector<ObjectID> return_ids;
      RayFunction func(ray::Language::PYTHON, {"MergeInputArgsAsOutput"});

      RAY_CHECK_OK(driver.SubmitActorTask(actor_id, func, args, options, &return_ids));
      ASSERT_EQ(return_ids.size(), 1);
      // Verify if it's expected data: the task must echo its input even when
      // it ran on the reconstructed actor.
      std::vector<std::shared_ptr<RayObject>> results;
      RAY_CHECK_OK(driver.Get(return_ids, -1, &results));
      ASSERT_EQ(results[0]->GetData()->Size(), buffer1->Size());
      ASSERT_EQ(*results[0]->GetData(), *buffer1);
    }
  }
}
// Verifies results after a non-reconstructable actor's worker is killed:
// tasks that completed before the kill return their echoed data, while later
// ones carry ACTOR_DIED error metadata.
void CoreWorkerTest::TestActorFailure(std::unordered_map<std::string, double> &resources,
                                      bool is_direct_call) {
  CoreWorker driver(WorkerType::DRIVER, Language::PYTHON, raylet_store_socket_names_[0],
                    raylet_socket_names_[0], NextJobId(), gcs_options_, "", "127.0.0.1",
                    node_manager_port, nullptr);

  // creating actor.
  auto actor_id =
      CreateActorHelper(driver, resources, is_direct_call, 0 /* not reconstructable */);

  // Test submitting some tasks with by-value args for that actor.
  {
    const int num_tasks = 3000;
    const int task_index_to_kill_worker = (num_tasks + 1) / 2;
    std::vector<std::pair<ObjectID, std::shared_ptr<Buffer>>> all_results;
    for (int i = 0; i < num_tasks; i++) {
      if (i == task_index_to_kill_worker) {
        RAY_LOG(INFO) << "killing worker";
        ASSERT_EQ(system("pkill mock_worker"), 0);
      }

      // wait for actor being reconstructed.
      auto buffer1 = GenerateRandomBuffer();

      // Create arguments with PassByRef and PassByValue.
      std::vector<TaskArg> args;
      args.emplace_back(
          TaskArg::PassByValue(std::make_shared<RayObject>(buffer1, nullptr)));

      TaskOptions options{1, false, resources};
      std::vector<ObjectID> return_ids;
      RayFunction func(ray::Language::PYTHON, {"MergeInputArgsAsOutput"});

      RAY_CHECK_OK(driver.SubmitActorTask(actor_id, func, args, options, &return_ids));
      ASSERT_EQ(return_ids.size(), 1);
      // Remember each task's expected payload so results can be checked after
      // all submissions complete.
      all_results.emplace_back(std::make_pair(return_ids[0], buffer1));
    }

    for (int i = 0; i < num_tasks; i++) {
      const auto &entry = all_results[i];
      std::vector<ObjectID> return_ids;
      return_ids.push_back(entry.first);
      std::vector<std::shared_ptr<RayObject>> results;
      RAY_CHECK_OK(driver.Get(return_ids, -1, &results));
      ASSERT_EQ(results.size(), 1);

      if (results[0]->HasMetadata()) {
        // Verify if this is the desired error: objects with metadata should
        // carry the ACTOR_DIED error code as a decimal string.
        std::string meta = std::to_string(static_cast<int>(rpc::ErrorType::ACTOR_DIED));
        ASSERT_TRUE(memcmp(results[0]->GetMetadata()->Data(), meta.data(), meta.size()) ==
                    0);
      } else {
        // Verify if it's expected data.
        ASSERT_EQ(*results[0]->GetData(), *entry.second);
      }
    }
  }
}
// Fixture variants parameterized only by cluster size (the number of
// raylet/store node pairs CoreWorkerTest starts up).
// No nodes: for tests that need no running cluster at all.
class ZeroNodeTest : public CoreWorkerTest {
 public:
  ZeroNodeTest() : CoreWorkerTest(0) {}
};

// A single node: local-only scenarios.
class SingleNodeTest : public CoreWorkerTest {
 public:
  SingleNodeTest() : CoreWorkerTest(1) {}
};

// Two nodes: cross-node scenarios.
class TwoNodeTest : public CoreWorkerTest {
 public:
  TwoNodeTest() : CoreWorkerTest(2) {}
};
TEST_F(ZeroNodeTest, TestTaskArg) {
  // A by-reference argument must report itself as passed by reference and
  // round-trip the object ID it wraps.
  ObjectID ref_id = ObjectID::FromRandom();
  TaskArg ref_arg = TaskArg::PassByReference(ref_id);
  ASSERT_TRUE(ref_arg.IsPassedByReference());
  ASSERT_EQ(ref_arg.GetReference(), ref_id);

  // A by-value argument must carry exactly the payload it was built from.
  auto payload = GenerateRandomBuffer();
  TaskArg value_arg =
      TaskArg::PassByValue(std::make_shared<RayObject>(payload, nullptr));
  ASSERT_FALSE(value_arg.IsPassedByReference());
  auto stored = value_arg.GetValue().GetData();
  ASSERT_TRUE(stored != nullptr);
  ASSERT_EQ(*stored, *payload);
}
// Performance benchmark for `PushTaskRequest` creation.
TEST_F(ZeroNodeTest, TestTaskSpecPerf) {
  // Create a dummy actor handle, and then create a number of `TaskSpec`
  // to benchmark performance.
  uint8_t array[] = {1, 2, 3};
  auto buffer = std::make_shared<LocalMemoryBuffer>(array, sizeof(array));
  RayFunction function(ray::Language::PYTHON, {});
  std::vector<TaskArg> args;
  args.emplace_back(TaskArg::PassByValue(std::make_shared<RayObject>(buffer, nullptr)));

  std::unordered_map<std::string, double> resources;
  ActorCreationOptions actor_options{0,
                                     /*is_direct_call*/ true,
                                     1,
                                     resources,
                                     resources,
                                     {},
                                     /*is_detached*/ false,
                                     /*is_asyncio*/ false};
  const auto job_id = NextJobId();
  ActorHandle actor_handle(ActorID::Of(job_id, TaskID::ForDriverTask(job_id), 1), job_id,
                           ObjectID::FromRandom(), function.GetLanguage(), true,
                           function.GetFunctionDescriptor());

  // Manually create `num_tasks` task specs, and for each of them create a
  // `PushTaskRequest`, this is to benchmark performance of TaskSpec
  // creation/copy/destruction.
  int64_t start_ms = current_time_ms();
  const auto num_tasks = 10000 * 10;
  RAY_LOG(INFO) << "start creating " << num_tasks << " PushTaskRequests";
  rpc::Address address;
  for (int i = 0; i < num_tasks; i++) {
    TaskOptions options{1, false, resources};
    std::vector<ObjectID> return_ids;
    auto num_returns = options.num_returns;

    TaskSpecBuilder builder;
    builder.SetCommonTaskSpec(RandomTaskId(), function.GetLanguage(),
                              function.GetFunctionDescriptor(), job_id, RandomTaskId(), 0,
                              RandomTaskId(), address, num_returns, /*is_direct*/ false,
                              resources, resources);
    // Set task arguments.
    for (const auto &arg : args) {
      if (arg.IsPassedByReference()) {
        builder.AddByRefArg(arg.GetReference());
      } else {
        builder.AddByValueArg(arg.GetValue());
      }
    }

    actor_handle.SetActorTaskSpec(builder, TaskTransportType::RAYLET,
                                  ObjectID::FromRandom());

    auto task_spec = builder.Build();

    ASSERT_TRUE(task_spec.IsActorTask());
    // Swap moves the spec into the request without a protobuf deep copy.
    auto request = std::unique_ptr<rpc::PushTaskRequest>(new rpc::PushTaskRequest);
    request->mutable_task_spec()->Swap(&task_spec.GetMutableMessage());
  }
  RAY_LOG(INFO) << "Finish creating " << num_tasks << " PushTaskRequests"
                << ", which takes " << current_time_ms() - start_ms << " ms";
}
// Benchmarks end-to-end direct actor task submission: submits num_tasks
// tasks, then blocks until every result is available, logging both phases.
TEST_F(SingleNodeTest, TestDirectActorTaskSubmissionPerf) {
  CoreWorker driver(WorkerType::DRIVER, Language::PYTHON, raylet_store_socket_names_[0],
                    raylet_socket_names_[0], JobID::FromInt(1), gcs_options_, "",
                    "127.0.0.1", node_manager_port, nullptr);
  std::vector<ObjectID> object_ids;
  // Create an actor.
  std::unordered_map<std::string, double> resources;
  auto actor_id = CreateActorHelper(driver, resources,
                                    /*is_direct_call=*/true,
                                    /*max_reconstructions=*/0);
  // wait for actor creation finish.
  ASSERT_TRUE(WaitForDirectCallActorState(driver, actor_id, true, 30 * 1000 /* 30s */));
  // Test submitting some tasks with by-value args for that actor.
  int64_t start_ms = current_time_ms();
  const int num_tasks = 100000;
  RAY_LOG(INFO) << "start submitting " << num_tasks << " tasks";
  for (int i = 0; i < num_tasks; i++) {
    // Create arguments with PassByValue. The payload embeds the task index
    // and SHOULD_CHECK_MESSAGE_ORDER — presumably a flag telling the mock
    // worker to verify submission order; confirm against mock_worker.
    std::vector<TaskArg> args;
    int64_t array[] = {SHOULD_CHECK_MESSAGE_ORDER, i};
    auto buffer = std::make_shared<LocalMemoryBuffer>(reinterpret_cast<uint8_t *>(array),
                                                      sizeof(array));
    args.emplace_back(TaskArg::PassByValue(std::make_shared<RayObject>(buffer, nullptr)));

    TaskOptions options{1, false, resources};
    std::vector<ObjectID> return_ids;
    RayFunction func(ray::Language::PYTHON, {"MergeInputArgsAsOutput"});

    RAY_CHECK_OK(driver.SubmitActorTask(actor_id, func, args, options, &return_ids));
    ASSERT_EQ(return_ids.size(), 1);
    object_ids.emplace_back(return_ids[0]);
  }
  RAY_LOG(INFO) << "finish submitting " << num_tasks << " tasks"
                << ", which takes " << current_time_ms() - start_ms << " ms";

  // Block until every submitted task has produced its result.
  for (const auto &object_id : object_ids) {
    std::vector<std::shared_ptr<RayObject>> results;
    RAY_CHECK_OK(driver.Get({object_id}, -1, &results));
    ASSERT_EQ(results.size(), 1);
  }
  RAY_LOG(INFO) << "finish executing " << num_tasks << " tasks"
                << ", which takes " << current_time_ms() - start_ms << " ms";
}
TEST_F(ZeroNodeTest, TestWorkerContext) {
  // A fresh worker context has no current task, and its task/put index
  // counters start at 1 and increment on every call.
  WorkerContext context(WorkerType::WORKER, NextJobId());
  ASSERT_TRUE(context.GetCurrentTaskID().IsNil());
  ASSERT_EQ(context.GetNextTaskIndex(), 1);
  ASSERT_EQ(context.GetNextTaskIndex(), 2);
  ASSERT_EQ(context.GetNextPutIndex(), 1);
  ASSERT_EQ(context.GetNextPutIndex(), 2);

  // The counters are thread-local: a fresh thread sees them start at 1.
  std::thread other_thread([&context]() {
    ASSERT_EQ(context.GetNextTaskIndex(), 1);
    ASSERT_EQ(context.GetNextPutIndex(), 1);
  });
  other_thread.join();

  // The original thread's counters are unaffected by the other thread.
  ASSERT_EQ(context.GetNextTaskIndex(), 3);
  ASSERT_EQ(context.GetNextPutIndex(), 3);
}
TEST_F(ZeroNodeTest, TestActorHandle) {
  // Serialize an actor handle and deserialize the bytes again; all of the
  // identifying fields must survive the round trip.
  JobID job_id = NextJobId();
  ActorHandle original(ActorID::Of(job_id, TaskID::ForDriverTask(job_id), 0), job_id,
                       ObjectID::FromRandom(), Language::PYTHON, /*is_direct_call=*/false,
                       {});
  std::string serialized;
  original.Serialize(&serialized);
  ActorHandle restored(serialized);
  ASSERT_EQ(restored.GetActorID(), original.GetActorID());
  ASSERT_EQ(restored.ActorLanguage(), original.ActorLanguage());
  ASSERT_EQ(restored.ActorCreationTaskFunctionDescriptor(),
            original.ActorCreationTaskFunctionDescriptor());

  // TODO: Test submission from different handles.
}
// Exercises the in-process memory store directly: Put/Wait/Get/Delete, plus
// Wait() observing objects that become available from another thread.
TEST_F(SingleNodeTest, TestMemoryStoreProvider) {
  std::shared_ptr<CoreWorkerMemoryStore> provider_ptr =
      std::make_shared<CoreWorkerMemoryStore>();
  auto &provider = *provider_ptr;

  uint8_t array1[] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint8_t array2[] = {10, 11, 12, 13, 14, 15};

  // Each object carries a data buffer plus a half-sized metadata buffer.
  std::vector<RayObject> buffers;
  buffers.emplace_back(std::make_shared<LocalMemoryBuffer>(array1, sizeof(array1)),
                       std::make_shared<LocalMemoryBuffer>(array1, sizeof(array1) / 2));
  buffers.emplace_back(std::make_shared<LocalMemoryBuffer>(array2, sizeof(array2)),
                       std::make_shared<LocalMemoryBuffer>(array2, sizeof(array2) / 2));

  std::vector<ObjectID> ids(buffers.size());
  for (size_t i = 0; i < ids.size(); i++) {
    ids[i] = ObjectID::FromRandom().WithDirectTransportType();
    RAY_CHECK_OK(provider.Put(buffers[i], ids[i]));
  }

  // Wait() with one nonexistent ID and a 100ms timeout: only the stored
  // objects may be reported ready.
  absl::flat_hash_set<ObjectID> wait_ids(ids.begin(), ids.end());
  absl::flat_hash_set<ObjectID> wait_results;

  ObjectID nonexistent_id = ObjectID::FromRandom().WithDirectTransportType();
  WorkerContext ctx(WorkerType::WORKER, JobID::Nil());
  wait_ids.insert(nonexistent_id);
  RAY_CHECK_OK(provider.Wait(wait_ids, ids.size() + 1, 100, ctx, &wait_results));
  ASSERT_EQ(wait_results.size(), ids.size());
  ASSERT_TRUE(wait_results.count(nonexistent_id) == 0);

  // Test Wait() where the required `num_objects` is less than size of `wait_ids`.
  wait_results.clear();
  RAY_CHECK_OK(provider.Wait(wait_ids, ids.size(), -1, ctx, &wait_results));
  ASSERT_EQ(wait_results.size(), ids.size());
  ASSERT_TRUE(wait_results.count(nonexistent_id) == 0);

  // Test Get(): data and metadata must round-trip byte-for-byte.
  bool got_exception = false;
  absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> results;
  absl::flat_hash_set<ObjectID> ids_set(ids.begin(), ids.end());
  RAY_CHECK_OK(provider.Get(ids_set, -1, ctx, &results, &got_exception));

  ASSERT_TRUE(!got_exception);
  ASSERT_EQ(results.size(), ids.size());
  for (size_t i = 0; i < ids.size(); i++) {
    const auto &expected = buffers[i];
    ASSERT_EQ(results[ids[i]]->GetData()->Size(), expected.GetData()->Size());
    ASSERT_EQ(memcmp(results[ids[i]]->GetData()->Data(), expected.GetData()->Data(),
                     expected.GetData()->Size()),
              0);
    ASSERT_EQ(results[ids[i]]->GetMetadata()->Size(), expected.GetMetadata()->Size());
    ASSERT_EQ(memcmp(results[ids[i]]->GetMetadata()->Data(),
                     expected.GetMetadata()->Data(), expected.GetMetadata()->Size()),
              0);
  }

  // Test Delete().
  // clear the reference held.
  results.clear();

  absl::flat_hash_set<ObjectID> plasma_object_ids;
  provider.Delete(ids_set, &plasma_object_ids);
  // None of these objects live in plasma, so nothing is forwarded there.
  ASSERT_TRUE(plasma_object_ids.empty());

  // Deletion is asynchronous; give the store a moment before checking.
  usleep(200 * 1000);
  ASSERT_TRUE(provider.Get(ids_set, 0, ctx, &results, &got_exception).IsTimedOut());
  ASSERT_TRUE(!got_exception);
  ASSERT_EQ(results.size(), 0);

  // Test Wait() with objects which will become ready later.
  std::vector<ObjectID> ready_ids(buffers.size());
  std::vector<ObjectID> unready_ids(buffers.size());
  for (size_t i = 0; i < unready_ids.size(); i++) {
    ready_ids[i] = ObjectID::FromRandom().WithDirectTransportType();
    RAY_CHECK_OK(provider.Put(buffers[i], ready_ids[i]));
    unready_ids[i] = ObjectID::FromRandom().WithDirectTransportType();
  }

  // Background thread fills in the unready objects after a 1s delay.
  auto thread_func = [&unready_ids, &provider, &buffers]() {
    sleep(1);

    for (size_t i = 0; i < unready_ids.size(); i++) {
      RAY_CHECK_OK(provider.Put(buffers[i], unready_ids[i]));
    }
  };

  std::thread async_thread(thread_func);

  wait_ids.clear();
  wait_ids.insert(ready_ids.begin(), ready_ids.end());
  wait_ids.insert(unready_ids.begin(), unready_ids.end());
  wait_results.clear();

  // Check that only the ready ids are returned when timeout ends before thread runs.
  RAY_CHECK_OK(provider.Wait(wait_ids, ready_ids.size() + 1, 100, ctx, &wait_results));

  ASSERT_EQ(ready_ids.size(), wait_results.size());
  for (const auto &ready_id : ready_ids) {
    ASSERT_TRUE(wait_results.find(ready_id) != wait_results.end());
  }
  for (const auto &unready_id : unready_ids) {
    ASSERT_TRUE(wait_results.find(unready_id) == wait_results.end());
  }

  wait_results.clear();
  // Check that enough objects are returned after the thread inserts at least one object.
  RAY_CHECK_OK(provider.Wait(wait_ids, ready_ids.size() + 1, 5000, ctx, &wait_results));

  ASSERT_TRUE(wait_results.size() >= ready_ids.size() + 1);
  for (const auto &ready_id : ready_ids) {
    ASSERT_TRUE(wait_results.find(ready_id) != wait_results.end());
  }

  wait_results.clear();
  // Check that all objects are returned after the thread completes.
  async_thread.join();
  RAY_CHECK_OK(provider.Wait(wait_ids, wait_ids.size(), -1, ctx, &wait_results));

  ASSERT_EQ(wait_results.size(), ready_ids.size() + unready_ids.size());
  for (const auto &ready_id : ready_ids) {
    ASSERT_TRUE(wait_results.find(ready_id) != wait_results.end());
  }
  for (const auto &unready_id : unready_ids) {
    ASSERT_TRUE(wait_results.find(unready_id) != wait_results.end());
  }
}
// End-to-end Put/Get/Wait/Delete through a driver CoreWorker against the
// local store, including one object that carries error metadata.
TEST_F(SingleNodeTest, TestObjectInterface) {
  CoreWorker core_worker(WorkerType::DRIVER, Language::PYTHON,
                         raylet_store_socket_names_[0], raylet_socket_names_[0],
                         JobID::FromInt(1), gcs_options_, "", "127.0.0.1",
                         node_manager_port, nullptr);

  uint8_t array1[] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint8_t array2[] = {10, 11, 12, 13, 14, 15};

  // Each object carries a data buffer plus a half-sized metadata buffer.
  std::vector<RayObject> buffers;
  buffers.emplace_back(std::make_shared<LocalMemoryBuffer>(array1, sizeof(array1)),
                       std::make_shared<LocalMemoryBuffer>(array1, sizeof(array1) / 2));
  buffers.emplace_back(std::make_shared<LocalMemoryBuffer>(array2, sizeof(array2)),
                       std::make_shared<LocalMemoryBuffer>(array2, sizeof(array2) / 2));

  std::vector<ObjectID> ids(buffers.size());
  for (size_t i = 0; i < ids.size(); i++) {
    RAY_CHECK_OK(core_worker.Put(buffers[i], &ids[i]));
  }

  // Test Get(): data and metadata must round-trip unchanged.
  std::vector<std::shared_ptr<RayObject>> results;
  RAY_CHECK_OK(core_worker.Get(ids, -1, &results));

  ASSERT_EQ(results.size(), ids.size());
  for (size_t i = 0; i < ids.size(); i++) {
    ASSERT_EQ(*results[i]->GetData(), *buffers[i].GetData());
    ASSERT_EQ(*results[i]->GetMetadata(), *buffers[i].GetMetadata());
  }

  // Test Get() returns early when it encounters an error.
  std::vector<ObjectID> ids_with_exception(ids.begin(), ids.end());
  ids_with_exception.push_back(ObjectID::FromRandom());
  std::vector<RayObject> buffers_with_exception(buffers.begin(), buffers.end());
  std::string error_string = std::to_string(ray::rpc::TASK_EXECUTION_EXCEPTION);
  // FIX: previously this copied error_string into a variable-length stack
  // array (`char error_buffer[error_string.size()]`), which is a non-standard
  // C++ extension (VLA); a std::vector provides the same bytes portably and
  // outlives the buffer wrapper for the remainder of the test.
  std::vector<uint8_t> error_buffer(error_string.begin(), error_string.end());
  buffers_with_exception.emplace_back(
      nullptr, std::make_shared<LocalMemoryBuffer>(error_buffer.data(),
                                                   error_buffer.size()));

  RAY_CHECK_OK(core_worker.Put(buffers_with_exception.back(), ids_with_exception.back()));
  RAY_CHECK_OK(core_worker.Get(ids_with_exception, -1, &results));

  // Test Wait(): the nonexistent object must never be reported ready.
  ObjectID non_existent_id = ObjectID::FromRandom();
  std::vector<ObjectID> all_ids(ids);
  all_ids.push_back(non_existent_id);

  std::vector<bool> wait_results;
  RAY_CHECK_OK(core_worker.Wait(all_ids, 2, -1, &wait_results));
  ASSERT_EQ(wait_results.size(), 3);
  ASSERT_EQ(wait_results, std::vector<bool>({true, true, false}));

  RAY_CHECK_OK(core_worker.Wait(all_ids, 3, 100, &wait_results));
  ASSERT_EQ(wait_results.size(), 3);
  ASSERT_EQ(wait_results, std::vector<bool>({true, true, false}));

  // Test Delete().
  // clear the reference held by PlasmaBuffer.
  results.clear();
  RAY_CHECK_OK(core_worker.Delete(ids, true, false));

  // Note that Delete() calls RayletClient::FreeObjects and would not
  // wait for objects being deleted, so wait a while for plasma store
  // to process the command.
  usleep(200 * 1000);
  ASSERT_TRUE(core_worker.Get(ids, 0, &results).IsTimedOut());
  ASSERT_EQ(results.size(), 2);
  ASSERT_TRUE(!results[0]);
  ASSERT_TRUE(!results[1]);
}
// Put objects from a worker on one node and Get/Wait/Delete them from a
// worker on a different node, exercising cross-node object transfer.
TEST_F(TwoNodeTest, TestObjectInterfaceCrossNodes) {
  CoreWorker worker1(WorkerType::DRIVER, Language::PYTHON, raylet_store_socket_names_[0],
                     raylet_socket_names_[0], NextJobId(), gcs_options_, "", "127.0.0.1",
                     node_manager_port, nullptr);

  CoreWorker worker2(WorkerType::DRIVER, Language::PYTHON, raylet_store_socket_names_[1],
                     raylet_socket_names_[1], NextJobId(), gcs_options_, "", "127.0.0.1",
                     node_manager_port, nullptr);

  uint8_t array1[] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint8_t array2[] = {10, 11, 12, 13, 14, 15};

  std::vector<std::shared_ptr<LocalMemoryBuffer>> buffers;
  buffers.emplace_back(std::make_shared<LocalMemoryBuffer>(array1, sizeof(array1)));
  buffers.emplace_back(std::make_shared<LocalMemoryBuffer>(array2, sizeof(array2)));

  // Objects are created on node 1 and consumed from node 2 below.
  std::vector<ObjectID> ids(buffers.size());
  for (size_t i = 0; i < ids.size(); i++) {
    RAY_CHECK_OK(worker1.Put(RayObject(buffers[i], nullptr), &ids[i]));
  }

  // Test Get() from remote node.
  std::vector<std::shared_ptr<RayObject>> results;
  RAY_CHECK_OK(worker2.Get(ids, -1, &results));

  ASSERT_EQ(results.size(), 2);
  for (size_t i = 0; i < ids.size(); i++) {
    ASSERT_EQ(results[i]->GetData()->Size(), buffers[i]->Size());
    ASSERT_EQ(*(results[i]->GetData()), *buffers[i]);
  }

  // Test Wait() from remote node: the random, never-created ID must not be
  // reported ready.
  ObjectID non_existent_id = ObjectID::FromRandom();
  std::vector<ObjectID> all_ids(ids);
  all_ids.push_back(non_existent_id);

  std::vector<bool> wait_results;
  RAY_CHECK_OK(worker2.Wait(all_ids, 2, -1, &wait_results));
  ASSERT_EQ(wait_results.size(), 3);
  ASSERT_EQ(wait_results, std::vector<bool>({true, true, false}));

  RAY_CHECK_OK(worker2.Wait(all_ids, 3, 100, &wait_results));
  ASSERT_EQ(wait_results.size(), 3);
  ASSERT_EQ(wait_results, std::vector<bool>({true, true, false}));

  // Test Delete() from all machines.
  // clear the reference held by PlasmaBuffer.
  results.clear();
  RAY_CHECK_OK(worker2.Delete(ids, false, false));

  // Note that Delete() calls RayletClient::FreeObjects and would not
  // wait for objects being deleted, so wait a while for plasma store
  // to process the command.
  usleep(1000 * 1000);

  // Verify objects are deleted from both machines.
  ASSERT_TRUE(worker2.Get(ids, 0, &results).IsTimedOut());
  ASSERT_EQ(results.size(), 2);
  ASSERT_TRUE(!results[0]);
  ASSERT_TRUE(!results[1]);

  // TODO(edoakes): this currently fails because the object is pinned on the
  // creating node. Should be fixed or removed once we decide the semantics
  // for Delete() with pinning.
  // ASSERT_TRUE(worker1.Get(ids, 0, &results).IsTimedOut());
  // ASSERT_EQ(results.size(), 2);
  // ASSERT_TRUE(!results[0]);
  // ASSERT_TRUE(!results[1]);
}
// Thin scenario runners: each pairs one of the helpers above with a cluster
// size (fixture), a resource requirement, and the direct-call flag. The
// cross-node variants request "resource1" — presumably only offered by the
// remote node, forcing cross-node scheduling; confirm against cluster setup.
TEST_F(SingleNodeTest, TestNormalTaskLocal) {
  std::unordered_map<std::string, double> resources;
  TestNormalTask(resources);
}

TEST_F(TwoNodeTest, TestNormalTaskCrossNodes) {
  std::unordered_map<std::string, double> resources;
  resources.emplace("resource1", 1);
  TestNormalTask(resources);
}

TEST_F(SingleNodeTest, TestActorTaskLocal) {
  std::unordered_map<std::string, double> resources;
  TestActorTask(resources, false);
}

TEST_F(TwoNodeTest, TestActorTaskCrossNodes) {
  std::unordered_map<std::string, double> resources;
  resources.emplace("resource1", 1);
  TestActorTask(resources, false);
}

TEST_F(SingleNodeTest, TestDirectActorTaskLocal) {
  std::unordered_map<std::string, double> resources;
  TestActorTask(resources, true);
}

TEST_F(TwoNodeTest, TestDirectActorTaskCrossNodes) {
  std::unordered_map<std::string, double> resources;
  resources.emplace("resource1", 1);
  TestActorTask(resources, true);
}

TEST_F(SingleNodeTest, TestDirectActorTaskLocalReconstruction) {
  std::unordered_map<std::string, double> resources;
  TestActorReconstruction(resources, true);
}

TEST_F(TwoNodeTest, TestDirectActorTaskCrossNodesReconstruction) {
  std::unordered_map<std::string, double> resources;
  resources.emplace("resource1", 1);
  TestActorReconstruction(resources, true);
}

TEST_F(SingleNodeTest, TestDirectActorTaskLocalFailure) {
  std::unordered_map<std::string, double> resources;
  TestActorFailure(resources, true);
}

TEST_F(TwoNodeTest, TestDirectActorTaskCrossNodesFailure) {
  std::unordered_map<std::string, double> resources;
  resources.emplace("resource1", 1);
  TestActorFailure(resources, true);
}
} // namespace ray
// Entry point. Expects five positional arguments naming the external
// binaries and port used to spin up the test clusters:
//   <store> <raylet> <node_manager_port> <raylet_monitor> <mock_worker>
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  RAY_CHECK(argc == 6);
  store_executable = argv[1];
  raylet_executable = argv[2];
  node_manager_port = std::stoi(argv[3]);
  raylet_monitor_executable = argv[4];
  mock_worker_executable = argv[5];
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/test/direct_actor_transport_test.cc
|
C++
|
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ray/common/task/task_spec.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/core_worker/transport/direct_task_transport.h"
#include "ray/raylet/raylet_client.h"
#include "ray/rpc/worker/core_worker_client.h"
#include "src/ray/util/test_util.h"
namespace ray {
using ::testing::_;
// Fake worker RPC client: records PushActorTask callbacks so tests can reply
// to them explicitly, and checks that tasks arrive in actor-counter order.
class MockWorkerClient : public rpc::CoreWorkerClientInterface {
 public:
  ray::Status PushActorTask(
      std::unique_ptr<rpc::PushTaskRequest> request,
      const rpc::ClientCallback<rpc::PushTaskReply> &callback) override {
    // Tasks must be pushed with strictly increasing actor counters.
    RAY_CHECK(counter == request->task_spec().actor_task_spec().actor_counter());
    ++counter;
    callbacks.push_back(callback);
    return Status::OK();
  }

  // Completes the oldest outstanding task with `status`. Returns false when
  // no push is pending.
  bool ReplyPushTask(Status status = Status::OK()) {
    if (callbacks.empty()) {
      return false;
    }
    auto pending = callbacks.front();
    pending(status, rpc::PushTaskReply());
    callbacks.pop_front();
    return true;
  }

  std::list<rpc::ClientCallback<rpc::PushTaskReply>> callbacks;
  uint64_t counter = 0;
};
// gMock-based task finisher: tests set EXPECT_CALL expectations on these
// methods to verify how many tasks complete, fail, or get inlined.
class MockTaskFinisher : public TaskFinisherInterface {
 public:
  MockTaskFinisher() {}

  MOCK_METHOD3(CompletePendingTask, void(const TaskID &, const rpc::PushTaskReply &,
                                         const rpc::Address *addr));
  MOCK_METHOD3(PendingTaskFailed,
               void(const TaskID &task_id, rpc::ErrorType error_type, Status *status));
  MOCK_METHOD1(OnTaskDependenciesInlined, void(const std::vector<ObjectID> &object_ids));
};
// Builds a minimal actor task spec targeting `actor_id` with the given
// sequence number (`counter`); all other fields keep protobuf defaults.
TaskSpecification CreateActorTaskHelper(ActorID actor_id, int64_t counter) {
  TaskSpecification task;
  auto &msg = task.GetMutableMessage();
  msg.set_task_id(TaskID::Nil().Binary());
  msg.set_type(TaskType::ACTOR_TASK);
  auto *actor_spec = msg.mutable_actor_task_spec();
  actor_spec->set_actor_id(actor_id.Binary());
  actor_spec->set_actor_counter(counter);
  return task;
}
// Fixture wiring a CoreWorkerDirectActorTaskSubmitter to mock dependencies:
// a mock worker client (captures pushed tasks), an in-memory object store,
// and a gMock task finisher.
class DirectActorTransportTest : public ::testing::Test {
 public:
  DirectActorTransportTest()
      : worker_client_(std::shared_ptr<MockWorkerClient>(new MockWorkerClient())),
        store_(std::shared_ptr<CoreWorkerMemoryStore>(new CoreWorkerMemoryStore())),
        task_finisher_(std::make_shared<MockTaskFinisher>()),
        submitter_(address_,
                   // The client factory hands back the same mock client no
                   // matter which address is being connected to.
                   [&](const std::string ip, int port) { return worker_client_; }, store_,
                   task_finisher_) {}

  rpc::Address address_;
  std::shared_ptr<MockWorkerClient> worker_client_;
  std::shared_ptr<CoreWorkerMemoryStore> store_;
  std::shared_ptr<MockTaskFinisher> task_finisher_;
  CoreWorkerDirectActorTaskSubmitter submitter_;
};
// Tasks submitted before the actor's address is known are queued and flushed
// on ConnectActor(); later submissions are pushed immediately.
TEST_F(DirectActorTransportTest, TestSubmitTask) {
  rpc::Address addr;
  ActorID actor_id = ActorID::Of(JobID::FromInt(0), TaskID::Nil(), 0);

  auto task = CreateActorTaskHelper(actor_id, 0);
  ASSERT_TRUE(submitter_.SubmitTask(task).ok());
  // Not connected yet, so nothing has been pushed to the worker.
  ASSERT_EQ(worker_client_->callbacks.size(), 0);

  submitter_.ConnectActor(actor_id, addr);
  ASSERT_EQ(worker_client_->callbacks.size(), 1);

  task = CreateActorTaskHelper(actor_id, 1);
  ASSERT_TRUE(submitter_.SubmitTask(task).ok());
  ASSERT_EQ(worker_client_->callbacks.size(), 2);

  // Replying OK to every pending push should complete both tasks with no
  // failures.
  EXPECT_CALL(*task_finisher_, CompletePendingTask(TaskID::Nil(), _, _))
      .Times(worker_client_->callbacks.size());
  EXPECT_CALL(*task_finisher_, PendingTaskFailed(_, _, _)).Times(0);
  while (!worker_client_->callbacks.empty()) {
    ASSERT_TRUE(worker_client_->ReplyPushTask());
  }
}
// Tasks with unresolved by-reference arguments are held back until their
// dependencies appear in the in-memory store (resolved in submission order
// here).
TEST_F(DirectActorTransportTest, TestDependencies) {
  rpc::Address addr;
  ActorID actor_id = ActorID::Of(JobID::FromInt(0), TaskID::Nil(), 0);
  submitter_.ConnectActor(actor_id, addr);
  ASSERT_EQ(worker_client_->callbacks.size(), 0);

  // Create two tasks for the actor with different arguments.
  ObjectID obj1 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  ObjectID obj2 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  auto task1 = CreateActorTaskHelper(actor_id, 0);
  task1.GetMutableMessage().add_args()->add_object_ids(obj1.Binary());
  auto task2 = CreateActorTaskHelper(actor_id, 1);
  task2.GetMutableMessage().add_args()->add_object_ids(obj2.Binary());

  // Neither task can be submitted yet because they are still waiting on
  // dependencies.
  ASSERT_TRUE(submitter_.SubmitTask(task1).ok());
  ASSERT_TRUE(submitter_.SubmitTask(task2).ok());
  ASSERT_EQ(worker_client_->callbacks.size(), 0);

  // Put the dependencies in the store in the same order as task submission:
  // each Put releases exactly one task.
  auto data = GenerateRandomObject();
  ASSERT_TRUE(store_->Put(*data, obj1).ok());
  ASSERT_EQ(worker_client_->callbacks.size(), 1);
  ASSERT_TRUE(store_->Put(*data, obj2).ok());
  ASSERT_EQ(worker_client_->callbacks.size(), 2);
}

// Same as above, but the dependencies resolve in reverse order: task2 is not
// pushed ahead of task1 even though its argument becomes ready first (the
// callback count stays 0 until obj1 arrives).
TEST_F(DirectActorTransportTest, TestOutOfOrderDependencies) {
  rpc::Address addr;
  ActorID actor_id = ActorID::Of(JobID::FromInt(0), TaskID::Nil(), 0);
  submitter_.ConnectActor(actor_id, addr);
  ASSERT_EQ(worker_client_->callbacks.size(), 0);

  // Create two tasks for the actor with different arguments.
  ObjectID obj1 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  ObjectID obj2 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  auto task1 = CreateActorTaskHelper(actor_id, 0);
  task1.GetMutableMessage().add_args()->add_object_ids(obj1.Binary());
  auto task2 = CreateActorTaskHelper(actor_id, 1);
  task2.GetMutableMessage().add_args()->add_object_ids(obj2.Binary());

  // Neither task can be submitted yet because they are still waiting on
  // dependencies.
  ASSERT_TRUE(submitter_.SubmitTask(task1).ok());
  ASSERT_TRUE(submitter_.SubmitTask(task2).ok());
  ASSERT_EQ(worker_client_->callbacks.size(), 0);

  // Put the dependencies in the store in the opposite order of task
  // submission.
  auto data = GenerateRandomObject();
  ASSERT_TRUE(store_->Put(*data, obj2).ok());
  ASSERT_EQ(worker_client_->callbacks.size(), 0);
  ASSERT_TRUE(store_->Put(*data, obj1).ok());
  ASSERT_EQ(worker_client_->callbacks.size(), 2);
}
// When the actor dies (every push reply is an IOError), all submitted tasks
// must be reported failed and none completed.
TEST_F(DirectActorTransportTest, TestActorFailure) {
  rpc::Address addr;
  ActorID actor_id = ActorID::Of(JobID::FromInt(0), TaskID::Nil(), 0);
  // FIX: removed the unused local `gcs::ActorTableData actor_data;` that was
  // declared here but never read or passed anywhere.
  submitter_.ConnectActor(actor_id, addr);
  ASSERT_EQ(worker_client_->callbacks.size(), 0);

  // Create two tasks for the actor.
  auto task1 = CreateActorTaskHelper(actor_id, 0);
  auto task2 = CreateActorTaskHelper(actor_id, 1);
  ASSERT_TRUE(submitter_.SubmitTask(task1).ok());
  ASSERT_TRUE(submitter_.SubmitTask(task2).ok());
  ASSERT_EQ(worker_client_->callbacks.size(), 2);

  // Simulate the actor dying. All submitted tasks should get failed.
  EXPECT_CALL(*task_finisher_, PendingTaskFailed(_, _, _)).Times(2);
  EXPECT_CALL(*task_finisher_, CompletePendingTask(_, _, _)).Times(0);
  while (!worker_client_->callbacks.empty()) {
    ASSERT_TRUE(worker_client_->ReplyPushTask(Status::IOError("")));
  }
}
} // namespace ray
// Entry point: these transport unit tests need no external cluster binaries,
// so no extra arguments are parsed.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/test/direct_task_transport_test.cc
|
C++
|
#include "ray/core_worker/transport/direct_task_transport.h"
#include "gtest/gtest.h"
#include "ray/common/task/task_spec.h"
#include "ray/common/task/task_util.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/raylet/raylet_client.h"
#include "ray/rpc/worker/core_worker_client.h"
#include "src/ray/util/test_util.h"
namespace ray {
// Used to prevent leases from timing out when not testing that logic. It would
// be better to use a mock clock or lease manager interface, but that's high
// overhead for the very simple timeout logic we currently have.
// NOTE(review): presumably milliseconds, matching the other timeout values in
// these tests — confirm against the lease manager's unit convention.
int64_t kLongTimeout = 1024 * 1024 * 1024;
// Fake worker RPC client for normal (non-actor) tasks: queues PushNormalTask
// callbacks so tests can complete them one at a time.
class MockWorkerClient : public rpc::CoreWorkerClientInterface {
 public:
  ray::Status PushNormalTask(
      std::unique_ptr<rpc::PushTaskRequest> request,
      const rpc::ClientCallback<rpc::PushTaskReply> &callback) override {
    callbacks.push_back(callback);
    return Status::OK();
  }

  // Replies to the oldest outstanding task. When `exit` is set, the reply
  // reports the worker is exiting. Returns false if nothing is pending.
  bool ReplyPushTask(Status status = Status::OK(), bool exit = false) {
    if (callbacks.empty()) {
      return false;
    }
    auto pending = callbacks.front();
    rpc::PushTaskReply reply;
    if (exit) {
      reply.set_worker_exiting(true);
    }
    pending(status, reply);
    callbacks.pop_front();
    return true;
  }

  std::list<rpc::ClientCallback<rpc::PushTaskReply>> callbacks;
};
// Task finisher stub that only counts completions, failures, and inlined
// dependency notifications for the assertions in these tests.
class MockTaskFinisher : public TaskFinisherInterface {
 public:
  MockTaskFinisher() {}

  void CompletePendingTask(const TaskID &, const rpc::PushTaskReply &,
                           const rpc::Address *actor_addr) override {
    num_tasks_complete++;
  }

  void PendingTaskFailed(const TaskID &task_id, rpc::ErrorType error_type,
                         Status *status) override {
    num_tasks_failed++;
  }

  void OnTaskDependenciesInlined(const std::vector<ObjectID> &object_ids) override {
    num_inlined += object_ids.size();
  }

  // Counters inspected directly by the tests.
  int num_tasks_complete = 0;
  int num_tasks_failed = 0;
  int num_inlined = 0;
};
class MockRayletClient : public WorkerLeaseInterface {
public:
ray::Status ReturnWorker(int worker_port, const WorkerID &worker_id,
bool disconnect_worker) override {
if (disconnect_worker) {
num_workers_disconnected++;
} else {
num_workers_returned++;
}
return Status::OK();
}
ray::Status RequestWorkerLease(
const ray::TaskSpecification &resource_spec,
const rpc::ClientCallback<rpc::RequestWorkerLeaseReply> &callback) override {
num_workers_requested += 1;
callbacks.push_back(callback);
return Status::OK();
}
// Trigger reply to RequestWorkerLease.
bool GrantWorkerLease(const std::string &address, int port,
const ClientID &retry_at_raylet_id) {
rpc::RequestWorkerLeaseReply reply;
if (!retry_at_raylet_id.IsNil()) {
reply.mutable_retry_at_raylet_address()->set_ip_address(address);
reply.mutable_retry_at_raylet_address()->set_port(port);
reply.mutable_retry_at_raylet_address()->set_raylet_id(retry_at_raylet_id.Binary());
} else {
reply.mutable_worker_address()->set_ip_address(address);
reply.mutable_worker_address()->set_port(port);
reply.mutable_worker_address()->set_raylet_id(retry_at_raylet_id.Binary());
}
if (callbacks.size() == 0) {
return false;
} else {
auto callback = callbacks.front();
callback(Status::OK(), reply);
callbacks.pop_front();
return true;
}
}
~MockRayletClient() {}
int num_workers_requested = 0;
int num_workers_returned = 0;
int num_workers_disconnected = 0;
std::list<rpc::ClientCallback<rpc::RequestWorkerLeaseReply>> callbacks = {};
};
// Verifies that GetOrPromoteToPlasma triggers the plasma-promotion callback
// only for objects that were requested before being put into the store.
TEST(TestMemoryStore, TestPromoteToPlasma) {
  // Counts invocations of the store's plasma-promotion callback.
  // BUGFIX: this was declared `bool`, which cannot count (`bool += 1` just
  // saturates at true and is deprecated); the test compares it to 0 and 1,
  // so it must be an int.
  int num_plasma_puts = 0;
  auto mem = std::make_shared<CoreWorkerMemoryStore>(
      [&](const RayObject &obj, const ObjectID &obj_id) { num_plasma_puts += 1; });
  ObjectID obj1 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  ObjectID obj2 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  auto data = GenerateRandomObject();
  ASSERT_TRUE(mem->Put(*data, obj1).ok());
  // Getting an already-present object does not promote it.
  ASSERT_TRUE(mem->GetOrPromoteToPlasma(obj1) != nullptr);
  ASSERT_TRUE(num_plasma_puts == 0);
  // Getting an object that doesn't exist yet marks it for promotion; the
  // callback fires when the value is eventually Put.
  ASSERT_TRUE(mem->GetOrPromoteToPlasma(obj2) == nullptr);
  ASSERT_TRUE(num_plasma_puts == 0);
  ASSERT_TRUE(mem->Put(*data, obj2).ok());
  ASSERT_TRUE(num_plasma_puts == 1);
  // The next time you get it, it's already there so no need to promote.
  ASSERT_TRUE(mem->GetOrPromoteToPlasma(obj2) != nullptr);
  ASSERT_TRUE(num_plasma_puts == 1);
}
// A task with no arguments resolves synchronously and inlines nothing.
TEST(LocalDependencyResolverTest, TestNoDependencies) {
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  LocalDependencyResolver resolver(store, task_finisher);
  TaskSpecification task;
  bool resolved = false;
  resolver.ResolveDependencies(task, [&resolved]() { resolved = true; });
  // The callback must have run inline, before ResolveDependencies returned.
  ASSERT_TRUE(resolved);
  ASSERT_EQ(task_finisher->num_inlined, 0);
}
// A plain (non-direct-call) object ID is a plasma object; the resolver must
// neither block on it nor count it as inlined.
TEST(LocalDependencyResolverTest, TestIgnorePlasmaDependencies) {
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  LocalDependencyResolver resolver(store, task_finisher);
  ObjectID plasma_dep = ObjectID::FromRandom();
  TaskSpecification task;
  task.GetMutableMessage().add_args()->add_object_ids(plasma_dep.Binary());
  bool resolved = false;
  resolver.ResolveDependencies(task, [&resolved]() { resolved = true; });
  // Resolution completes immediately: plasma dependencies are ignored.
  ASSERT_TRUE(resolved);
  ASSERT_EQ(resolver.NumPendingTasks(), 0);
  ASSERT_EQ(task_finisher->num_inlined, 0);
}
// A direct-call object whose stored value is the OBJECT_IN_PLASMA marker must
// remain a by-reference argument (the real value lives in plasma), while the
// argument ID keeps its direct-call transport type.
TEST(LocalDependencyResolverTest, TestHandlePlasmaPromotion) {
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  LocalDependencyResolver resolver(store, task_finisher);
  ObjectID obj1 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  // Build an object with no data payload whose metadata is the "in plasma"
  // error marker. The const_cast is safe here: LocalMemoryBuffer only reads,
  // and `meta` outlives the buffer within this test.
  std::string meta = std::to_string(static_cast<int>(rpc::ErrorType::OBJECT_IN_PLASMA));
  auto metadata = const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(meta.data()));
  auto meta_buffer = std::make_shared<LocalMemoryBuffer>(metadata, meta.size());
  auto data = RayObject(nullptr, meta_buffer);
  ASSERT_TRUE(store->Put(data, obj1).ok());
  TaskSpecification task;
  task.GetMutableMessage().add_args()->add_object_ids(obj1.Binary());
  ASSERT_TRUE(task.ArgId(0, 0).IsDirectCallType());
  bool ok = false;
  resolver.ResolveDependencies(task, [&ok]() { ok = true; });
  ASSERT_TRUE(ok);
  // The argument was NOT inlined: it stays a reference.
  ASSERT_TRUE(task.ArgByRef(0));
  // Checks that the object id is still a direct call id.
  ASSERT_TRUE(task.ArgId(0, 0).IsDirectCallType());
  ASSERT_EQ(resolver.NumPendingTasks(), 0);
  ASSERT_EQ(task_finisher->num_inlined, 0);
}
// Direct-call dependencies whose values are already in the local store are
// inlined into the task spec immediately.
TEST(LocalDependencyResolverTest, TestInlineLocalDependencies) {
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  LocalDependencyResolver resolver(store, task_finisher);
  ObjectID obj1 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  ObjectID obj2 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  auto data = GenerateRandomObject();
  // Make both values available locally before resolving.
  ASSERT_TRUE(store->Put(*data, obj1).ok());
  ASSERT_TRUE(store->Put(*data, obj2).ok());
  TaskSpecification task;
  task.GetMutableMessage().add_args()->add_object_ids(obj1.Binary());
  task.GetMutableMessage().add_args()->add_object_ids(obj2.Binary());
  bool resolved = false;
  resolver.ResolveDependencies(task, [&resolved]() { resolved = true; });
  // The task proto should now carry inline argument values, not references.
  ASSERT_TRUE(resolved);
  ASSERT_FALSE(task.ArgByRef(0));
  ASSERT_FALSE(task.ArgByRef(1));
  ASSERT_NE(task.ArgData(0), nullptr);
  ASSERT_NE(task.ArgData(1), nullptr);
  ASSERT_EQ(resolver.NumPendingTasks(), 0);
  ASSERT_EQ(task_finisher->num_inlined, 2);
}
// Dependencies missing from the store leave the task pending; once the values
// arrive via Put, they are inlined and the resolve callback fires.
TEST(LocalDependencyResolverTest, TestInlinePendingDependencies) {
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  LocalDependencyResolver resolver(store, task_finisher);
  ObjectID obj1 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  ObjectID obj2 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  auto data = GenerateRandomObject();
  TaskSpecification task;
  task.GetMutableMessage().add_args()->add_object_ids(obj1.Binary());
  task.GetMutableMessage().add_args()->add_object_ids(obj2.Binary());
  bool ok = false;
  resolver.ResolveDependencies(task, [&ok]() { ok = true; });
  // Neither value is available yet, so resolution must be blocked.
  ASSERT_EQ(resolver.NumPendingTasks(), 1);
  ASSERT_TRUE(!ok);
  ASSERT_TRUE(store->Put(*data, obj1).ok());
  ASSERT_TRUE(store->Put(*data, obj2).ok());
  // Tests that the task proto was rewritten to have inline argument values after
  // resolution completes.
  ASSERT_TRUE(ok);
  ASSERT_FALSE(task.ArgByRef(0));
  ASSERT_FALSE(task.ArgByRef(1));
  ASSERT_NE(task.ArgData(0), nullptr);
  ASSERT_NE(task.ArgData(1), nullptr);
  ASSERT_EQ(resolver.NumPendingTasks(), 0);
  ASSERT_EQ(task_finisher->num_inlined, 2);
}
// Builds a minimal normal-task spec with the given resource requirements and
// function descriptor; all IDs and the caller address are left nil/empty.
// `resources` is used for both the required and placement resources.
TaskSpecification BuildTaskSpec(const std::unordered_map<std::string, double> &resources,
                                const std::vector<std::string> &function_descriptor) {
  TaskSpecBuilder builder;
  rpc::Address empty_address;
  builder.SetCommonTaskSpec(TaskID::Nil(), Language::PYTHON, function_descriptor,
                            JobID::Nil(), TaskID::Nil(), 0, TaskID::Nil(), empty_address,
                            1, true, resources, resources);
  return builder.Build();
}
// Happy path: submitting one task requests one lease, pushes the task once the
// lease is granted, and returns the worker cleanly after the task completes.
TEST(DirectTaskTransportTest, TestSubmitOneTask) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory, nullptr, store,
                                          task_finisher, ClientID::Nil(), kLongTimeout);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task = BuildTaskSpec(empty_resources, empty_descriptor);
  // Submission requests a lease but pushes nothing until one is granted.
  ASSERT_TRUE(submitter.SubmitTask(task).ok());
  ASSERT_EQ(raylet_client->num_workers_requested, 1);
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(worker_client->callbacks.size(), 0);
  // Granting the lease pushes the task to the worker.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1234, ClientID::Nil()))
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  ASSERT_EQ(task_finisher->num_tasks_complete, 0);
  ASSERT_EQ(task_finisher->num_tasks_failed, 0);
  // Successful completion returns (does not disconnect) the worker.
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(raylet_client->num_workers_returned, 1);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
  ASSERT_EQ(task_finisher->num_tasks_complete, 1);
  ASSERT_EQ(task_finisher->num_tasks_failed, 0);
}
// A system-level push failure (e.g. the worker process died) must mark the
// task failed and disconnect — not cleanly return — the leased worker.
TEST(DirectTaskTransportTest, TestHandleTaskFailure) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory, nullptr, store,
                                          task_finisher, ClientID::Nil(), kLongTimeout);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task = BuildTaskSpec(empty_resources, empty_descriptor);
  ASSERT_TRUE(submitter.SubmitTask(task).ok());
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1234, ClientID::Nil()));
  // Simulate a system failure, i.e., worker died unexpectedly.
  ASSERT_TRUE(worker_client->ReplyPushTask(Status::IOError("oops")));
  ASSERT_EQ(worker_client->callbacks.size(), 0);
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 1);
  ASSERT_EQ(task_finisher->num_tasks_complete, 0);
  ASSERT_EQ(task_finisher->num_tasks_failed, 1);
}
// With multiple queued tasks, a new lease is requested each time one is
// granted (pipelining), up to the number of pending tasks; all workers are
// returned once their tasks finish.
TEST(DirectTaskTransportTest, TestConcurrentWorkerLeases) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory, nullptr, store,
                                          task_finisher, ClientID::Nil(), kLongTimeout);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task1 = BuildTaskSpec(empty_resources, empty_descriptor);
  TaskSpecification task2 = BuildTaskSpec(empty_resources, empty_descriptor);
  TaskSpecification task3 = BuildTaskSpec(empty_resources, empty_descriptor);
  ASSERT_TRUE(submitter.SubmitTask(task1).ok());
  ASSERT_TRUE(submitter.SubmitTask(task2).ok());
  ASSERT_TRUE(submitter.SubmitTask(task3).ok());
  // Only one lease is in flight at a time per scheduling key.
  ASSERT_EQ(raylet_client->num_workers_requested, 1);
  // Task 1 is pushed; worker 2 is requested.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1000, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  ASSERT_EQ(raylet_client->num_workers_requested, 2);
  // Task 2 is pushed; worker 3 is requested.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1001, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 2);
  ASSERT_EQ(raylet_client->num_workers_requested, 3);
  // Task 3 is pushed; no more workers requested.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1002, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 3);
  ASSERT_EQ(raylet_client->num_workers_requested, 3);
  // All workers returned.
  while (!worker_client->callbacks.empty()) {
    ASSERT_TRUE(worker_client->ReplyPushTask());
  }
  ASSERT_EQ(raylet_client->num_workers_returned, 3);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
  ASSERT_EQ(task_finisher->num_tasks_complete, 3);
  ASSERT_EQ(task_finisher->num_tasks_failed, 0);
}
// A leased worker is reused for queued tasks of the same scheduling key and is
// only returned once the queue is drained; a lease granted after the queue is
// empty is returned immediately.
TEST(DirectTaskTransportTest, TestReuseWorkerLease) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory, nullptr, store,
                                          task_finisher, ClientID::Nil(), kLongTimeout);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task1 = BuildTaskSpec(empty_resources, empty_descriptor);
  TaskSpecification task2 = BuildTaskSpec(empty_resources, empty_descriptor);
  TaskSpecification task3 = BuildTaskSpec(empty_resources, empty_descriptor);
  ASSERT_TRUE(submitter.SubmitTask(task1).ok());
  ASSERT_TRUE(submitter.SubmitTask(task2).ok());
  ASSERT_TRUE(submitter.SubmitTask(task3).ok());
  ASSERT_EQ(raylet_client->num_workers_requested, 1);
  // Task 1 is pushed.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1000, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  ASSERT_EQ(raylet_client->num_workers_requested, 2);
  // Task 1 finishes, Task 2 is scheduled on the same worker.
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  // Task 2 finishes, Task 3 is scheduled on the same worker.
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  // Task 3 finishes, the worker is returned.
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(raylet_client->num_workers_returned, 1);
  // The second lease request is returned immediately (no tasks left to run).
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1001, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 0);
  ASSERT_EQ(raylet_client->num_workers_returned, 2);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
  ASSERT_EQ(task_finisher->num_tasks_complete, 3);
  ASSERT_EQ(task_finisher->num_tasks_failed, 0);
}
// After a push error the worker must NOT be reused for the next queued task;
// it is disconnected and the next task runs on a fresh lease.
TEST(DirectTaskTransportTest, TestWorkerNotReusedOnError) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory, nullptr, store,
                                          task_finisher, ClientID::Nil(), kLongTimeout);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task1 = BuildTaskSpec(empty_resources, empty_descriptor);
  TaskSpecification task2 = BuildTaskSpec(empty_resources, empty_descriptor);
  ASSERT_TRUE(submitter.SubmitTask(task1).ok());
  ASSERT_TRUE(submitter.SubmitTask(task2).ok());
  ASSERT_EQ(raylet_client->num_workers_requested, 1);
  // Task 1 is pushed.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1000, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  ASSERT_EQ(raylet_client->num_workers_requested, 2);
  // Task 1 finishes with failure; the worker is disconnected, and Task 2 is
  // NOT pushed onto it.
  ASSERT_TRUE(worker_client->ReplyPushTask(Status::IOError("worker dead")));
  ASSERT_EQ(worker_client->callbacks.size(), 0);
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 1);
  // Task 2 runs successfully on the second worker.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1001, ClientID::Nil()));
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(raylet_client->num_workers_returned, 1);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 1);
  ASSERT_EQ(task_finisher->num_tasks_complete, 1);
  ASSERT_EQ(task_finisher->num_tasks_failed, 1);
}
// When the reply reports the worker is exiting, the lease must be neither
// returned nor disconnected — the worker is simply gone.
TEST(DirectTaskTransportTest, TestWorkerNotReturnedOnExit) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory, nullptr, store,
                                          task_finisher, ClientID::Nil(), kLongTimeout);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task1 = BuildTaskSpec(empty_resources, empty_descriptor);
  ASSERT_TRUE(submitter.SubmitTask(task1).ok());
  ASSERT_EQ(raylet_client->num_workers_requested, 1);
  // Task 1 is pushed.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1000, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  // Task 1 finishes with exit status; the worker is not returned.
  ASSERT_TRUE(worker_client->ReplyPushTask(Status::OK(), /*exit=*/true));
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
  // The task itself still completed successfully.
  ASSERT_EQ(task_finisher->num_tasks_complete, 1);
  ASSERT_EQ(task_finisher->num_tasks_failed, 0);
}
// A lease reply that names another raylet ("spillback") must cause the
// submitter to retry the lease at that raylet, and the worker must be returned
// to the raylet that ultimately granted it — not the local one.
TEST(DirectTaskTransportTest, TestSpillback) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  std::unordered_map<int, std::shared_ptr<MockRayletClient>> remote_lease_clients;
  auto lease_client_factory = [&](const std::string &ip, int port) {
    // We should not create a connection to the same raylet more than once.
    RAY_CHECK(remote_lease_clients.count(port) == 0);
    auto client = std::make_shared<MockRayletClient>();
    remote_lease_clients[port] = client;
    return client;
  };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory,
                                          lease_client_factory, store, task_finisher,
                                          ClientID::Nil(), kLongTimeout);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task = BuildTaskSpec(empty_resources, empty_descriptor);
  ASSERT_TRUE(submitter.SubmitTask(task).ok());
  ASSERT_EQ(raylet_client->num_workers_requested, 1);
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(worker_client->callbacks.size(), 0);
  ASSERT_EQ(remote_lease_clients.size(), 0);
  // Spillback to a remote node.
  auto remote_raylet_id = ClientID::FromRandom();
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 7777, remote_raylet_id));
  ASSERT_EQ(remote_lease_clients.count(7777), 1);
  // There should be no more callbacks on the local client.
  ASSERT_FALSE(raylet_client->GrantWorkerLease("remote", 1234, ClientID::Nil()));
  // Trigger retry at the remote node.
  ASSERT_TRUE(
      remote_lease_clients[7777]->GrantWorkerLease("remote", 1234, ClientID::Nil()));
  // The worker is returned to the remote node, not the local one.
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(remote_lease_clients[7777]->num_workers_returned, 1);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
  ASSERT_EQ(remote_lease_clients[7777]->num_workers_disconnected, 0);
  ASSERT_EQ(task_finisher->num_tasks_complete, 1);
  ASSERT_EQ(task_finisher->num_tasks_failed, 0);
}
// Spillback that bounces back to the local raylet must reuse the existing
// local lease client (no new connection) and return the worker locally.
TEST(DirectTaskTransportTest, TestSpillbackRoundTrip) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  std::unordered_map<int, std::shared_ptr<MockRayletClient>> remote_lease_clients;
  auto lease_client_factory = [&](const std::string &ip, int port) {
    // We should not create a connection to the same raylet more than once.
    RAY_CHECK(remote_lease_clients.count(port) == 0);
    auto client = std::make_shared<MockRayletClient>();
    remote_lease_clients[port] = client;
    return client;
  };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  auto local_raylet_id = ClientID::FromRandom();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory,
                                          lease_client_factory, store, task_finisher,
                                          local_raylet_id, kLongTimeout);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task = BuildTaskSpec(empty_resources, empty_descriptor);
  ASSERT_TRUE(submitter.SubmitTask(task).ok());
  ASSERT_EQ(raylet_client->num_workers_requested, 1);
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(worker_client->callbacks.size(), 0);
  ASSERT_EQ(remote_lease_clients.size(), 0);
  // Spillback to a remote node.
  auto remote_raylet_id = ClientID::FromRandom();
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 7777, remote_raylet_id));
  ASSERT_EQ(remote_lease_clients.count(7777), 1);
  ASSERT_FALSE(raylet_client->GrantWorkerLease("remote", 1234, ClientID::Nil()));
  // Trigger a spillback back to the local node.
  ASSERT_TRUE(
      remote_lease_clients[7777]->GrantWorkerLease("local", 1234, local_raylet_id));
  // We should not have created another lease client to the local raylet.
  ASSERT_EQ(remote_lease_clients.size(), 1);
  // There should be no more callbacks on the remote node.
  ASSERT_FALSE(
      remote_lease_clients[7777]->GrantWorkerLease("remote", 1234, ClientID::Nil()));
  // The worker is returned to the local node.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("local", 1234, ClientID::Nil()));
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(raylet_client->num_workers_returned, 1);
  ASSERT_EQ(remote_lease_clients[7777]->num_workers_returned, 0);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
  ASSERT_EQ(remote_lease_clients[7777]->num_workers_disconnected, 0);
  ASSERT_EQ(task_finisher->num_tasks_complete, 1);
  ASSERT_EQ(task_finisher->num_tasks_failed, 0);
}
// Helper to run a test that checks that 'same1' and 'same2' are treated as the same
// resource shape, while 'different' is treated as a separate shape.
// Expected flow: same1/same2 share one lease queue (so granting one lease lets
// the worker be reused for same2), while 'different' needs its own lease.
void TestSchedulingKey(const std::shared_ptr<CoreWorkerMemoryStore> store,
                       const TaskSpecification &same1, const TaskSpecification &same2,
                       const TaskSpecification &different) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory, nullptr, store,
                                          task_finisher, ClientID::Nil(), kLongTimeout);
  ASSERT_TRUE(submitter.SubmitTask(same1).ok());
  ASSERT_TRUE(submitter.SubmitTask(same2).ok());
  ASSERT_TRUE(submitter.SubmitTask(different).ok());
  // One lease request per distinct scheduling key.
  ASSERT_EQ(raylet_client->num_workers_requested, 2);
  // same1 is pushed.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1000, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  // Another worker is requested because same2 is pending.
  ASSERT_EQ(raylet_client->num_workers_requested, 3);
  // same1 runs successfully. Worker isn't returned.
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
  // same2 is pushed (reusing same1's worker).
  ASSERT_EQ(worker_client->callbacks.size(), 1);
  // different is pushed.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1001, ClientID::Nil()));
  ASSERT_EQ(worker_client->callbacks.size(), 2);
  ASSERT_EQ(raylet_client->num_workers_requested, 3);
  // same2 runs successfully. Worker is returned.
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(raylet_client->num_workers_returned, 1);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
  // different runs successfully. Worker is returned.
  ASSERT_TRUE(worker_client->ReplyPushTask())
  ASSERT_EQ(raylet_client->num_workers_returned, 2);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 0);
}
// Exercises TestSchedulingKey with three axes of the scheduling key:
// resources, function descriptor, and (plasma-only) dependency sets.
TEST(DirectTaskTransportTest, TestSchedulingKeys) {
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  std::unordered_map<std::string, double> resources1({{"a", 1.0}});
  std::unordered_map<std::string, double> resources2({{"b", 2.0}});
  std::vector<std::string> descriptor1({"a"});
  std::vector<std::string> descriptor2({"b"});
  // Tasks with different resources should request different worker leases.
  RAY_LOG(INFO) << "Test different resources";
  TestSchedulingKey(store, BuildTaskSpec(resources1, descriptor1),
                    BuildTaskSpec(resources1, descriptor1),
                    BuildTaskSpec(resources2, descriptor1));
  // Tasks with different function descriptors should request different worker leases.
  RAY_LOG(INFO) << "Test different descriptors";
  TestSchedulingKey(store, BuildTaskSpec(resources1, descriptor1),
                    BuildTaskSpec(resources1, descriptor1),
                    BuildTaskSpec(resources1, descriptor2));
  // All four IDs are direct-call IDs; plasma1/plasma2 become plasma objects by
  // storing the OBJECT_IN_PLASMA marker (forcing promotion) below.
  ObjectID direct1 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  ObjectID direct2 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  ObjectID plasma1 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  ObjectID plasma2 = ObjectID::FromRandom().WithTransportType(TaskTransportType::DIRECT);
  // Ensure the data is already present in the local store for direct call objects.
  auto data = GenerateRandomObject();
  ASSERT_TRUE(store->Put(*data, direct1).ok());
  ASSERT_TRUE(store->Put(*data, direct2).ok());
  // Force plasma objects to be promoted.
  std::string meta = std::to_string(static_cast<int>(rpc::ErrorType::OBJECT_IN_PLASMA));
  auto metadata = const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(meta.data()));
  auto meta_buffer = std::make_shared<LocalMemoryBuffer>(metadata, meta.size());
  auto plasma_data = RayObject(nullptr, meta_buffer);
  ASSERT_TRUE(store->Put(plasma_data, plasma1).ok());
  ASSERT_TRUE(store->Put(plasma_data, plasma2).ok());
  TaskSpecification same_deps_1 = BuildTaskSpec(resources1, descriptor1);
  same_deps_1.GetMutableMessage().add_args()->add_object_ids(direct1.Binary());
  same_deps_1.GetMutableMessage().add_args()->add_object_ids(plasma1.Binary());
  TaskSpecification same_deps_2 = BuildTaskSpec(resources1, descriptor1);
  same_deps_2.GetMutableMessage().add_args()->add_object_ids(direct1.Binary());
  same_deps_2.GetMutableMessage().add_args()->add_object_ids(direct2.Binary());
  same_deps_2.GetMutableMessage().add_args()->add_object_ids(plasma1.Binary());
  TaskSpecification different_deps = BuildTaskSpec(resources1, descriptor1);
  different_deps.GetMutableMessage().add_args()->add_object_ids(direct1.Binary());
  different_deps.GetMutableMessage().add_args()->add_object_ids(direct2.Binary());
  different_deps.GetMutableMessage().add_args()->add_object_ids(plasma2.Binary());
  // Tasks with different plasma dependencies should request different worker leases,
  // but direct call dependencies shouldn't be considered.
  RAY_LOG(INFO) << "Test different dependencies";
  TestSchedulingKey(store, same_deps_1, same_deps_2, different_deps);
}
// With a very short lease timeout (5 ms), a worker whose lease has expired is
// returned after its current task finishes instead of being reused.
TEST(DirectTaskTransportTest, TestWorkerLeaseTimeout) {
  rpc::Address address;
  auto raylet_client = std::make_shared<MockRayletClient>();
  auto worker_client = std::make_shared<MockWorkerClient>();
  auto store = std::make_shared<CoreWorkerMemoryStore>();
  auto factory = [&](const std::string &addr, int port) { return worker_client; };
  auto task_finisher = std::make_shared<MockTaskFinisher>();
  CoreWorkerDirectTaskSubmitter submitter(address, raylet_client, factory, nullptr, store,
                                          task_finisher, ClientID::Nil(),
                                          /*lease_timeout_ms=*/5);
  std::unordered_map<std::string, double> empty_resources;
  std::vector<std::string> empty_descriptor;
  TaskSpecification task1 = BuildTaskSpec(empty_resources, empty_descriptor);
  TaskSpecification task2 = BuildTaskSpec(empty_resources, empty_descriptor);
  TaskSpecification task3 = BuildTaskSpec(empty_resources, empty_descriptor);
  ASSERT_TRUE(submitter.SubmitTask(task1).ok());
  ASSERT_TRUE(submitter.SubmitTask(task2).ok());
  ASSERT_TRUE(submitter.SubmitTask(task3).ok());
  ASSERT_EQ(raylet_client->num_workers_requested, 1);
  // Task 1 is pushed.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1000, ClientID::Nil()));
  ASSERT_EQ(raylet_client->num_workers_requested, 2);
  // Task 1 finishes with failure; the worker is disconnected due to the error
  // even though the lease hasn't timed out.
  ASSERT_TRUE(worker_client->ReplyPushTask(Status::IOError("worker dead")));
  ASSERT_EQ(raylet_client->num_workers_returned, 0);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 1);
  // Task 2 runs successfully on the second worker; the worker is returned due to the
  // timeout.
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1001, ClientID::Nil()));
  usleep(10 * 1000);  // Sleep for 10ms, causing the lease to time out.
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(raylet_client->num_workers_returned, 1);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 1);
  // Task 3 runs successfully on the third worker; the worker is returned even though it
  // hasn't timed out (no more tasks are queued).
  ASSERT_TRUE(raylet_client->GrantWorkerLease("localhost", 1002, ClientID::Nil()));
  ASSERT_TRUE(worker_client->ReplyPushTask());
  ASSERT_EQ(worker_client->callbacks.size(), 0);
  ASSERT_EQ(raylet_client->num_workers_returned, 2);
  ASSERT_EQ(raylet_client->num_workers_disconnected, 1);
}
} // namespace ray
// Test entry point: runs every gtest case registered above.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/test/mock_worker.cc
|
C++
|
#define BOOST_BIND_NO_PLACEHOLDERS
#include "ray/core_worker/context.h"
#include "ray/core_worker/core_worker.h"
#include "src/ray/util/test_util.h"
using namespace std::placeholders;
namespace ray {
/// A mock C++ worker used by core_worker_test.cc to verify the task
/// submission/execution interfaces in both single-node and cross-node
/// scenarios. Because the raylet client can only be called by a real worker
/// process, core_worker_test.cc has to use this program binary to start the
/// actual worker process. In the tests, the task submission interfaces are
/// called in core_worker_test, while the task execution interfaces are called
/// in this file; see that test for more details on how this class is used.
class MockWorker {
 public:
  /// Connects a CoreWorker to the given plasma store / raylet sockets and
  /// registers ExecuteTask as the task execution callback.
  MockWorker(const std::string &store_socket, const std::string &raylet_socket,
             int node_manager_port, const gcs::GcsClientOptions &gcs_options)
      : worker_(WorkerType::WORKER, Language::PYTHON, store_socket, raylet_socket,
                JobID::FromInt(1), gcs_options, /*log_dir=*/"",
                /*node_id_address=*/"127.0.0.1", node_manager_port,
                std::bind(&MockWorker::ExecuteTask, this, _1, _2, _3, _4, _5, _6, _7)) {}

  /// Blocks, serving task execution requests until the worker exits.
  void StartExecutingTasks() { worker_.StartExecutingTasks(); }

 private:
  /// Dispatches an incoming task based on its (single-element) function
  /// descriptor. Returns TypeError for unknown descriptors.
  Status ExecuteTask(TaskType task_type, const RayFunction &ray_function,
                     const std::unordered_map<std::string, double> &required_resources,
                     const std::vector<std::shared_ptr<RayObject>> &args,
                     const std::vector<ObjectID> &arg_reference_ids,
                     const std::vector<ObjectID> &return_ids,
                     std::vector<std::shared_ptr<RayObject>> *results) {
    // Note that this doesn't include dummy object id.
    const std::vector<std::string> &function_descriptor =
        ray_function.GetFunctionDescriptor();
    // BUGFIX: the previous check also asserted `return_ids.size() >= 0`, which
    // is always true for an unsigned size and therefore checked nothing. Only
    // the descriptor arity is a real invariant here.
    RAY_CHECK(1 == function_descriptor.size());
    if ("actor creation task" == function_descriptor[0]) {
      return Status::OK();
    } else if ("GetWorkerPid" == function_descriptor[0]) {
      // Get mock worker pid
      return GetWorkerPid(results);
    } else if ("MergeInputArgsAsOutput" == function_descriptor[0]) {
      // Merge input args and write the merged content to each of return ids
      return MergeInputArgsAsOutput(args, return_ids, results);
    } else {
      return Status::TypeError("Unknown function descriptor: " + function_descriptor[0]);
    }
  }

  /// Writes this process's pid, formatted as a decimal string, into a single
  /// result object.
  Status GetWorkerPid(std::vector<std::shared_ptr<RayObject>> *results) {
    // Save the pid of current process to the return object.
    std::string pid_string = std::to_string(static_cast<int>(getpid()));
    auto data =
        const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(pid_string.data()));
    auto memory_buffer =
        std::make_shared<LocalMemoryBuffer>(data, pid_string.size(), true);
    results->push_back(std::make_shared<RayObject>(memory_buffer, nullptr));
    return Status::OK();
  }

  /// Concatenates all input arg payloads and stores the merged buffer once per
  /// return ID. If the payload begins with SHOULD_CHECK_MESSAGE_ORDER, also
  /// verifies tasks arrive with strictly increasing sequence numbers.
  Status MergeInputArgsAsOutput(const std::vector<std::shared_ptr<RayObject>> &args,
                                const std::vector<ObjectID> &return_ids,
                                std::vector<std::shared_ptr<RayObject>> *results) {
    // Merge all the content from input args.
    std::vector<uint8_t> buffer;
    for (const auto &arg : args) {
      auto &data = arg->GetData();
      buffer.insert(buffer.end(), data->Data(), data->Data() + data->Size());
    }
    // BUGFIX: reading int_arr[1] touches bytes 8..15, so the buffer must hold
    // at least 16 bytes; the old `>= 8` guard allowed an out-of-bounds read
    // for 8-15 byte payloads starting with the magic value.
    if (buffer.size() >= 2 * sizeof(int64_t)) {
      auto int_arr = reinterpret_cast<int64_t *>(buffer.data());
      if (int_arr[0] == SHOULD_CHECK_MESSAGE_ORDER) {
        auto seq_no = int_arr[1];
        if (seq_no > 0) {
          RAY_CHECK(seq_no == prev_seq_no_ + 1) << seq_no << " vs " << prev_seq_no_;
        }
        prev_seq_no_ = seq_no;
      }
    }
    auto memory_buffer =
        std::make_shared<LocalMemoryBuffer>(buffer.data(), buffer.size(), true);
    // Write the merged content to each of return ids.
    for (size_t i = 0; i < return_ids.size(); i++) {
      results->push_back(std::make_shared<RayObject>(memory_buffer, nullptr));
    }
    return Status::OK();
  }

  CoreWorker worker_;
  // Last sequence number seen by MergeInputArgsAsOutput's ordering check.
  int64_t prev_seq_no_ = 0;
};
} // namespace ray
int main(int argc, char **argv) {
RAY_CHECK(argc == 4);
auto store_socket = std::string(argv[1]);
auto raylet_socket = std::string(argv[2]);
auto node_manager_port = std::stoi(std::string(argv[3]));
ray::gcs::GcsClientOptions gcs_options("127.0.0.1", 6379, "");
ray::MockWorker worker(store_socket, raylet_socket, node_manager_port, gcs_options);
worker.StartExecutingTasks();
return 0;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/test/scheduling_queue_test.cc
|
C++
|
#include <thread>
#include "gtest/gtest.h"
#include "ray/core_worker/transport/direct_actor_transport.h"
namespace ray {
/// Test double for DependencyWaiter: records every wait callback so the test
/// can fire them manually, in any order, via Complete().
class MockWaiter : public DependencyWaiter {
 public:
  MockWaiter() {}

  void Wait(const std::vector<ObjectID> &dependencies,
            std::function<void()> on_dependencies_available) override {
    // Just stash the callback; the test decides when dependencies "arrive".
    recorded_callbacks_.push_back(
        [on_dependencies_available]() { on_dependencies_available(); });
  }

  /// Fire the index-th recorded wait callback.
  void Complete(int index) { recorded_callbacks_[index](); }

 private:
  std::vector<std::function<void()>> recorded_callbacks_;
};
// Tasks submitted in sequence order are all accepted immediately.
TEST(SchedulingQueueTest, TestInOrder) {
  boost::asio::io_service io_service;
  MockWaiter waiter;
  SchedulingQueue queue(io_service, waiter, nullptr, 0);
  int num_accepted = 0;
  int num_rejected = 0;
  auto on_accept = [&num_accepted]() { num_accepted++; };
  auto on_reject = [&num_rejected]() { num_rejected++; };
  // Sequence numbers 0..3, already in order, no dependencies.
  for (int seq_no = 0; seq_no < 4; seq_no++) {
    queue.Add(seq_no, -1, on_accept, on_reject);
  }
  io_service.run();
  ASSERT_EQ(num_accepted, 4);
  ASSERT_EQ(num_rejected, 0);
}
// Tasks with unresolved object dependencies are held until the waiter fires,
// and dependency arrival order need not match sequence order: a held task runs
// only once both its dependency is ready and its turn in the sequence comes.
TEST(SchedulingQueueTest, TestWaitForObjects) {
  ObjectID obj1 = ObjectID::FromRandom();
  ObjectID obj2 = ObjectID::FromRandom();
  ObjectID obj3 = ObjectID::FromRandom();
  boost::asio::io_service io_service;
  MockWaiter waiter;
  SchedulingQueue queue(io_service, waiter, nullptr, 0);
  int n_ok = 0;
  int n_rej = 0;
  auto fn_ok = [&n_ok]() { n_ok++; };
  auto fn_rej = [&n_rej]() { n_rej++; };
  queue.Add(0, -1, fn_ok, fn_rej);
  queue.Add(1, -1, fn_ok, fn_rej, {obj1});
  queue.Add(2, -1, fn_ok, fn_rej, {obj2});
  queue.Add(3, -1, fn_ok, fn_rej, {obj3});
  // Only task 0 (no dependencies) runs right away.
  ASSERT_EQ(n_ok, 1);
  // obj1 arrives -> task 1 becomes runnable.
  waiter.Complete(0);
  ASSERT_EQ(n_ok, 2);
  // obj3 arrives, but task 2 still blocks the sequence, so nothing new runs.
  waiter.Complete(2);
  ASSERT_EQ(n_ok, 2);
  // obj2 arrives -> tasks 2 and 3 both run.
  waiter.Complete(1);
  ASSERT_EQ(n_ok, 4);
}
// A task blocked on an object dependency must NOT be rejected by the
// out-of-order reorder timeout; it stays queued until the dependency arrives.
TEST(SchedulingQueueTest, TestWaitForObjectsNotSubjectToSeqTimeout) {
  ObjectID obj1 = ObjectID::FromRandom();
  boost::asio::io_service io_service;
  MockWaiter waiter;
  SchedulingQueue queue(io_service, waiter, nullptr, 0);
  int n_ok = 0;
  int n_rej = 0;
  auto fn_ok = [&n_ok]() { n_ok++; };
  auto fn_rej = [&n_rej]() { n_rej++; };
  queue.Add(0, -1, fn_ok, fn_rej);
  queue.Add(1, -1, fn_ok, fn_rej, {obj1});
  // Task 0 runs immediately; task 1 waits on obj1.
  ASSERT_EQ(n_ok, 1);
  io_service.run();
  // Running the loop (which would fire any reorder timeout) rejects nothing.
  ASSERT_EQ(n_rej, 0);
  // Once obj1 arrives, task 1 executes normally.
  waiter.Complete(0);
  ASSERT_EQ(n_ok, 2);
}
// Tasks arriving out of sequence order are buffered and all executed once the
// missing sequence numbers show up; nothing is rejected.
TEST(SchedulingQueueTest, TestOutOfOrder) {
  boost::asio::io_service io_service;
  MockWaiter waiter;
  SchedulingQueue queue(io_service, waiter, nullptr, 0);
  int num_accepted = 0;
  int num_rejected = 0;
  auto on_accept = [&num_accepted]() { num_accepted++; };
  auto on_reject = [&num_rejected]() { num_rejected++; };
  // Deliberately scrambled arrival order: 2, 0, 3, 1.
  queue.Add(2, -1, on_accept, on_reject);
  queue.Add(0, -1, on_accept, on_reject);
  queue.Add(3, -1, on_accept, on_reject);
  queue.Add(1, -1, on_accept, on_reject);
  io_service.run();
  ASSERT_EQ(num_accepted, 4);
  ASSERT_EQ(num_rejected, 0);
}
// If a gap in the sequence is never filled, the reorder timeout rejects the
// buffered out-of-order tasks, and the queue then resumes accepting tasks
// from the next expected sequence number onward.
TEST(SchedulingQueueTest, TestSeqWaitTimeout) {
  boost::asio::io_service io_service;
  MockWaiter waiter;
  SchedulingQueue queue(io_service, waiter, nullptr, 0);
  int n_ok = 0;
  int n_rej = 0;
  auto fn_ok = [&n_ok]() { n_ok++; };
  auto fn_rej = [&n_rej]() { n_rej++; };
  // Seq 1 never arrives, so 2 and 3 stay buffered behind the gap.
  queue.Add(2, -1, fn_ok, fn_rej);
  queue.Add(0, -1, fn_ok, fn_rej);
  queue.Add(3, -1, fn_ok, fn_rej);
  ASSERT_EQ(n_ok, 1);
  ASSERT_EQ(n_rej, 0);
  io_service.run();  // immediately triggers timeout
  // The timeout rejects the two buffered tasks (2 and 3).
  ASSERT_EQ(n_ok, 1);
  ASSERT_EQ(n_rej, 2);
  // After the timeout the queue has skipped ahead; 4 and 5 run normally.
  queue.Add(4, -1, fn_ok, fn_rej);
  queue.Add(5, -1, fn_ok, fn_rej);
  ASSERT_EQ(n_ok, 3);
  ASSERT_EQ(n_rej, 2);
}
// When the client reports it has already processed up to sequence number 2,
// requests at or below that mark are rejected as stale; only newer ones run.
TEST(SchedulingQueueTest, TestSkipAlreadyProcessedByClient) {
  boost::asio::io_service io_service;
  MockWaiter waiter;
  SchedulingQueue queue(io_service, waiter, nullptr, 0);
  int num_accepted = 0;
  int num_rejected = 0;
  auto on_accept = [&num_accepted]() { num_accepted++; };
  auto on_reject = [&num_rejected]() { num_rejected++; };
  // Second argument is client_processed_up_to = 2 for every request.
  queue.Add(2, 2, on_accept, on_reject);
  queue.Add(3, 2, on_accept, on_reject);
  queue.Add(1, 2, on_accept, on_reject);
  io_service.run();
  // Only seq 3 is beyond the processed watermark.
  ASSERT_EQ(num_accepted, 1);
  ASSERT_EQ(num_rejected, 2);
}
} // namespace ray
// Standard googletest entry point for the scheduling queue tests.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  const int exit_code = RUN_ALL_TESTS();
  return exit_code;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/test/task_manager_test.cc
|
C++
|
#include "ray/core_worker/task_manager.h"
#include "gtest/gtest.h"
#include "ray/common/task/task_spec.h"
#include "ray/core_worker/actor_manager.h"
#include "ray/core_worker/reference_count.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/util/test_util.h"
namespace ray {
/// Build a TaskSpecification with a fake task id, the requested number of
/// return values, and one by-reference argument per dependency.
TaskSpecification CreateTaskHelper(uint64_t num_returns,
                                   std::vector<ObjectID> dependencies) {
  TaskSpecification task;
  auto &msg = task.GetMutableMessage();
  msg.set_task_id(TaskID::ForFakeTask().Binary());
  msg.set_num_returns(num_returns);
  for (const ObjectID &dependency : dependencies) {
    msg.add_args()->add_object_ids(dependency.Binary());
  }
  return task;
}
/// Test double that merely counts actor-termination publishes.
/// Note: everything here is private (class default); the virtual override is
/// still reachable through the ActorManagerInterface base.
class MockActorManager : public ActorManagerInterface {
  void PublishTerminatedActor(const TaskSpecification &actor_creation_task) override {
    num_terminations += 1;
  }

  // Number of times PublishTerminatedActor was invoked.
  int num_terminations = 0;
};
/// Fixture wiring a TaskManager to an in-memory store, a reference counter,
/// and the mock actor manager. The retry hook does not resubmit anything; it
/// just counts attempts in num_retries_.
class TaskManagerTest : public ::testing::Test {
 public:
  TaskManagerTest()
      : store_(std::shared_ptr<CoreWorkerMemoryStore>(new CoreWorkerMemoryStore())),
        reference_counter_(std::shared_ptr<ReferenceCounter>(new ReferenceCounter())),
        actor_manager_(std::shared_ptr<ActorManagerInterface>(new MockActorManager())),
        manager_(store_, reference_counter_, actor_manager_,
                 [this](const TaskSpecification &spec) {
                   // Record the retry attempt and pretend resubmission worked.
                   num_retries_++;
                   return Status::OK();
                 }) {}

  std::shared_ptr<CoreWorkerMemoryStore> store_;
  std::shared_ptr<ReferenceCounter> reference_counter_;
  std::shared_ptr<ActorManagerInterface> actor_manager_;
  TaskManager manager_;
  // Number of times the manager invoked the retry hook above.
  int num_retries_ = 0;
};
// Happy path: a pending task completes; its return value lands in the store,
// dependency references are released, and no retries are triggered.
TEST_F(TaskManagerTest, TestTaskSuccess) {
  TaskID caller_id = TaskID::Nil();
  rpc::Address caller_address;
  ObjectID dep1 = ObjectID::FromRandom();
  ObjectID dep2 = ObjectID::FromRandom();
  auto spec = CreateTaskHelper(1, {dep1, dep2});
  ASSERT_FALSE(manager_.IsTaskPending(spec.TaskId()));
  manager_.AddPendingTask(caller_id, caller_address, spec);
  ASSERT_TRUE(manager_.IsTaskPending(spec.TaskId()));
  // In scope: the two dependencies plus the pending return object.
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 3);
  auto return_id = spec.ReturnId(0, TaskTransportType::DIRECT);
  WorkerContext ctx(WorkerType::WORKER, JobID::FromInt(0));

  // Simulate a successful PushTask reply carrying random return data.
  rpc::PushTaskReply reply;
  auto return_object = reply.add_return_objects();
  return_object->set_object_id(return_id.Binary());
  auto data = GenerateRandomBuffer();
  return_object->set_data(data->Data(), data->Size());
  manager_.CompletePendingTask(spec.TaskId(), reply, nullptr);
  ASSERT_FALSE(manager_.IsTaskPending(spec.TaskId()));
  // Only the return object reference should remain.
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 1);

  // The stored value must match the reply payload exactly.
  std::vector<std::shared_ptr<RayObject>> results;
  RAY_CHECK_OK(store_->Get({return_id}, 1, -1, ctx, false, &results));
  ASSERT_EQ(results.size(), 1);
  ASSERT_FALSE(results[0]->IsException());
  ASSERT_EQ(std::memcmp(results[0]->GetData()->Data(), return_object->data().data(),
                        return_object->data().size()),
            0);
  ASSERT_EQ(num_retries_, 0);

  // Dropping the last local reference releases the return object too.
  std::vector<ObjectID> removed;
  reference_counter_->AddLocalReference(return_id);
  reference_counter_->RemoveLocalReference(return_id, &removed);
  ASSERT_EQ(removed[0], return_id);
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 0);
}
// Failure path with no retries configured: the task fails once, an error
// object of the given ErrorType is stored under the return id, and no retry
// is attempted.
TEST_F(TaskManagerTest, TestTaskFailure) {
  TaskID caller_id = TaskID::Nil();
  rpc::Address caller_address;
  ObjectID dep1 = ObjectID::FromRandom();
  ObjectID dep2 = ObjectID::FromRandom();
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 0);
  auto spec = CreateTaskHelper(1, {dep1, dep2});
  ASSERT_FALSE(manager_.IsTaskPending(spec.TaskId()));
  manager_.AddPendingTask(caller_id, caller_address, spec);
  ASSERT_TRUE(manager_.IsTaskPending(spec.TaskId()));
  // Two dependencies plus the pending return object.
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 3);
  auto return_id = spec.ReturnId(0, TaskTransportType::DIRECT);
  WorkerContext ctx(WorkerType::WORKER, JobID::FromInt(0));

  // Fail the task; with zero retries this is terminal.
  auto error = rpc::ErrorType::WORKER_DIED;
  manager_.PendingTaskFailed(spec.TaskId(), error);
  ASSERT_FALSE(manager_.IsTaskPending(spec.TaskId()));
  // Only the return object reference should remain.
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 1);

  // The stored "value" must be an exception carrying the same error type.
  std::vector<std::shared_ptr<RayObject>> results;
  RAY_CHECK_OK(store_->Get({return_id}, 1, -1, ctx, false, &results));
  ASSERT_EQ(results.size(), 1);
  rpc::ErrorType stored_error;
  ASSERT_TRUE(results[0]->IsException(&stored_error));
  ASSERT_EQ(stored_error, error);
  ASSERT_EQ(num_retries_, 0);

  // Dropping the last local reference releases the error object too.
  std::vector<ObjectID> removed;
  reference_counter_->AddLocalReference(return_id);
  reference_counter_->RemoveLocalReference(return_id, &removed);
  ASSERT_EQ(removed[0], return_id);
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 0);
}
// Retry path: with num_retries=3, the first three failures keep the task
// pending (each invoking the retry hook) and only the fourth failure is
// terminal, storing an error object under the return id.
TEST_F(TaskManagerTest, TestTaskRetry) {
  TaskID caller_id = TaskID::Nil();
  rpc::Address caller_address;
  ObjectID dep1 = ObjectID::FromRandom();
  ObjectID dep2 = ObjectID::FromRandom();
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 0);
  auto spec = CreateTaskHelper(1, {dep1, dep2});
  ASSERT_FALSE(manager_.IsTaskPending(spec.TaskId()));
  int num_retries = 3;
  manager_.AddPendingTask(caller_id, caller_address, spec, num_retries);
  ASSERT_TRUE(manager_.IsTaskPending(spec.TaskId()));
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 3);
  auto return_id = spec.ReturnId(0, TaskTransportType::DIRECT);
  WorkerContext ctx(WorkerType::WORKER, JobID::FromInt(0));

  auto error = rpc::ErrorType::WORKER_DIED;
  for (int i = 0; i < num_retries; i++) {
    // Each non-terminal failure keeps the task pending and bumps the retry
    // counter; no error object exists in the store yet.
    manager_.PendingTaskFailed(spec.TaskId(), error);
    ASSERT_TRUE(manager_.IsTaskPending(spec.TaskId()));
    ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 3);
    std::vector<std::shared_ptr<RayObject>> results;
    ASSERT_FALSE(store_->Get({return_id}, 1, 0, ctx, false, &results).ok());
    ASSERT_EQ(num_retries_, i + 1);
  }

  // Retries exhausted: this failure is terminal.
  manager_.PendingTaskFailed(spec.TaskId(), error);
  ASSERT_FALSE(manager_.IsTaskPending(spec.TaskId()));
  // Only the return object reference should remain.
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 1);
  std::vector<std::shared_ptr<RayObject>> results;
  // Fix: the timeout argument was `-0` (i.e. 0, a non-blocking poll); use -1
  // (block until available) for consistency with TestTaskSuccess/TestTaskFailure.
  RAY_CHECK_OK(store_->Get({return_id}, 1, -1, ctx, false, &results));
  ASSERT_EQ(results.size(), 1);
  rpc::ErrorType stored_error;
  ASSERT_TRUE(results[0]->IsException(&stored_error));
  ASSERT_EQ(stored_error, error);

  // Dropping the last local reference releases the error object too.
  std::vector<ObjectID> removed;
  reference_counter_->AddLocalReference(return_id);
  reference_counter_->RemoveLocalReference(return_id, &removed);
  ASSERT_EQ(removed[0], return_id);
  ASSERT_EQ(reference_counter_->NumObjectIDsInScope(), 0);
}
} // namespace ray
// Standard googletest entry point for the task manager tests.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  const int exit_code = RUN_ALL_TESTS();
  return exit_code;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/transport/dependency_resolver.cc
|
C++
|
#include "ray/core_worker/transport/dependency_resolver.h"
namespace ray {
/// Per-task bookkeeping shared by all of the task's dependency-fetch
/// callbacks; destroyed when the last callback releases its shared_ptr.
struct TaskState {
  TaskState(TaskSpecification t,
            absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> deps)
      : task(t), local_dependencies(deps), dependencies_remaining(deps.size()) {}
  /// The task to be run.
  TaskSpecification task;
  /// The local dependencies to resolve for this task. Objects are nullptr if not yet
  /// resolved.
  absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> local_dependencies;
  /// Number of local dependencies that aren't yet resolved (have nullptrs in the above
  /// map).
  size_t dependencies_remaining;
};
/// Rewrite the task's argument list in place: each argument whose ObjectID is
/// present in `dependencies` (all values must be resolved, i.e. non-null) is
/// either inlined as a concrete value or, if the value was spilled to plasma,
/// left as a (re-added) plasma object reference.
///
/// \param dependencies Resolved id -> value map for this task.
/// \param task The task whose protobuf message is mutated.
/// \param inlined Out parameter: ids whose values were inlined (promoted
///        plasma ids are NOT included).
void InlineDependencies(
    absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> dependencies,
    TaskSpecification &task, std::vector<ObjectID> *inlined) {
  auto &msg = task.GetMutableMessage();
  size_t found = 0;
  for (size_t i = 0; i < task.NumArgs(); i++) {
    auto count = task.ArgIdCount(i);
    if (count > 0) {
      const auto &id = task.ArgId(i, 0);
      const auto &it = dependencies.find(id);
      if (it != dependencies.end()) {
        RAY_CHECK(it->second);
        auto *mutable_arg = msg.mutable_args(i);
        // Clear the by-reference form first; it is replaced below either by
        // the inline value or by the plasma-promoted id.
        mutable_arg->clear_object_ids();
        if (it->second->IsInPlasmaError()) {
          // Promote the object id to plasma.
          mutable_arg->add_object_ids(it->first.Binary());
        } else {
          // Inline the object value.
          if (it->second->HasData()) {
            const auto &data = it->second->GetData();
            mutable_arg->set_data(data->Data(), data->Size());
          }
          if (it->second->HasMetadata()) {
            const auto &metadata = it->second->GetMetadata();
            mutable_arg->set_metadata(metadata->Data(), metadata->Size());
          }
          inlined->push_back(id);
        }
        found++;
      } else {
        // Non-direct-call args are fetched elsewhere; direct-call args must
        // all have been resolved before this function is called.
        RAY_CHECK(!id.IsDirectCallType());
      }
    }
  }
  // Each dependency could be inlined more than once.
  RAY_CHECK(found >= dependencies.size());
}
/// Resolve all direct-call argument values for `task` from the in-memory
/// store, inline them into the task spec, then invoke `on_complete`.
/// If the task has no direct-call dependencies the callback fires
/// synchronously; otherwise it fires from the store's async callback once the
/// last dependency arrives.
void LocalDependencyResolver::ResolveDependencies(TaskSpecification &task,
                                                  std::function<void()> on_complete) {
  absl::flat_hash_map<ObjectID, std::shared_ptr<RayObject>> local_dependencies;
  for (size_t i = 0; i < task.NumArgs(); i++) {
    auto count = task.ArgIdCount(i);
    if (count > 0) {
      RAY_CHECK(count <= 1) << "multi args not implemented";
      const auto &id = task.ArgId(i, 0);
      if (id.IsDirectCallType()) {
        // Placeholder; filled in by the GetAsync callback below.
        local_dependencies.emplace(id, nullptr);
      }
    }
  }
  if (local_dependencies.empty()) {
    on_complete();
    return;
  }

  // This is deleted when the last dependency fetch callback finishes.
  std::shared_ptr<TaskState> state =
      std::make_shared<TaskState>(task, std::move(local_dependencies));
  num_pending_ += 1;

  for (const auto &it : state->local_dependencies) {
    const ObjectID &obj_id = it.first;
    in_memory_store_->GetAsync(
        obj_id, [this, state, obj_id, on_complete](std::shared_ptr<RayObject> obj) {
          RAY_CHECK(obj != nullptr);
          bool complete = false;
          std::vector<ObjectID> inlined;
          {
            // The mutex serializes concurrent fetch callbacks touching the
            // shared TaskState; exactly one callback sees the counter hit 0.
            absl::MutexLock lock(&mu_);
            state->local_dependencies[obj_id] = std::move(obj);
            if (--state->dependencies_remaining == 0) {
              InlineDependencies(state->local_dependencies, state->task, &inlined);
              complete = true;
              num_pending_ -= 1;
            }
          }
          // Notify outside the lock to avoid re-entrancy under mu_.
          if (inlined.size() > 0) {
            task_finisher_->OnTaskDependenciesInlined(inlined);
          }
          if (complete) {
            on_complete();
          }
        });
  }
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/transport/dependency_resolver.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_DEPENDENCY_RESOLVER_H
#define RAY_CORE_WORKER_DEPENDENCY_RESOLVER_H
#include <memory>
#include "ray/common/id.h"
#include "ray/common/task/task_spec.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/core_worker/task_manager.h"
namespace ray {
// This class is thread-safe.
// This class is thread-safe.
class LocalDependencyResolver {
 public:
  /// \param store In-memory store the dependency values are fetched from.
  /// \param task_finisher Notified when dependency values are inlined.
  LocalDependencyResolver(std::shared_ptr<CoreWorkerMemoryStore> store,
                          std::shared_ptr<TaskFinisherInterface> task_finisher)
      : in_memory_store_(store), task_finisher_(task_finisher), num_pending_(0) {}

  /// Resolve all local and remote dependencies for the task, calling the specified
  /// callback when done. Direct call ids in the task specification will be resolved
  /// to concrete values and inlined.
  ///
  /// Note: This method **will mutate** the given TaskSpecification.
  ///
  /// Postcondition: all direct call id arguments that haven't been spilled to plasma
  /// are converted to values and all remaining arguments are arguments in the task spec.
  void ResolveDependencies(TaskSpecification &task, std::function<void()> on_complete);

  /// Return the number of tasks pending dependency resolution.
  /// TODO(ekl) this should be exposed in worker stats.
  int NumPendingTasks() const { return num_pending_; }

 private:
  /// The in-memory store.
  std::shared_ptr<CoreWorkerMemoryStore> in_memory_store_;
  /// Used to complete tasks.
  std::shared_ptr<TaskFinisherInterface> task_finisher_;
  /// Number of tasks pending dependency resolution. Atomic so that
  /// NumPendingTasks() can read it without taking mu_.
  std::atomic<int> num_pending_;
  /// Protects against concurrent access to internal state.
  absl::Mutex mu_;
};
} // namespace ray
#endif // RAY_CORE_WORKER_DEPENDENCY_RESOLVER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/transport/direct_actor_transport.cc
|
C++
|
#include "ray/core_worker/transport/direct_actor_transport.h"
#include <thread>
#include "ray/common/task/task.h"
using ray::rpc::ActorTableData;
namespace ray {
/// Request that the given actor be force-killed. The kill is recorded in
/// pending_force_kills_ and is sent (ahead of any queued tasks) by
/// SendPendingTasks once a connection to the actor exists.
Status CoreWorkerDirectActorTaskSubmitter::KillActor(const ActorID &actor_id) {
  absl::MutexLock lock(&mu_);
  pending_force_kills_.insert(actor_id);
  auto it = rpc_clients_.find(actor_id);
  if (it == rpc_clients_.end()) {
    // Actor is not yet created, or is being reconstructed; the force kill was
    // cached above and will be sent once the actor connection is established.
    // TODO(zhijunfu): it might be possible for a user to specify an invalid
    // actor handle (e.g. from unpickling), in that case it might be desirable
    // to have a timeout to mark it as invalid if it doesn't show up in the
    // specified time.
    RAY_LOG(DEBUG) << "Actor " << actor_id << " is not yet created.";
  } else {
    SendPendingTasks(actor_id);
  }
  return Status::OK();
}
/// Queue an actor task for submission. The send position is assigned up front
/// (before dependency resolution, which can finish out of order) so tasks are
/// always pushed to the actor in client-side submission order.
Status CoreWorkerDirectActorTaskSubmitter::SubmitTask(TaskSpecification task_spec) {
  RAY_LOG(DEBUG) << "Submitting task " << task_spec.TaskId();
  RAY_CHECK(task_spec.IsActorTask());

  // We must fix the send order prior to resolving dependencies, which may complete
  // out of order. This ensures we preserve the client-side send order.
  int64_t send_pos = -1;
  {
    absl::MutexLock lock(&mu_);
    send_pos = next_send_position_to_assign_[task_spec.ActorId()]++;
  }

  // The callback may run asynchronously; task_spec is captured by value
  // (mutable, because InlineDependencies mutates it during resolution).
  resolver_.ResolveDependencies(task_spec, [this, send_pos, task_spec]() mutable {
    const auto &actor_id = task_spec.ActorId();
    auto request = std::unique_ptr<rpc::PushTaskRequest>(new rpc::PushTaskRequest);
    request->mutable_caller_address()->CopyFrom(rpc_address_);
    // NOTE(swang): CopyFrom is needed because if we use Swap here and the task
    // fails, then the task data will be gone when the TaskManager attempts to
    // access the task.
    request->mutable_task_spec()->CopyFrom(task_spec.GetMessage());

    absl::MutexLock lock(&mu_);
    auto inserted = pending_requests_[actor_id].emplace(send_pos, std::move(request));
    RAY_CHECK(inserted.second);
    auto it = rpc_clients_.find(actor_id);
    if (it == rpc_clients_.end()) {
      // Actor is not yet created, or is being reconstructed, cache the request
      // and submit after actor is alive.
      // TODO(zhijunfu): it might be possible for a user to specify an invalid
      // actor handle (e.g. from unpickling), in that case it might be desirable
      // to have a timeout to mark it as invalid if it doesn't show up in the
      // specified time.
      RAY_LOG(DEBUG) << "Actor " << actor_id << " is not yet created.";
    } else {
      SendPendingTasks(actor_id);
    }
  });

  // If the task submission subsequently fails, then the client will receive
  // the error in a callback.
  return Status::OK();
}
/// Record the actor's (possibly new) worker id, create an RPC client for it
/// if one doesn't exist yet, and flush any tasks queued while it was away.
void CoreWorkerDirectActorTaskSubmitter::ConnectActor(const ActorID &actor_id,
                                                      const rpc::Address &address) {
  absl::MutexLock lock(&mu_);
  // Update the mapping so new RPCs go out with the right intended worker id.
  worker_ids_[actor_id] = address.worker_id();
  // Lazily create the connection to the actor.
  // TODO(edoakes): are these clients cleaned up properly?
  if (rpc_clients_.find(actor_id) == rpc_clients_.end()) {
    rpc_clients_[actor_id] = std::shared_ptr<rpc::CoreWorkerClientInterface>(
        client_factory_(address.ip_address(), address.port()));
  }
  // Send anything that queued up while the actor was unavailable.
  if (pending_requests_.find(actor_id) != pending_requests_.end()) {
    SendPendingTasks(actor_id);
  }
}
/// Handle an actor going away.
/// \param dead false = being reconstructed (drop the connection, keep queued
///        tasks); true = permanently dead (fail every queued task).
void CoreWorkerDirectActorTaskSubmitter::DisconnectActor(const ActorID &actor_id,
                                                         bool dead) {
  absl::MutexLock lock(&mu_);
  if (!dead) {
    // We're reconstructing the actor, so erase the client for now. The new client
    // will be inserted once actor reconstruction completes. We don't erase the
    // client when the actor is DEAD, so that all further tasks will be failed.
    rpc_clients_.erase(actor_id);
    worker_ids_.erase(actor_id);
  } else {
    RAY_LOG(INFO) << "Failing pending tasks for actor " << actor_id;
    // If there are pending requests, treat the pending tasks as failed.
    auto pending_it = pending_requests_.find(actor_id);
    if (pending_it != pending_requests_.end()) {
      auto head = pending_it->second.begin();
      while (head != pending_it->second.end()) {
        // Take ownership of each request before erasing its map entry.
        auto request = std::move(head->second);
        head = pending_it->second.erase(head);
        auto task_id = TaskID::FromBinary(request->task_spec().task_id());
        auto status = Status::IOError("cancelling all pending tasks of dead actor");
        task_finisher_->PendingTaskFailed(task_id, rpc::ErrorType::ACTOR_DIED, &status);
      }
      pending_requests_.erase(pending_it);
    }
    // No need to clean up tasks that have been sent and are waiting for
    // replies. They will be treated as failed once the connection dies.
    // We retain the sequencing information so that we can properly fail
    // any tasks submitted after the actor death.
  }
}
/// Send any pending force-kill, then push queued tasks in strict send-position
/// order, stopping at the first gap (a task whose dependencies haven't
/// resolved yet). Caller must hold mu_.
void CoreWorkerDirectActorTaskSubmitter::SendPendingTasks(const ActorID &actor_id) {
  auto &client = rpc_clients_[actor_id];
  RAY_CHECK(client);
  // Check if there is a pending force kill. If there is, send it and disconnect the
  // client.
  if (pending_force_kills_.find(actor_id) != pending_force_kills_.end()) {
    rpc::KillActorRequest request;
    request.set_intended_actor_id(actor_id.Binary());
    RAY_CHECK_OK(client->KillActor(request, nullptr));
    pending_force_kills_.erase(actor_id);
  }
  // Submit all pending requests.
  auto &requests = pending_requests_[actor_id];
  auto head = requests.begin();
  // Only consecutive positions are sent; a hole in the sequence stops the loop.
  while (head != requests.end() && head->first == next_send_position_[actor_id]) {
    auto request = std::move(head->second);
    head = requests.erase(head);
    auto num_returns = request->task_spec().num_returns();
    auto task_id = TaskID::FromBinary(request->task_spec().task_id());
    PushActorTask(*client, std::move(request), actor_id, task_id, num_returns);
  }
}
/// Push a single task over the given client and wire its reply into the task
/// finisher: RPC failure -> ACTOR_DIED task failure; success -> completion.
/// Caller must hold mu_ (increments next_send_position_).
void CoreWorkerDirectActorTaskSubmitter::PushActorTask(
    rpc::CoreWorkerClientInterface &client, std::unique_ptr<rpc::PushTaskRequest> request,
    const ActorID &actor_id, const TaskID &task_id, int num_returns) {
  RAY_LOG(DEBUG) << "Pushing task " << task_id << " to actor " << actor_id;
  next_send_position_[actor_id]++;
  // Stamp the request with the actor's current worker id so stale workers
  // reject it after reconstruction.
  auto it = worker_ids_.find(actor_id);
  RAY_CHECK(it != worker_ids_.end()) << "Actor worker id not found " << actor_id.Hex();
  request->set_intended_worker_id(it->second);
  RAY_CHECK_OK(client.PushActorTask(
      std::move(request),
      [this, task_id](Status status, const rpc::PushTaskReply &reply) {
        if (!status.ok()) {
          task_finisher_->PendingTaskFailed(task_id, rpc::ErrorType::ACTOR_DIED, &status);
        } else {
          task_finisher_->CompletePendingTask(task_id, reply, nullptr);
        }
      }));
}
/// An actor is considered alive iff we currently hold an RPC client for it.
bool CoreWorkerDirectActorTaskSubmitter::IsActorAlive(const ActorID &actor_id) const {
  absl::MutexLock lock(&mu_);
  return rpc_clients_.find(actor_id) != rpc_clients_.end();
}
/// One-time setup: create the dependency waiter on top of the local raylet
/// connection and record the address/client factory used for replies.
void CoreWorkerDirectTaskReceiver::Init(rpc::ClientFactoryFn client_factory,
                                        rpc::Address rpc_address) {
  waiter_.reset(new DependencyWaiterImpl(*local_raylet_client_));
  client_factory_ = client_factory;
  rpc_address_ = rpc_address;
}
/// Create the worker thread pool on the first call with a non-default
/// concurrency. The pool can only be sized once; a later call with a
/// different value trips the RAY_CHECK.
void CoreWorkerDirectTaskReceiver::SetMaxActorConcurrency(int max_concurrency) {
  if (max_concurrency != max_concurrency_) {
    RAY_LOG(INFO) << "Creating new thread pool of size " << max_concurrency;
    RAY_CHECK(pool_ == nullptr) << "Cannot change max concurrency at runtime.";
    pool_.reset(new BoundedExecutor(max_concurrency));
    max_concurrency_ = max_concurrency;
  }
}
/// Switch this receiver into asyncio (fiber) mode on first use: install the
/// shared-work fiber scheduler on this thread, spawn the fiber runner thread,
/// and cap fiber concurrency at `max_concurrency`. Idempotent after the first
/// call.
void CoreWorkerDirectTaskReceiver::SetActorAsAsync(int max_concurrency) {
  if (!is_asyncio_) {
    RAY_LOG(DEBUG) << "Setting direct actor as async, creating new fiber thread.";
    // The main thread will be used for creating new fibers.
    // The fiber_runner_thread_ will run all fibers.
    // boost::fibers::algo::shared_work allows two threads to transparently
    // share all the fibers.
    boost::fibers::use_scheduling_algorithm<boost::fibers::algo::shared_work>();
    fiber_runner_thread_ = std::thread([&]() {
      boost::fibers::use_scheduling_algorithm<boost::fibers::algo::shared_work>();
      // The event here is used to make sure fiber_runner_thread_ never terminates.
      // Because fiber_shutdown_event_ is never notified, fiber_runner_thread_ will
      // immediately start working on any ready fibers.
      fiber_shutdown_event_.Wait();
    });
    fiber_rate_limiter_.reset(new FiberRateLimiter(max_concurrency));
    max_concurrency_ = max_concurrency;
    is_asyncio_ = true;
  }
}  // Fix: removed the stray ';' that followed the closing brace (an empty
   // declaration at namespace scope, flagged by -Wextra-semi).
/// gRPC handler for PushTask: validates the request, gathers argument
/// dependencies, builds the accept/reject callbacks, and enqueues the task on
/// the per-caller SchedulingQueue (actor creation tasks run immediately).
/// The heavy lifting — executing the task and serializing return objects —
/// happens inside accept_callback.
void CoreWorkerDirectTaskReceiver::HandlePushTask(
    const rpc::PushTaskRequest &request, rpc::PushTaskReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  RAY_CHECK(waiter_ != nullptr) << "Must call init() prior to use";
  const TaskSpecification task_spec(request.task_spec());
  RAY_LOG(DEBUG) << "Received task " << task_spec.DebugString();
  if (task_spec.IsActorTask() && !worker_context_.CurrentTaskIsDirectCall()) {
    send_reply_callback(Status::Invalid("This actor doesn't accept direct calls."),
                        nullptr, nullptr);
    return;
  }
  // Only call SetMaxActorConcurrency to configure threadpool size when the
  // actor is not async actor. Async actor is single threaded.
  if (worker_context_.CurrentActorIsAsync()) {
    SetActorAsAsync(worker_context_.CurrentActorMaxConcurrency());
  } else {
    SetMaxActorConcurrency(worker_context_.CurrentActorMaxConcurrency());
  }
  // Collect every by-reference argument id; the scheduling queue holds the
  // task until these are available.
  std::vector<ObjectID> dependencies;
  for (size_t i = 0; i < task_spec.NumArgs(); ++i) {
    int count = task_spec.ArgIdCount(i);
    for (int j = 0; j < count; j++) {
      dependencies.push_back(task_spec.ArgId(i, j));
    }
  }
  // Only assign resources for non-actor tasks. Actor tasks inherit the resources
  // assigned at initial actor creation time.
  std::shared_ptr<ResourceMappingType> resource_ids;
  if (!task_spec.IsActorTask()) {
    resource_ids.reset(new ResourceMappingType());
    for (const auto &mapping : request.resource_mapping()) {
      std::vector<std::pair<int64_t, double>> rids;
      for (const auto &ids : mapping.resource_ids()) {
        rids.push_back(std::make_pair(ids.index(), ids.quantity()));
      }
      (*resource_ids)[mapping.name()] = rids;
    }
  }
  const rpc::Address &caller_address = request.caller_address();
  // Runs the task and fills in `reply`. Captures by value because it may
  // execute later, from a worker thread or fiber.
  auto accept_callback = [this, caller_address, reply, send_reply_callback, task_spec,
                          resource_ids]() {
    // We have posted an exit task onto the main event loop,
    // so shouldn't bother executing any further work.
    if (exiting_) return;
    auto num_returns = task_spec.NumReturns();
    if (task_spec.IsActorCreationTask() || task_spec.IsActorTask()) {
      // Decrease to account for the dummy object id.
      num_returns--;
    }
    RAY_CHECK(num_returns >= 0);
    std::vector<std::shared_ptr<RayObject>> return_objects;
    auto status = task_handler_(task_spec, resource_ids, &return_objects);
    bool objects_valid = return_objects.size() == num_returns;
    if (objects_valid) {
      std::vector<ObjectID> plasma_return_ids;
      for (size_t i = 0; i < return_objects.size(); i++) {
        auto return_object = reply->add_return_objects();
        // Return indices are 1-based (index 0 is reserved).
        ObjectID id = ObjectID::ForTaskReturn(
            task_spec.TaskId(), /*index=*/i + 1,
            /*transport_type=*/static_cast<int>(TaskTransportType::DIRECT));
        return_object->set_object_id(id.Binary());
        // The object is nullptr if it already existed in the object store.
        const auto &result = return_objects[i];
        if (result == nullptr || result->GetData()->IsPlasmaBuffer()) {
          // Large/pre-existing objects stay in plasma; only the id travels.
          return_object->set_in_plasma(true);
          plasma_return_ids.push_back(id);
        } else {
          if (result->GetData() != nullptr) {
            return_object->set_data(result->GetData()->Data(), result->GetData()->Size());
          }
          if (result->GetMetadata() != nullptr) {
            return_object->set_metadata(result->GetMetadata()->Data(),
                                        result->GetMetadata()->Size());
          }
        }
      }
      // If we spilled any return objects to plasma, notify the raylet to pin them.
      // The raylet will then coordinate with the caller to manage the objects'
      // lifetimes.
      // TODO(edoakes): the plasma objects could be evicted between creating them
      // here and when raylet pins them.
      if (!plasma_return_ids.empty()) {
        RAY_CHECK_OK(
            local_raylet_client_->PinObjectIDs(caller_address, plasma_return_ids));
      }
      if (task_spec.IsActorCreationTask()) {
        RAY_LOG(INFO) << "Actor creation task finished, task_id: " << task_spec.TaskId()
                      << ", actor_id: " << task_spec.ActorCreationId();
        // Tell raylet that an actor creation task has finished execution, so that
        // raylet can publish actor creation event to GCS, and mark this worker as
        // actor, thus if this worker dies later raylet will reconstruct the actor.
        RAY_CHECK_OK(local_raylet_client_->TaskDone());
      }
    }
    if (status.IsSystemExit()) {
      // Don't allow the worker to be reused, even though the reply status is OK.
      // The worker will be shutting down shortly.
      reply->set_worker_exiting(true);
      // In Python, SystemExit can only be raised on the main thread. To
      // work around this when we are executing tasks on worker threads,
      // we re-post the exit event explicitly on the main thread.
      exiting_ = true;
      if (objects_valid) {
        // This happens when max_calls is hit. We still need to return the objects.
        send_reply_callback(Status::OK(), nullptr, nullptr);
      } else {
        send_reply_callback(status, nullptr, nullptr);
      }
      task_main_io_service_.post(
          [this, status]() { exit_handler_(status.IsIntentionalSystemExit()); });
    } else {
      RAY_CHECK(objects_valid) << return_objects.size() << " " << num_returns;
      send_reply_callback(status, nullptr, nullptr);
    }
  };
  // Run actor creation task immediately on the main thread, without going
  // through a scheduling queue.
  if (task_spec.IsActorCreationTask()) {
    accept_callback();
    return;
  }
  // Invoked when the queue decides the request is stale (see SchedulingQueue).
  auto reject_callback = [send_reply_callback]() {
    send_reply_callback(Status::Invalid("client cancelled stale rpc"), nullptr, nullptr);
  };
  // One scheduling queue per caller, created lazily, to enforce per-caller
  // sequence ordering.
  auto it = scheduling_queue_.find(task_spec.CallerId());
  if (it == scheduling_queue_.end()) {
    auto result = scheduling_queue_.emplace(
        task_spec.CallerId(),
        std::unique_ptr<SchedulingQueue>(new SchedulingQueue(
            task_main_io_service_, *waiter_, pool_, is_asyncio_, fiber_rate_limiter_)));
    it = result.first;
  }
  it->second->Add(request.sequence_number(), request.client_processed_up_to(),
                  accept_callback, reject_callback, dependencies);
}
/// gRPC handler fired when a previously-requested argument wait finishes:
/// wake the queued task registered under this tag and ack the RPC.
void CoreWorkerDirectTaskReceiver::HandleDirectActorCallArgWaitComplete(
    const rpc::DirectActorCallArgWaitCompleteRequest &request,
    rpc::DirectActorCallArgWaitCompleteReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  const auto tag = request.tag();
  RAY_LOG(DEBUG) << "Arg wait complete for tag " << tag;
  waiter_->OnWaitComplete(tag);
  send_reply_callback(Status::OK(), nullptr, nullptr);
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/transport/direct_actor_transport.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_DIRECT_ACTOR_TRANSPORT_H
#define RAY_CORE_WORKER_DIRECT_ACTOR_TRANSPORT_H
#include <boost/asio/thread_pool.hpp>
#include <boost/fiber/all.hpp>
#include <boost/thread.hpp>
#include <list>
#include <queue>
#include <set>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "ray/common/id.h"
#include "ray/common/ray_object.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/core_worker/task_manager.h"
#include "ray/core_worker/transport/dependency_resolver.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/grpc_server.h"
#include "ray/rpc/worker/core_worker_client.h"
namespace {} // namespace
namespace ray {
/// The max time to wait for out-of-order tasks.
const int kMaxReorderWaitSeconds = 30;
/// In direct actor call task submitter and receiver, a task is directly submitted
/// to the actor that will execute it.
// This class is thread-safe.
class CoreWorkerDirectActorTaskSubmitter {
 public:
  /// \param rpc_address Address of this worker's RPC server; attached to
  ///     outgoing requests as the caller address.
  /// \param client_factory Factory for producing new core worker clients.
  /// \param store Memory store used to resolve direct call dependencies.
  /// \param task_finisher Used to complete or fail submitted tasks.
  CoreWorkerDirectActorTaskSubmitter(rpc::Address rpc_address,
                                     rpc::ClientFactoryFn client_factory,
                                     std::shared_ptr<CoreWorkerMemoryStore> store,
                                     std::shared_ptr<TaskFinisherInterface> task_finisher)
      // Initializers are listed in member declaration order (the order in
      // which initialization actually runs) to avoid -Wreorder warnings.
      // By-value parameters are moved into the members they initialize.
      : client_factory_(std::move(client_factory)),
        rpc_address_(std::move(rpc_address)),
        resolver_(std::move(store), task_finisher),
        task_finisher_(std::move(task_finisher)) {}

  /// Submit a task to an actor for execution.
  ///
  /// \param[in] task The task spec to submit.
  /// \return Status::Invalid if the task is not yet supported.
  Status SubmitTask(TaskSpecification task_spec);

  /// Tell this actor to exit immediately.
  ///
  /// \param[in] actor_id The actor_id of the actor to kill.
  /// \return Status::Invalid if the actor could not be killed.
  Status KillActor(const ActorID &actor_id);

  /// Create connection to actor and send all pending tasks.
  ///
  /// \param[in] actor_id Actor ID.
  /// \param[in] address The new address of the actor.
  void ConnectActor(const ActorID &actor_id, const rpc::Address &address);

  /// Disconnect from a failed actor.
  ///
  /// \param[in] actor_id Actor ID.
  /// \param[in] dead Whether the actor is permanently dead (vs. restarting).
  void DisconnectActor(const ActorID &actor_id, bool dead = false);

 private:
  /// Push a task to a remote actor via the given client.
  /// Note, this function doesn't return any error status code. If an error occurs while
  /// sending the request, this task will be treated as failed.
  ///
  /// \param[in] client The RPC client to send tasks to an actor.
  /// \param[in] request The request to send.
  /// \param[in] actor_id Actor ID.
  /// \param[in] task_id The ID of a task.
  /// \param[in] num_returns Number of return objects.
  /// \return Void.
  void PushActorTask(rpc::CoreWorkerClientInterface &client,
                     std::unique_ptr<rpc::PushTaskRequest> request,
                     const ActorID &actor_id, const TaskID &task_id, int num_returns)
      EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Send all pending tasks for an actor.
  /// Note that this function doesn't take lock, the caller is expected to hold
  /// `mu_` before calling this function.
  ///
  /// \param[in] actor_id Actor ID.
  /// \return Void.
  void SendPendingTasks(const ActorID &actor_id) EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Whether the specified actor is alive.
  ///
  /// \param[in] actor_id The actor ID.
  /// \return Whether this actor is alive.
  bool IsActorAlive(const ActorID &actor_id) const;

  /// Factory for producing new core worker clients.
  rpc::ClientFactoryFn client_factory_;

  /// Mutex to protect the various maps below.
  mutable absl::Mutex mu_;

  /// Address of our RPC server.
  rpc::Address rpc_address_;

  /// Map from actor id to rpc client. This only includes actors that we send tasks to.
  /// We use shared_ptr to enable shared_from_this for pending client callbacks.
  ///
  /// TODO(zhijunfu): this will be moved into `actor_states_` later when we can
  /// subscribe updates for a specific actor.
  absl::flat_hash_map<ActorID, std::shared_ptr<rpc::CoreWorkerClientInterface>>
      rpc_clients_ GUARDED_BY(mu_);

  /// Map from actor ids to worker ids. TODO(ekl) consider unifying this with the
  /// rpc_clients_ map.
  absl::flat_hash_map<ActorID, std::string> worker_ids_ GUARDED_BY(mu_);

  /// Set of actor ids that should be force killed once a client is available.
  absl::flat_hash_set<ActorID> pending_force_kills_ GUARDED_BY(mu_);

  /// Map from actor id to the actor's pending requests. Each actor's requests
  /// are ordered by the task number in the request.
  absl::flat_hash_map<ActorID, std::map<int64_t, std::unique_ptr<rpc::PushTaskRequest>>>
      pending_requests_ GUARDED_BY(mu_);

  /// Map from actor id to the send position of the next task to queue for send
  /// for that actor. This is always greater than or equal to next_send_position_.
  absl::flat_hash_map<ActorID, int64_t> next_send_position_to_assign_ GUARDED_BY(mu_);

  /// Map from actor id to the send position of the next task to send to that actor.
  /// Note that this differs from the PushTaskRequest's sequence number in that it
  /// increases monotonically in this process independently of CallerId changes.
  absl::flat_hash_map<ActorID, int64_t> next_send_position_ GUARDED_BY(mu_);

  /// Resolve direct call object dependencies;
  LocalDependencyResolver resolver_;

  /// Used to complete tasks.
  std::shared_ptr<TaskFinisherInterface> task_finisher_;

  friend class CoreWorkerTest;
};
/// Object dependency and RPC state of an inbound request.
class InboundRequest {
 public:
  /// Default-constructed requests carry no callbacks and no pending
  /// dependencies. Note the bool is now explicitly initialized: the previous
  /// default constructor left it indeterminate.
  InboundRequest() = default;

  /// \param accept_callback Invoked to run the task.
  /// \param reject_callback Invoked to cancel the task.
  /// \param has_dependencies Whether object dependencies must become local
  ///     before the request can execute.
  InboundRequest(std::function<void()> accept_callback,
                 std::function<void()> reject_callback, bool has_dependencies)
      : accept_callback_(std::move(accept_callback)),
        reject_callback_(std::move(reject_callback)),
        has_pending_dependencies_(has_dependencies) {}

  void Accept() { accept_callback_(); }
  void Cancel() { reject_callback_(); }
  bool CanExecute() const { return !has_pending_dependencies_; }
  void MarkDependenciesSatisfied() { has_pending_dependencies_ = false; }

 private:
  std::function<void()> accept_callback_;
  std::function<void()> reject_callback_;
  /// Initialized so a default-constructed request reads a defined value.
  bool has_pending_dependencies_ = false;
};
/// Waits for an object dependency to become available. Abstract for testing.
class DependencyWaiter {
 public:
  /// Virtual destructor so implementations can be destroyed safely through a
  /// DependencyWaiter pointer (the class is polymorphic).
  virtual ~DependencyWaiter() = default;

  /// Calls `on_dependencies_available` once the specified objects become
  /// available.
  virtual void Wait(const std::vector<ObjectID> &dependencies,
                    std::function<void()> on_dependencies_available) = 0;
};
/// Default waiter implementation that delegates to the local raylet.
class DependencyWaiterImpl : public DependencyWaiter {
 public:
  /// \param local_raylet_client Raylet connection used to request the wait;
  ///     held by reference, must outlive this object.
  DependencyWaiterImpl(raylet::RayletClient &local_raylet_client)
      : local_raylet_client_(local_raylet_client) {}

  void Wait(const std::vector<ObjectID> &dependencies,
            std::function<void()> on_dependencies_available) override {
    auto tag = next_request_id_++;
    // Move (rather than copy) the callback into the pending-request table;
    // it is fulfilled later by OnWaitComplete().
    requests_[tag] = std::move(on_dependencies_available);
    local_raylet_client_.WaitForDirectActorCallArgs(dependencies, tag);
  }

  /// Fulfills the callback stored by Wait().
  void OnWaitComplete(int64_t tag) {
    auto it = requests_.find(tag);
    RAY_CHECK(it != requests_.end());
    it->second();
    requests_.erase(it);
  }

 private:
  /// Monotonically increasing tag used to match raylet replies to callbacks.
  int64_t next_request_id_ = 0;
  /// Outstanding wait callbacks keyed by tag.
  std::unordered_map<int64_t, std::function<void()>> requests_;
  raylet::RayletClient &local_raylet_client_;
};
/// Wraps a thread-pool to block posts until the pool has free slots. This is used
/// by the SchedulingQueue to provide backpressure to clients.
class BoundedExecutor {
 public:
  BoundedExecutor(int max_concurrency)
      : num_running_(0), max_concurrency_(max_concurrency), pool_(max_concurrency) {}

  /// Posts work to the pool, blocking if no free threads are available.
  void PostBlocking(std::function<void()> fn) {
    {
      // Claim a slot before handing the work to the pool. Await atomically
      // releases the mutex while blocked and reacquires it when a slot frees
      // up, which is equivalent to the LockWhen/Unlock pairing.
      absl::MutexLock slot_guard(&mu_);
      mu_.Await(absl::Condition(this, &BoundedExecutor::ThreadsAvailable));
      num_running_ += 1;
    }
    boost::asio::post(pool_, [this, fn]() {
      fn();
      // Return the slot once the work item has finished.
      absl::MutexLock done_guard(&mu_);
      num_running_ -= 1;
    });
  }

 private:
  /// True while fewer than `max_concurrency_` tasks are in flight.
  bool ThreadsAvailable() EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    return num_running_ < max_concurrency_;
  }

  /// Protects access to the counters below.
  absl::Mutex mu_;
  /// The number of currently running tasks.
  int num_running_ GUARDED_BY(mu_);
  /// The max number of concurrently running tasks allowed.
  const int max_concurrency_;
  /// The underlying thread pool for running tasks.
  boost::asio::thread_pool pool_;
};
/// Used by async actor mode. The fiber event will be used
/// from python to switch control among different coroutines.
/// Taken from boost::fiber examples
/// https://github.com/boostorg/fiber/blob/7be4f860e733a92d2fa80a848dd110df009a20e1/examples/wait_stuff.cpp#L115-L142
class FiberEvent {
 public:
  // Block the fiber until the event is notified. Returns immediately if the
  // event has already been notified (the event is one-shot: ready_ stays set).
  void Wait() {
    std::unique_lock<boost::fibers::mutex> lock(mutex_);
    cond_.wait(lock, [this]() { return ready_; });
  }

  // Notify the event and unblock all waiters.
  void Notify() {
    {
      std::unique_lock<boost::fibers::mutex> lock(mutex_);
      ready_ = true;
    }
    // Use notify_all (the previous notify_one only woke a single waiter) so
    // that *every* fiber currently blocked in Wait() is released, matching
    // the documented "unblock all waiters" semantics. Waiters re-check
    // ready_ under the lock, so extra wakeups are harmless.
    cond_.notify_all();
  }

 private:
  boost::fibers::condition_variable cond_;
  boost::fibers::mutex mutex_;
  bool ready_ = false;
};
/// Used by async actor mode. The FiberRateLimiter is a barrier that
/// allows at most num fibers running at once. It implements the
/// semaphore data structure.
class FiberRateLimiter {
 public:
  FiberRateLimiter(int num) : num_(num) {}

  // Enter the semaphore: block until the count is positive, then take one
  // unit.
  void Acquire() {
    std::unique_lock<boost::fibers::mutex> guard(mutex_);
    cond_.wait(guard, [this]() { return num_ > 0; });
    --num_;
  }

  // Exit the semaphore: return one unit and wake a single waiter.
  void Release() {
    {
      std::unique_lock<boost::fibers::mutex> guard(mutex_);
      ++num_;
    }
    // TODO(simon): This does not guarantee waking the first queued fiber.
    // This could be a problem for certain workloads because there is no
    // guarantee on task ordering.
    cond_.notify_one();
  }

 private:
  boost::fibers::condition_variable cond_;
  boost::fibers::mutex mutex_;
  int num_ = 1;
};
/// Used to ensure serial order of task execution per actor handle.
/// See direct_actor.proto for a description of the ordering protocol.
class SchedulingQueue {
 public:
  SchedulingQueue(boost::asio::io_service &main_io_service, DependencyWaiter &waiter,
                  std::shared_ptr<BoundedExecutor> pool = nullptr,
                  bool use_asyncio = false,
                  std::shared_ptr<FiberRateLimiter> fiber_rate_limiter = nullptr,
                  int64_t reorder_wait_seconds = kMaxReorderWaitSeconds)
      // Initializers listed in member declaration order (the order in which
      // initialization actually runs) to avoid -Wreorder warnings.
      : reorder_wait_seconds_(reorder_wait_seconds),
        wait_timer_(main_io_service),
        main_thread_id_(boost::this_thread::get_id()),
        waiter_(waiter),
        pool_(pool),
        use_asyncio_(use_asyncio),
        fiber_rate_limiter_(fiber_rate_limiter) {}

  /// Enqueue an inbound task request.
  ///
  /// \param seq_no Sequence number of this request from its caller; -1 means
  ///     no ordering constraint and the request is accepted immediately.
  /// \param client_processed_up_to Highest sequence number the client has
  ///     already received replies for; anything at or below it is stale.
  /// \param accept_request Called to execute the task.
  /// \param reject_request Called to cancel the task.
  /// \param dependencies Objects that must be local before execution.
  void Add(int64_t seq_no, int64_t client_processed_up_to,
           std::function<void()> accept_request, std::function<void()> reject_request,
           const std::vector<ObjectID> &dependencies = {}) {
    if (seq_no == -1) {
      accept_request();  // A seq_no of -1 means no ordering constraint.
      return;
    }

    RAY_CHECK(boost::this_thread::get_id() == main_thread_id_);
    if (client_processed_up_to >= next_seq_no_) {
      RAY_LOG(ERROR) << "client skipping requests " << next_seq_no_ << " to "
                     << client_processed_up_to;
      next_seq_no_ = client_processed_up_to + 1;
    }
    RAY_LOG(DEBUG) << "Enqueue " << seq_no << " cur seqno " << next_seq_no_;
    pending_tasks_[seq_no] =
        InboundRequest(accept_request, reject_request, dependencies.size() > 0);
    if (dependencies.size() > 0) {
      waiter_.Wait(dependencies, [seq_no, this]() {
        RAY_CHECK(boost::this_thread::get_id() == main_thread_id_);
        auto it = pending_tasks_.find(seq_no);
        if (it != pending_tasks_.end()) {
          it->second.MarkDependenciesSatisfied();
          ScheduleRequests();
        }
      });
    }
    ScheduleRequests();
  }

 private:
  /// Schedules as many requests as possible in sequence.
  void ScheduleRequests() {
    // Cancel any stale requests that the client doesn't need any longer.
    while (!pending_tasks_.empty() && pending_tasks_.begin()->first < next_seq_no_) {
      auto head = pending_tasks_.begin();
      RAY_LOG(ERROR) << "Cancelling stale RPC with seqno "
                     << pending_tasks_.begin()->first << " < " << next_seq_no_;
      head->second.Cancel();
      pending_tasks_.erase(head);
    }

    // Process as many in-order requests as we can.
    while (!pending_tasks_.empty() && pending_tasks_.begin()->first == next_seq_no_ &&
           pending_tasks_.begin()->second.CanExecute()) {
      auto head = pending_tasks_.begin();
      // Copy: the request is handed to a fiber/pool and may outlive the map
      // entry, which is erased below.
      auto request = head->second;
      if (use_asyncio_) {
        boost::fibers::fiber([request, this]() mutable {
          fiber_rate_limiter_->Acquire();
          request.Accept();
          fiber_rate_limiter_->Release();
        })
            .detach();
      } else if (pool_ != nullptr) {
        pool_->PostBlocking([request]() mutable { request.Accept(); });
      } else {
        request.Accept();
      }
      pending_tasks_.erase(head);
      next_seq_no_++;
    }

    if (pending_tasks_.empty() || !pending_tasks_.begin()->second.CanExecute()) {
      // No timeout for object dependency waits.
      wait_timer_.cancel();
    } else {
      // Set a timeout on the queued tasks to avoid an infinite wait on failure.
      wait_timer_.expires_from_now(boost::posix_time::seconds(reorder_wait_seconds_));
      RAY_LOG(DEBUG) << "waiting for " << next_seq_no_ << " queue size "
                     << pending_tasks_.size();
      wait_timer_.async_wait([this](const boost::system::error_code &error) {
        if (error == boost::asio::error::operation_aborted) {
          return;  // time deadline was adjusted
        }
        OnSequencingWaitTimeout();
      });
    }
  }

  /// Called when we time out waiting for an earlier task to show up.
  void OnSequencingWaitTimeout() {
    RAY_CHECK(boost::this_thread::get_id() == main_thread_id_);
    RAY_LOG(ERROR) << "timed out waiting for " << next_seq_no_
                   << ", cancelling all queued tasks";
    while (!pending_tasks_.empty()) {
      auto head = pending_tasks_.begin();
      head->second.Cancel();
      // BUG FIX: read `head->first` *before* erasing the iterator. The
      // previous code erased `head` and then dereferenced it, which is a
      // use-after-free of the map node (std::map::erase invalidates the
      // erased iterator).
      next_seq_no_ = std::max(next_seq_no_, head->first + 1);
      pending_tasks_.erase(head);
    }
  }

  /// Max time in seconds to wait for dependencies to show up.
  const int64_t reorder_wait_seconds_ = 0;
  /// Sorted map of (accept, rej) task callbacks keyed by their sequence number.
  std::map<int64_t, InboundRequest> pending_tasks_;
  /// The next sequence number we are waiting for to arrive.
  int64_t next_seq_no_ = 0;
  /// Timer for waiting on dependencies. Note that this is set on the task main
  /// io service, which is fine since it only ever fires if no tasks are running.
  boost::asio::deadline_timer wait_timer_;
  /// The id of the thread that constructed this scheduling queue.
  boost::thread::id main_thread_id_;
  /// Reference to the waiter owned by the task receiver.
  DependencyWaiter &waiter_;
  /// If concurrent calls are allowed, holds the pool for executing these tasks.
  std::shared_ptr<BoundedExecutor> pool_;
  /// Whether we should enqueue requests into asyncio pool. Setting this to true
  /// will instantiate all tasks as fibers that can be yielded.
  bool use_asyncio_;
  /// If use_asyncio_ is true, fiber_rate_limiter_ limits the max number of async
  /// tasks running at once.
  std::shared_ptr<FiberRateLimiter> fiber_rate_limiter_;

  friend class SchedulingQueueTest;
};
class CoreWorkerDirectTaskReceiver {
 public:
  using TaskHandler =
      std::function<Status(const TaskSpecification &task_spec,
                           const std::shared_ptr<ResourceMappingType> resource_ids,
                           std::vector<std::shared_ptr<RayObject>> *return_objects)>;

  /// \param worker_context Context of this worker.
  /// \param local_raylet_client Reference to the core worker's raylet client
  ///     handle; a reference because the core worker initializes it after
  ///     this class is constructed.
  /// \param main_io_service Event loop on which tasks are scheduled.
  /// \param task_handler Callback invoked to execute each task.
  /// \param exit_handler Callback invoked to exit the worker.
  CoreWorkerDirectTaskReceiver(WorkerContext &worker_context,
                               std::shared_ptr<raylet::RayletClient> &local_raylet_client,
                               boost::asio::io_service &main_io_service,
                               const TaskHandler &task_handler,
                               const std::function<void(bool)> &exit_handler)
      // Initializers listed in member declaration order (the order in which
      // initialization actually runs) to avoid -Wreorder warnings.
      : worker_context_(worker_context),
        task_handler_(task_handler),
        exit_handler_(exit_handler),
        task_main_io_service_(main_io_service),
        local_raylet_client_(local_raylet_client) {}

  ~CoreWorkerDirectTaskReceiver() {
    // Unblock the fiber runner thread (if any) so it can exit.
    fiber_shutdown_event_.Notify();
    // Only join the fiber thread if it was spawned in the first place.
    if (fiber_runner_thread_.joinable()) {
      fiber_runner_thread_.join();
    }
  }

  /// Initialize this receiver. This must be called prior to use.
  void Init(rpc::ClientFactoryFn client_factory, rpc::Address rpc_address);

  /// Handle a `PushTask` request.
  ///
  /// \param[in] request The request message.
  /// \param[out] reply The reply message.
  /// \param[in] send_reply_callback The callback to be called when the request is done.
  void HandlePushTask(const rpc::PushTaskRequest &request, rpc::PushTaskReply *reply,
                      rpc::SendReplyCallback send_reply_callback);

  /// Handle a `DirectActorCallArgWaitComplete` request.
  ///
  /// \param[in] request The request message.
  /// \param[out] reply The reply message.
  /// \param[in] send_reply_callback The callback to be called when the request is done.
  void HandleDirectActorCallArgWaitComplete(
      const rpc::DirectActorCallArgWaitCompleteRequest &request,
      rpc::DirectActorCallArgWaitCompleteReply *reply,
      rpc::SendReplyCallback send_reply_callback);

  /// Set the max concurrency at runtime. It cannot be changed once set.
  void SetMaxActorConcurrency(int max_concurrency);

  /// Set the max concurrency and start async actor context.
  void SetActorAsAsync(int max_concurrency);

 private:
  // Worker context.
  WorkerContext &worker_context_;
  /// The callback function to process a task.
  TaskHandler task_handler_;
  /// The callback function to exit the worker.
  std::function<void(bool)> exit_handler_;
  /// The IO event loop for running tasks on.
  boost::asio::io_service &task_main_io_service_;
  /// Factory for producing new core worker clients.
  rpc::ClientFactoryFn client_factory_;
  /// Address of our RPC server.
  rpc::Address rpc_address_;
  /// Reference to the core worker's raylet client. This is a pointer ref so that it
  /// can be initialized by core worker after this class is constructed.
  std::shared_ptr<raylet::RayletClient> &local_raylet_client_;
  /// Shared waiter for dependencies required by incoming tasks.
  std::unique_ptr<DependencyWaiterImpl> waiter_;
  /// Queue of pending requests per actor handle.
  /// TODO(ekl) GC these queues once the handle is no longer active.
  std::unordered_map<TaskID, std::unique_ptr<SchedulingQueue>> scheduling_queue_;
  /// The max number of concurrent calls to allow.
  int max_concurrency_ = 1;
  /// Whether we are shutting down and not running further tasks.
  bool exiting_ = false;
  /// If concurrent calls are allowed, holds the pool for executing these tasks.
  std::shared_ptr<BoundedExecutor> pool_;
  /// Whether this actor uses asyncio for concurrency.
  /// TODO(simon) group all asyncio related fields into a separate struct.
  bool is_asyncio_ = false;
  /// The thread that runs all asyncio fibers. is_asyncio_ must be true.
  std::thread fiber_runner_thread_;
  /// The fiber event used to block fiber_runner_thread_ from shutdown.
  /// is_asyncio_ must be true.
  FiberEvent fiber_shutdown_event_;
  /// The fiber semaphore used to limit the number of concurrent fibers
  /// running at once.
  std::shared_ptr<FiberRateLimiter> fiber_rate_limiter_;
};
} // namespace ray
#endif // RAY_CORE_WORKER_DIRECT_ACTOR_TRANSPORT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/transport/direct_task_transport.cc
|
C++
|
#include "ray/core_worker/transport/direct_task_transport.h"
#include "ray/core_worker/transport/dependency_resolver.h"
#include "ray/core_worker/transport/direct_actor_transport.h"
namespace ray {
Status CoreWorkerDirectTaskSubmitter::SubmitTask(TaskSpecification task_spec) {
  RAY_LOG(DEBUG) << "Submit task " << task_spec.TaskId();
  resolver_.ResolveDependencies(task_spec, [this, task_spec]() {
    RAY_LOG(DEBUG) << "Task dependencies resolved " << task_spec.TaskId();
    absl::MutexLock lock(&mu_);
    // Note that the dependencies in the task spec are mutated to only contain
    // plasma dependencies after ResolveDependencies finishes.
    const bool is_actor_creation = task_spec.IsActorCreationTask();
    const ActorID actor_id =
        is_actor_creation ? task_spec.ActorCreationId() : ActorID::Nil();
    const SchedulingKey scheduling_key(task_spec.GetSchedulingClass(),
                                       task_spec.GetDependencies(), actor_id);
    // operator[] default-constructs an empty queue the first time this key is
    // seen, so no explicit find/emplace dance is needed.
    task_queues_[scheduling_key].push_back(task_spec);
    RequestNewWorkerIfNeeded(scheduling_key);
  });
  return Status::OK();
}
void CoreWorkerDirectTaskSubmitter::AddWorkerLeaseClient(
    const rpc::WorkerAddress &addr, std::shared_ptr<WorkerLeaseInterface> lease_client) {
  // Lazily open an RPC connection to the worker the first time we see this
  // address; the cache entry is default-constructed null until then.
  auto &client = client_cache_[addr];
  if (client == nullptr) {
    client = std::shared_ptr<rpc::CoreWorkerClientInterface>(
        client_factory_(addr.ip_address, addr.port));
    RAY_LOG(INFO) << "Connected to " << addr.ip_address << ":" << addr.port;
  }
  // Record the lease client along with when the lease expires; expired
  // workers are returned to the raylet by OnWorkerIdle.
  const int64_t expiration = current_time_ms() + lease_timeout_ms_;
  worker_to_lease_client_.emplace(addr,
                                  std::make_pair(std::move(lease_client), expiration));
}
void CoreWorkerDirectTaskSubmitter::OnWorkerIdle(
    const rpc::WorkerAddress &addr, const SchedulingKey &scheduling_key, bool was_error,
    const google::protobuf::RepeatedPtrField<rpc::ResourceMapEntry> &assigned_resources) {
  // Bind by reference: the previous code copied the (lease client, expiration)
  // pair, needlessly bumping the shared_ptr refcount on every idle event.
  // NOTE(review): operator[] default-inserts a null entry if `addr` was never
  // leased; callers are expected to only pass leased workers — confirm.
  auto &lease_entry = worker_to_lease_client_[addr];
  auto queue_entry = task_queues_.find(scheduling_key);
  // Return the worker if there was an error executing the previous task,
  // the previous task is an actor creation task,
  // there are no more applicable queued tasks, or the lease is expired.
  if (was_error || queue_entry == task_queues_.end() ||
      current_time_ms() > lease_entry.second) {
    auto status = lease_entry.first->ReturnWorker(addr.port, addr.worker_id, was_error);
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Error returning worker to raylet: " << status.ToString();
    }
    // Invalidates `lease_entry`; it must not be touched after this line.
    worker_to_lease_client_.erase(addr);
  } else {
    auto &client = *client_cache_[addr];
    PushNormalTask(addr, client, scheduling_key, queue_entry->second.front(),
                   assigned_resources);
    queue_entry->second.pop_front();
    // Delete the queue if it's now empty. Note that the queue cannot already be empty
    // because this is the only place tasks are removed from it.
    if (queue_entry->second.empty()) {
      task_queues_.erase(queue_entry);
    }
  }
  RequestNewWorkerIfNeeded(scheduling_key);
}
std::shared_ptr<WorkerLeaseInterface>
CoreWorkerDirectTaskSubmitter::GetOrConnectLeaseClient(
    const rpc::Address *raylet_address) {
  if (raylet_address != nullptr) {
    // Decode the raylet ID exactly once (the previous code parsed the binary
    // twice: once for the comparison and once for the lookup key).
    const ClientID raylet_id = ClientID::FromBinary(raylet_address->raylet_id());
    if (raylet_id != local_raylet_id_) {
      // A remote raylet was specified. Connect to the raylet if needed.
      auto it = remote_lease_clients_.find(raylet_id);
      if (it == remote_lease_clients_.end()) {
        RAY_LOG(DEBUG) << "Connecting to raylet " << raylet_id;
        it = remote_lease_clients_
                 .emplace(raylet_id, lease_client_factory_(raylet_address->ip_address(),
                                                           raylet_address->port()))
                 .first;
      }
      return it->second;
    }
  }
  // Either no raylet was specified or it is the local one.
  return local_lease_client_;
}
/// Request a worker lease for the task at the head of `scheduling_key`'s
/// queue, unless a lease request for that key is already outstanding or the
/// queue is empty. Runs with `mu_` held (see EXCLUSIVE_LOCKS_REQUIRED in the
/// header), which also guarantees the reply callback below cannot run before
/// this function returns.
void CoreWorkerDirectTaskSubmitter::RequestNewWorkerIfNeeded(
    const SchedulingKey &scheduling_key, const rpc::Address *raylet_address) {
  if (pending_lease_requests_.find(scheduling_key) != pending_lease_requests_.end()) {
    // There's already an outstanding lease request for this type of task.
    return;
  }
  auto it = task_queues_.find(scheduling_key);
  if (it == task_queues_.end()) {
    // We don't have any of this type of task to run.
    return;
  }
  // Pick the raylet to lease from: remote if `raylet_address` points off-node,
  // otherwise the local one.
  auto lease_client = GetOrConnectLeaseClient(raylet_address);
  // Use the front task's spec for the lease request; all tasks under one
  // scheduling key share the same resource shape.
  TaskSpecification &resource_spec = it->second.front();
  TaskID task_id = resource_spec.TaskId();
  auto status = lease_client->RequestWorkerLease(
      resource_spec,
      [this, lease_client, task_id, scheduling_key](
          const Status &status, const rpc::RequestWorkerLeaseReply &reply) mutable {
        // Reply callback: runs later on the RPC event loop; re-acquires mu_
        // before touching submitter state.
        absl::MutexLock lock(&mu_);
        pending_lease_requests_.erase(scheduling_key);
        if (status.ok()) {
          if (!reply.worker_address().raylet_id().empty()) {
            // We got a lease for a worker. Add the lease client state and try to
            // assign work to the worker.
            RAY_LOG(DEBUG) << "Lease granted " << task_id;
            rpc::WorkerAddress addr = {
                reply.worker_address().ip_address(), reply.worker_address().port(),
                WorkerID::FromBinary(reply.worker_address().worker_id()),
                ClientID::FromBinary(reply.worker_address().raylet_id())};
            AddWorkerLeaseClient(addr, std::move(lease_client));
            auto resources_copy = reply.resource_mapping();
            OnWorkerIdle(addr, scheduling_key,
                         /*error=*/false, resources_copy);
          } else {
            // The raylet redirected us to a different raylet to retry at.
            RequestNewWorkerIfNeeded(scheduling_key, &reply.retry_at_raylet_address());
          }
        } else {
          RetryLeaseRequest(status, lease_client, scheduling_key);
        }
      });
  if (!status.ok()) {
    RetryLeaseRequest(status, lease_client, scheduling_key);
  }
  // Mark the key as pending only after issuing the request; safe w.r.t. the
  // callback because mu_ is held throughout this function.
  // NOTE(review): on the synchronous-failure path above, RetryLeaseRequest ->
  // RequestNewWorkerIfNeeded may re-issue a request *before* this insert runs
  // (the pending check saw no entry) — verify this double-request is intended.
  pending_lease_requests_.insert(scheduling_key);
}
void CoreWorkerDirectTaskSubmitter::RetryLeaseRequest(
    Status status, std::shared_ptr<WorkerLeaseInterface> lease_client,
    const SchedulingKey &scheduling_key) {
  if (lease_client == local_lease_client_) {
    // A local request failed. This shouldn't happen if the raylet is still
    // alive and we don't currently handle raylet failures, so treat it as a
    // fatal error.
    RAY_LOG(FATAL) << "Lost connection with local raylet. Error: " << status.ToString();
  } else {
    // A lease request to a remote raylet failed. Retry locally if the lease
    // is still needed.
    // TODO(swang): Fail after some number of retries?
    RAY_LOG(ERROR) << "Retrying attempt to schedule task at remote node. Error: "
                   << status.ToString();
    RequestNewWorkerIfNeeded(scheduling_key);
  }
}
/// Push `task_spec` to the leased worker at `addr`. On reply (or on a failed
/// send) the worker is returned to the idle pool via OnWorkerIdle — or
/// dropped entirely if it reports it is exiting — and the task is completed
/// or failed through `task_finisher_`.
void CoreWorkerDirectTaskSubmitter::PushNormalTask(
    const rpc::WorkerAddress &addr, rpc::CoreWorkerClientInterface &client,
    const SchedulingKey &scheduling_key, const TaskSpecification &task_spec,
    const google::protobuf::RepeatedPtrField<rpc::ResourceMapEntry> &assigned_resources) {
  auto task_id = task_spec.TaskId();
  auto request = std::unique_ptr<rpc::PushTaskRequest>(new rpc::PushTaskRequest);
  // Capture these before the async call; the spec itself is not retained.
  bool is_actor = task_spec.IsActorTask();
  bool is_actor_creation = task_spec.IsActorCreationTask();
  RAY_LOG(DEBUG) << "Pushing normal task " << task_spec.TaskId();
  // NOTE(swang): CopyFrom is needed because if we use Swap here and the task
  // fails, then the task data will be gone when the TaskManager attempts to
  // access the task.
  request->mutable_caller_address()->CopyFrom(rpc_address_);
  request->mutable_task_spec()->CopyFrom(task_spec.GetMessage());
  request->mutable_resource_mapping()->CopyFrom(assigned_resources);
  request->set_intended_worker_id(addr.worker_id.Binary());
  auto status = client.PushNormalTask(
      std::move(request),
      [this, task_id, is_actor, is_actor_creation, scheduling_key, addr,
       assigned_resources](Status status, const rpc::PushTaskReply &reply) {
        // Reply callback: runs on the RPC event loop, so mu_ is taken before
        // touching submitter state in either branch below.
        if (reply.worker_exiting()) {
          // The worker is draining and will shutdown after it is done. Don't return
          // it to the Raylet since that will kill it early.
          absl::MutexLock lock(&mu_);
          worker_to_lease_client_.erase(addr);
        } else if (!status.ok() || !is_actor_creation) {
          // Successful actor creation leases the worker indefinitely from the raylet.
          absl::MutexLock lock(&mu_);
          OnWorkerIdle(addr, scheduling_key,
                       /*error=*/!status.ok(), assigned_resources);
        }
        if (!status.ok()) {
          // TODO: It'd be nice to differentiate here between process vs node
          // failure (e.g., by contacting the raylet). If it was a process
          // failure, it may have been an application-level error and it may
          // not make sense to retry the task.
          task_finisher_->PendingTaskFailed(
              task_id,
              is_actor ? rpc::ErrorType::ACTOR_DIED : rpc::ErrorType::WORKER_DIED,
              &status);
        } else {
          rpc::Address proto = addr.ToProto();
          task_finisher_->CompletePendingTask(task_id, reply, &proto);
        }
      });
  if (!status.ok()) {
    // The synchronous send itself failed: release the worker with the error
    // flag set, then fail the task through the task finisher.
    RAY_LOG(ERROR) << "Error pushing task to worker: " << status.ToString();
    {
      absl::MutexLock lock(&mu_);
      OnWorkerIdle(addr, scheduling_key, /*error=*/true, assigned_resources);
    }
    task_finisher_->PendingTaskFailed(
        task_id, is_actor ? rpc::ErrorType::ACTOR_DIED : rpc::ErrorType::WORKER_DIED,
        &status);
  }
}
}; // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/transport/direct_task_transport.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_DIRECT_TASK_H
#define RAY_CORE_WORKER_DIRECT_TASK_H
#include <google/protobuf/repeated_field.h>
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "ray/common/id.h"
#include "ray/common/ray_object.h"
#include "ray/core_worker/context.h"
#include "ray/core_worker/store_provider/memory_store/memory_store.h"
#include "ray/core_worker/task_manager.h"
#include "ray/core_worker/transport/dependency_resolver.h"
#include "ray/core_worker/transport/direct_actor_transport.h"
#include "ray/raylet/raylet_client.h"
#include "ray/rpc/worker/core_worker_client.h"
namespace ray {
typedef std::function<std::shared_ptr<WorkerLeaseInterface>(const std::string &ip_address,
int port)>
LeaseClientFactoryFn;
// The task queues are keyed on resource shape & function descriptor
// (encapsulated in SchedulingClass) to defer resource allocation decisions to the raylet
// and ensure fairness between different tasks, as well as plasma task dependencies as
// a performance optimization because the raylet will fetch plasma dependencies to the
// scheduled worker. It's also keyed on actor ID to ensure the actor creation task
// would always request a new worker lease. We need this to let raylet know about
// direct actor creation task, and reconstruct the actor if it dies. Otherwise if
// the actor creation task just reuses an existing worker, then raylet will not
// be aware of the actor and is not able to manage it.
using SchedulingKey = std::tuple<SchedulingClass, std::vector<ObjectID>, ActorID>;
// This class is thread-safe.
class CoreWorkerDirectTaskSubmitter {
 public:
  /// \param rpc_address Address of this worker's RPC server; attached to
  ///     outgoing requests as the caller address.
  /// \param lease_client Client used to lease/return workers from the local
  ///     raylet.
  /// \param client_factory Factory for producing new core worker clients.
  /// \param lease_client_factory Factory for clients to remote raylets.
  /// \param store Memory store used to resolve task dependencies.
  /// \param task_finisher Used to complete or fail submitted tasks.
  /// \param local_raylet_id ID of the local raylet (used to detect spillback
  ///     to ourselves).
  /// \param lease_timeout_ms How long a leased worker may stay idle before
  ///     being returned to the raylet.
  CoreWorkerDirectTaskSubmitter(rpc::Address rpc_address,
                                std::shared_ptr<WorkerLeaseInterface> lease_client,
                                rpc::ClientFactoryFn client_factory,
                                LeaseClientFactoryFn lease_client_factory,
                                std::shared_ptr<CoreWorkerMemoryStore> store,
                                std::shared_ptr<TaskFinisherInterface> task_finisher,
                                ClientID local_raylet_id, int64_t lease_timeout_ms)
      // Initializers listed in member declaration order (the order in which
      // initialization actually runs) to avoid -Wreorder warnings; by-value
      // parameters are moved into the members they initialize.
      : rpc_address_(std::move(rpc_address)),
        local_lease_client_(std::move(lease_client)),
        client_factory_(std::move(client_factory)),
        lease_client_factory_(std::move(lease_client_factory)),
        resolver_(std::move(store), task_finisher),
        task_finisher_(std::move(task_finisher)),
        lease_timeout_ms_(lease_timeout_ms),
        local_raylet_id_(local_raylet_id) {}

  /// Schedule a task for direct submission to a worker.
  ///
  /// \param[in] task_spec The task to schedule.
  Status SubmitTask(TaskSpecification task_spec);

 private:
  /// Schedule more work onto an idle worker or return it back to the raylet if
  /// no more tasks are queued for submission. If an error was encountered
  /// processing the worker, we don't attempt to re-use the worker.
  ///
  /// \param[in] addr The address of the worker.
  /// \param[in] task_queue_key The scheduling class of the worker.
  /// \param[in] was_error Whether the task failed to be submitted.
  /// \param[in] assigned_resources Resource ids previously assigned to the worker.
  void OnWorkerIdle(
      const rpc::WorkerAddress &addr, const SchedulingKey &task_queue_key, bool was_error,
      const google::protobuf::RepeatedPtrField<rpc::ResourceMapEntry> &assigned_resources)
      EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Retry a failed lease request.
  void RetryLeaseRequest(Status status,
                         std::shared_ptr<WorkerLeaseInterface> lease_client,
                         const SchedulingKey &scheduling_key)
      EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Get an existing lease client or connect a new one. If a raylet_address is
  /// provided, this connects to a remote raylet. Else, this connects to the
  /// local raylet.
  std::shared_ptr<WorkerLeaseInterface> GetOrConnectLeaseClient(
      const rpc::Address *raylet_address) EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Request a new worker from the raylet if no such requests are currently in
  /// flight and there are tasks queued. If a raylet address is provided, then
  /// the worker should be requested from the raylet at that address. Else, the
  /// worker should be requested from the local raylet.
  void RequestNewWorkerIfNeeded(const SchedulingKey &task_queue_key,
                                const rpc::Address *raylet_address = nullptr)
      EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Set up client state for newly granted worker lease.
  void AddWorkerLeaseClient(const rpc::WorkerAddress &addr,
                            std::shared_ptr<WorkerLeaseInterface> lease_client)
      EXCLUSIVE_LOCKS_REQUIRED(mu_);

  /// Push a task to a specific worker.
  void PushNormalTask(const rpc::WorkerAddress &addr,
                      rpc::CoreWorkerClientInterface &client,
                      const SchedulingKey &task_queue_key,
                      const TaskSpecification &task_spec,
                      const google::protobuf::RepeatedPtrField<rpc::ResourceMapEntry>
                          &assigned_resources);

  /// Address of our RPC server.
  rpc::Address rpc_address_;

  // Client that can be used to lease and return workers from the local raylet.
  std::shared_ptr<WorkerLeaseInterface> local_lease_client_;

  /// Cache of gRPC clients to remote raylets.
  absl::flat_hash_map<ClientID, std::shared_ptr<WorkerLeaseInterface>>
      remote_lease_clients_ GUARDED_BY(mu_);

  /// Factory for producing new core worker clients.
  rpc::ClientFactoryFn client_factory_;

  /// Factory for producing new clients to request leases from remote nodes.
  LeaseClientFactoryFn lease_client_factory_;

  /// Resolve local and remote dependencies;
  LocalDependencyResolver resolver_;

  /// Used to complete tasks.
  std::shared_ptr<TaskFinisherInterface> task_finisher_;

  /// The timeout for worker leases; after this duration, workers will be returned
  /// to the raylet.
  int64_t lease_timeout_ms_;

  /// The local raylet ID. Used to make sure that we use the local lease client
  /// if a remote raylet tells us to spill the task back to the local raylet.
  const ClientID local_raylet_id_;

  // Protects task submission state below.
  absl::Mutex mu_;

  /// Cache of gRPC clients to other workers.
  absl::flat_hash_map<rpc::WorkerAddress, std::shared_ptr<rpc::CoreWorkerClientInterface>>
      client_cache_ GUARDED_BY(mu_);

  /// Map from worker address to the lease client through which it should be
  /// returned and its lease expiration time.
  absl::flat_hash_map<rpc::WorkerAddress,
                      std::pair<std::shared_ptr<WorkerLeaseInterface>, int64_t>>
      worker_to_lease_client_ GUARDED_BY(mu_);

  // Keeps track of pending worker lease requests to the raylet.
  absl::flat_hash_set<SchedulingKey> pending_lease_requests_ GUARDED_BY(mu_);

  // Tasks that are queued for execution. We keep individual queues per
  // scheduling class to ensure fairness.
  // Invariant: if a queue is in this map, it has at least one task.
  absl::flat_hash_map<SchedulingKey, std::deque<TaskSpecification>> task_queues_
      GUARDED_BY(mu_);
};
}; // namespace ray
#endif // RAY_CORE_WORKER_DIRECT_TASK_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/transport/raylet_transport.cc
|
C++
|
#include "ray/core_worker/transport/raylet_transport.h"
#include "ray/common/common_protocol.h"
#include "ray/common/task/task.h"
namespace ray {
/// Construct a task receiver for the raylet (non-direct-call) task transport.
///
/// \param worker_id ID of the worker this receiver runs in.
/// \param raylet_client Reference to the core worker's raylet client. Held by
///        reference so the caller can (re)initialize it after construction.
/// \param task_handler Callback invoked to execute each assigned task.
/// \param exit_handler Callback invoked when a task handler reports a system
///        exit; the bool argument is whether the exit was intentional.
CoreWorkerRayletTaskReceiver::CoreWorkerRayletTaskReceiver(
    const WorkerID &worker_id, std::shared_ptr<raylet::RayletClient> &raylet_client,
    const TaskHandler &task_handler, const std::function<void(bool)> &exit_handler)
    : worker_id_(worker_id),
      raylet_client_(raylet_client),
      task_handler_(task_handler),
      exit_handler_(exit_handler) {}
/// Execute a task assigned by the raylet, then notify the raylet via
/// `TaskDone` and reply to the RPC. If the task handler reports a system
/// exit, the exit handler is invoked and no reply is sent.
void CoreWorkerRayletTaskReceiver::HandleAssignTask(
    const rpc::AssignTaskRequest &request, rpc::AssignTaskReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  // NOTE(review): `reply` is never populated in this handler; the caller only
  // observes the status passed to `send_reply_callback`.
  const Task task(request.task());
  const auto &task_spec = task.GetTaskSpecification();
  RAY_LOG(DEBUG) << "Received task " << task_spec.TaskId() << " is create "
                 << task_spec.IsActorCreationTask();
  // Set the resource IDs for this task.
  // TODO: convert the resource map to protobuf and change this.
  // The resource assignment arrives as a flatbuffer embedded in the protobuf
  // request; decode it into a resource-name -> [(id, fraction)] map.
  auto resource_ids = std::make_shared<ResourceMappingType>();
  auto resource_infos =
      flatbuffers::GetRoot<protocol::ResourceIdSetInfos>(request.resource_ids().data())
          ->resource_infos();
  for (size_t i = 0; i < resource_infos->size(); ++i) {
    auto const &fractional_resource_ids = resource_infos->Get(i);
    auto &acquired_resources =
        (*resource_ids)[string_from_flatbuf(*fractional_resource_ids->resource_name())];

    // The ids and fractions arrays are parallel; each entry pairs a specific
    // resource instance with the fraction of it assigned to this task.
    size_t num_resource_ids = fractional_resource_ids->resource_ids()->size();
    size_t num_resource_fractions = fractional_resource_ids->resource_fractions()->size();
    RAY_CHECK(num_resource_ids == num_resource_fractions);
    RAY_CHECK(num_resource_ids > 0);
    for (size_t j = 0; j < num_resource_ids; ++j) {
      int64_t resource_id = fractional_resource_ids->resource_ids()->Get(j);
      double resource_fraction = fractional_resource_ids->resource_fractions()->Get(j);
      if (num_resource_ids > 1) {
        // When multiple instances of one resource are assigned, each fraction
        // must be a whole number (fractional shares only make sense for a
        // single instance). The round-trip through int64_t checks this.
        int64_t whole_fraction = resource_fraction;
        RAY_CHECK(whole_fraction == resource_fraction);
      }
      acquired_resources.push_back(std::make_pair(resource_id, resource_fraction));
    }
  }

  std::vector<std::shared_ptr<RayObject>> results;
  auto status = task_handler_(task_spec, resource_ids, &results);
  if (status.IsSystemExit()) {
    // The worker is shutting down; do not reply or mark the task done.
    exit_handler_(status.IsIntentionalSystemExit());
    return;
  }
  RAY_LOG(DEBUG) << "Assigned task " << task_spec.TaskId() << " finished execution.";

  // Notify raylet that current task is done via a `TaskDone` message. This is to
  // ensure that the task is marked as finished by raylet only after previous
  // raylet client calls are completed. For example, if the worker sends a
  // NotifyUnblocked message that it is no longer blocked in a `ray.get`
  // on the normal raylet socket, then completes an assigned task, we
  // need to guarantee that raylet gets the former message first before
  // marking the task as completed. This is why a `TaskDone` message
  // is required - without it, it's possible that raylet receives
  // rpc reply first before the NotifyUnblocked message arrives,
  // as they use different connections, the `TaskDone` message is sent
  // to raylet via the same connection so the order is guaranteed.
  RAY_UNUSED(raylet_client_->TaskDone());
  // Send rpc reply.
  send_reply_callback(status, nullptr, nullptr);
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/core_worker/transport/raylet_transport.h
|
C/C++ Header
|
#ifndef RAY_CORE_WORKER_RAYLET_TRANSPORT_H
#define RAY_CORE_WORKER_RAYLET_TRANSPORT_H
#include <list>
#include "ray/common/ray_object.h"
#include "ray/raylet/raylet_client.h"
#include "ray/rpc/worker/core_worker_server.h"
namespace ray {
/// Receives tasks assigned by the raylet over the non-direct-call transport
/// and dispatches them to a user-provided task handler.
class CoreWorkerRayletTaskReceiver {
 public:
  /// Callback type used to execute a task. It receives the task spec, the
  /// resource instances assigned to the task, and an output vector for the
  /// task's return objects; it returns the execution status.
  using TaskHandler =
      std::function<Status(const TaskSpecification &task_spec,
                           const std::shared_ptr<ResourceMappingType> &resource_ids,
                           std::vector<std::shared_ptr<RayObject>> *return_objects)>;

  /// \param worker_id WorkerID of this worker.
  /// \param raylet_client The core worker's raylet client (held by reference;
  ///        may be initialized after this object is constructed).
  /// \param task_handler Callback invoked to execute each assigned task.
  /// \param exit_handler Callback invoked when the worker should exit; the
  ///        bool argument is whether the exit was intentional.
  CoreWorkerRayletTaskReceiver(const WorkerID &worker_id,
                               std::shared_ptr<raylet::RayletClient> &raylet_client,
                               const TaskHandler &task_handler,
                               const std::function<void(bool)> &exit_handler);

  /// Handle a `AssignTask` request.
  /// The implementation can handle this request asynchronously. When handling is done,
  /// the `send_reply_callback` should be called.
  ///
  /// \param[in] request The request message.
  /// \param[out] reply The reply message.
  /// \param[in] send_reply_callback The callback to be called when the request is done.
  void HandleAssignTask(const rpc::AssignTaskRequest &request,
                        rpc::AssignTaskReply *reply,
                        rpc::SendReplyCallback send_reply_callback);

 private:
  /// WorkerID of this worker.
  WorkerID worker_id_;
  /// Reference to the core worker's raylet client. This is a pointer ref so that it
  /// can be initialized by core worker after this class is constructed.
  std::shared_ptr<raylet::RayletClient> &raylet_client_;
  /// The callback function to process a task.
  TaskHandler task_handler_;
  /// The callback function to exit the worker.
  std::function<void(bool)> exit_handler_;
  /// The callback to process arg wait complete.
  /// NOTE(review): this member appears to be unused by this class and is never
  /// initialized by the constructor — confirm whether it is still needed.
  std::function<void(int64_t)> on_wait_complete_;
};
} // namespace ray
#endif // RAY_CORE_WORKER_RAYLET_TRANSPORT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/accessor.h
|
C/C++ Header
|
#ifndef RAY_GCS_ACCESSOR_H
#define RAY_GCS_ACCESSOR_H
#include "ray/common/id.h"
#include "ray/gcs/callback.h"
#include "ray/gcs/entry_change_notification.h"
#include "ray/protobuf/gcs.pb.h"
namespace ray {
namespace gcs {
/// \class ActorInfoAccessor
/// `ActorInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// actor information in the GCS.
class ActorInfoAccessor {
 public:
  virtual ~ActorInfoAccessor() = default;

  /// Get actor specification from GCS asynchronously.
  ///
  /// \param actor_id The ID of actor to look up in the GCS.
  /// \param callback Callback that will be called after lookup finishes.
  /// \return Status
  virtual Status AsyncGet(const ActorID &actor_id,
                          const OptionalItemCallback<rpc::ActorTableData> &callback) = 0;

  /// Register an actor to GCS asynchronously.
  ///
  /// \param data_ptr The actor that will be registered to the GCS.
  /// \param callback Callback that will be called after actor has been registered
  /// to the GCS.
  /// \return Status
  virtual Status AsyncRegister(const std::shared_ptr<rpc::ActorTableData> &data_ptr,
                               const StatusCallback &callback) = 0;

  /// Update dynamic states of actor in GCS asynchronously.
  ///
  /// \param actor_id ID of the actor to update.
  /// \param data_ptr Data of the actor to update.
  /// \param callback Callback that will be called after update finishes.
  /// \return Status
  /// TODO(micafan) Don't expose the whole `ActorTableData` and only allow
  /// updating dynamic states.
  virtual Status AsyncUpdate(const ActorID &actor_id,
                             const std::shared_ptr<rpc::ActorTableData> &data_ptr,
                             const StatusCallback &callback) = 0;

  /// Subscribe to any register or update operations of actors.
  ///
  /// \param subscribe Callback that will be called each time when an actor is registered
  /// or updated.
  /// \param done Callback that will be called when subscription is complete and we
  /// are ready to receive notification.
  /// \return Status
  virtual Status AsyncSubscribeAll(
      const SubscribeCallback<ActorID, rpc::ActorTableData> &subscribe,
      const StatusCallback &done) = 0;

  /// Subscribe to any update operations of an actor.
  ///
  /// \param actor_id The ID of actor to be subscribed to.
  /// \param subscribe Callback that will be called each time when the actor is updated.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribe(
      const ActorID &actor_id,
      const SubscribeCallback<ActorID, rpc::ActorTableData> &subscribe,
      const StatusCallback &done) = 0;

  /// Cancel subscription to an actor.
  ///
  /// \param actor_id The ID of the actor to be unsubscribed from.
  /// \param done Callback that will be called when unsubscribe is complete.
  /// \return Status
  virtual Status AsyncUnsubscribe(const ActorID &actor_id,
                                  const StatusCallback &done) = 0;

  /// Add actor checkpoint data to GCS asynchronously.
  ///
  /// \param data_ptr The checkpoint data that will be added to GCS.
  /// \param callback The callback that will be called after add finishes.
  /// \return Status
  /// TODO(micafan) When the GCS backend is redis,
  /// the checkpoint of the same actor needs to be updated serially,
  /// otherwise the checkpoint may be overwritten. This issue will be resolved if
  /// necessary.
  virtual Status AsyncAddCheckpoint(
      const std::shared_ptr<rpc::ActorCheckpointData> &data_ptr,
      const StatusCallback &callback) = 0;

  /// Get actor checkpoint data from GCS asynchronously.
  ///
  /// \param checkpoint_id The ID of checkpoint to lookup in GCS.
  /// \param callback The callback that will be called after lookup finishes.
  /// \return Status
  virtual Status AsyncGetCheckpoint(
      const ActorCheckpointID &checkpoint_id,
      const OptionalItemCallback<rpc::ActorCheckpointData> &callback) = 0;

  /// Get actor checkpoint id data from GCS asynchronously.
  ///
  /// \param actor_id The ID of actor to lookup in GCS.
  /// \param callback The callback that will be called after lookup finishes.
  /// \return Status
  virtual Status AsyncGetCheckpointID(
      const ActorID &actor_id,
      const OptionalItemCallback<rpc::ActorCheckpointIdData> &callback) = 0;

 protected:
  ActorInfoAccessor() = default;
};
/// \class JobInfoAccessor
/// `JobInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// job information in the GCS.
class JobInfoAccessor {
 public:
  virtual ~JobInfoAccessor() = default;

  /// Add a job to GCS asynchronously.
  ///
  /// \param data_ptr The job that will be add to GCS.
  /// \param callback Callback that will be called after job has been added
  /// to GCS.
  /// \return Status
  virtual Status AsyncAdd(const std::shared_ptr<rpc::JobTableData> &data_ptr,
                          const StatusCallback &callback) = 0;

  /// Mark job as finished in GCS asynchronously.
  ///
  /// \param job_id ID of the job that will be marked as finished in GCS.
  /// \param callback Callback that will be called after update finishes.
  /// \return Status
  virtual Status AsyncMarkFinished(const JobID &job_id,
                                   const StatusCallback &callback) = 0;

  /// Subscribe to finished jobs.
  ///
  /// \param subscribe Callback that will be called each time when a job finishes.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribeToFinishedJobs(
      const SubscribeCallback<JobID, rpc::JobTableData> &subscribe,
      const StatusCallback &done) = 0;

 protected:
  JobInfoAccessor() = default;
};
/// \class TaskInfoAccessor
/// `TaskInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// task information in the GCS.
class TaskInfoAccessor {
 public:
  virtual ~TaskInfoAccessor() {}

  /// Add a task to GCS asynchronously.
  ///
  /// \param data_ptr The task that will be added to GCS.
  /// \param callback Callback that will be called after task has been added
  /// to GCS.
  /// \return Status
  virtual Status AsyncAdd(const std::shared_ptr<rpc::TaskTableData> &data_ptr,
                          const StatusCallback &callback) = 0;

  /// Get task information from GCS asynchronously.
  ///
  /// \param task_id The ID of the task to look up in GCS.
  /// \param callback Callback that is called after lookup finishes.
  /// \return Status
  virtual Status AsyncGet(const TaskID &task_id,
                          const OptionalItemCallback<rpc::TaskTableData> &callback) = 0;

  /// Delete tasks from GCS asynchronously.
  ///
  /// \param task_ids The vector of IDs to delete from GCS.
  /// \param callback Callback that is called after delete finishes.
  /// \return Status
  // TODO(micafan) Will support callback of batch deletion in the future.
  // Currently this callback will never be called.
  virtual Status AsyncDelete(const std::vector<TaskID> &task_ids,
                             const StatusCallback &callback) = 0;

  /// Subscribe asynchronously to the event that the given task is added in GCS.
  ///
  /// \param task_id The ID of the task to be subscribed to.
  /// \param subscribe Callback that will be called each time when the task is updated.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribe(
      const TaskID &task_id,
      const SubscribeCallback<TaskID, rpc::TaskTableData> &subscribe,
      const StatusCallback &done) = 0;

  /// Cancel subscription to a task asynchronously.
  ///
  /// \param task_id The ID of the task to be unsubscribed from.
  /// \param done Callback that will be called when unsubscribe is complete.
  /// \return Status
  virtual Status AsyncUnsubscribe(const TaskID &task_id, const StatusCallback &done) = 0;

  /// Add a task lease to GCS asynchronously.
  ///
  /// \param data_ptr The task lease that will be added to GCS.
  /// \param callback Callback that will be called after task lease has been added
  /// to GCS.
  /// \return Status
  virtual Status AsyncAddTaskLease(const std::shared_ptr<rpc::TaskLeaseData> &data_ptr,
                                   const StatusCallback &callback) = 0;

  /// Subscribe asynchronously to the event that the given task lease is added in GCS.
  ///
  /// \param task_id The ID of the task to be subscribed to.
  /// \param subscribe Callback that will be called each time when the task lease is
  /// updated or the task lease is empty currently.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribeTaskLease(
      const TaskID &task_id,
      const SubscribeCallback<TaskID, boost::optional<rpc::TaskLeaseData>> &subscribe,
      const StatusCallback &done) = 0;

  /// Cancel subscription to a task lease asynchronously.
  ///
  /// \param task_id The ID of the task to be unsubscribed from.
  /// \param done Callback that will be called when unsubscribe is complete.
  /// \return Status
  virtual Status AsyncUnsubscribeTaskLease(const TaskID &task_id,
                                           const StatusCallback &done) = 0;

  /// Attempt task reconstruction to GCS asynchronously.
  ///
  /// \param data_ptr The task reconstruction that will be added to GCS.
  /// \param callback Callback that will be called after task reconstruction
  /// has been added to GCS.
  /// \return Status
  virtual Status AttemptTaskReconstruction(
      const std::shared_ptr<rpc::TaskReconstructionData> &data_ptr,
      const StatusCallback &callback) = 0;

 protected:
  TaskInfoAccessor() = default;
};
/// `ObjectInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// object information in the GCS.
/// \class ObjectInfoAccessor
/// `ObjectInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// object information in the GCS.
class ObjectInfoAccessor {
 public:
  virtual ~ObjectInfoAccessor() {}

  /// Get object's locations from GCS asynchronously.
  ///
  /// \param object_id The ID of object to lookup in GCS.
  /// \param callback Callback that will be called after lookup finishes.
  /// \return Status
  virtual Status AsyncGetLocations(
      const ObjectID &object_id,
      const MultiItemCallback<rpc::ObjectTableData> &callback) = 0;

  /// Add location of object to GCS asynchronously.
  ///
  /// \param object_id The ID of object which location will be added to GCS.
  /// \param node_id The location that will be added to GCS.
  /// \param callback Callback that will be called after object has been added to GCS.
  /// \return Status
  virtual Status AsyncAddLocation(const ObjectID &object_id, const ClientID &node_id,
                                  const StatusCallback &callback) = 0;

  /// Remove location of object from GCS asynchronously.
  ///
  /// \param object_id The ID of object which location will be removed from GCS.
  /// \param node_id The location that will be removed from GCS.
  /// \param callback Callback that will be called after the delete finished.
  /// \return Status
  virtual Status AsyncRemoveLocation(const ObjectID &object_id, const ClientID &node_id,
                                     const StatusCallback &callback) = 0;

  /// Subscribe to any update of an object's location.
  ///
  /// \param object_id The ID of the object to be subscribed to.
  /// \param subscribe Callback that will be called each time when the object's
  /// location is updated.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribeToLocations(
      const ObjectID &object_id,
      const SubscribeCallback<ObjectID, ObjectChangeNotification> &subscribe,
      const StatusCallback &done) = 0;

  /// Cancel subscription to any update of an object's location.
  ///
  /// \param object_id The ID of the object to be unsubscribed from.
  /// \param done Callback that will be called when unsubscription is complete.
  /// \return Status
  virtual Status AsyncUnsubscribeToLocations(const ObjectID &object_id,
                                             const StatusCallback &done) = 0;

 protected:
  ObjectInfoAccessor() = default;
};
/// \class NodeInfoAccessor
/// `NodeInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// node information in the GCS.
class NodeInfoAccessor {
 public:
  virtual ~NodeInfoAccessor() = default;

  /// Register local node to GCS synchronously.
  ///
  /// \param local_node_info The information of the local node to register to GCS.
  /// \return Status
  virtual Status RegisterSelf(const rpc::GcsNodeInfo &local_node_info) = 0;

  /// Cancel registration of local node to GCS synchronously.
  ///
  /// \return Status
  virtual Status UnregisterSelf() = 0;

  /// Get id of local node which was registered by 'RegisterSelf'.
  ///
  /// \return ClientID
  virtual const ClientID &GetSelfId() const = 0;

  /// Get information of local node which was registered by 'RegisterSelf'.
  ///
  /// \return GcsNodeInfo
  virtual const rpc::GcsNodeInfo &GetSelfInfo() const = 0;

  /// Register a node to GCS asynchronously.
  ///
  /// \param node_info The information of node to register to GCS.
  /// \param callback Callback that will be called when registration is complete.
  /// \return Status
  virtual Status AsyncRegister(const rpc::GcsNodeInfo &node_info,
                               const StatusCallback &callback) = 0;

  /// Cancel registration of a node to GCS asynchronously.
  ///
  /// \param node_id The ID of node that to be unregistered.
  /// \param callback Callback that will be called when unregistration is complete.
  /// \return Status
  virtual Status AsyncUnregister(const ClientID &node_id,
                                 const StatusCallback &callback) = 0;

  /// Get information of all nodes from GCS asynchronously.
  ///
  /// \param callback Callback that will be called after lookup finishes.
  /// \return Status
  virtual Status AsyncGetAll(const MultiItemCallback<rpc::GcsNodeInfo> &callback) = 0;

  /// Subscribe to node addition and removal events from GCS and cache those information.
  ///
  /// \param subscribe Callback that will be called if a node is
  /// added or a node is removed.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribeToNodeChange(
      const SubscribeCallback<ClientID, rpc::GcsNodeInfo> &subscribe,
      const StatusCallback &done) = 0;

  /// Get node information from local cache.
  /// Non-thread safe.
  /// Note, the local cache is only available if `AsyncSubscribeToNodeChange`
  /// is called before.
  ///
  /// \param node_id The ID of node to look up in local cache.
  /// \return The item returned by GCS. If the item to read doesn't exist,
  /// this optional object is empty.
  virtual boost::optional<rpc::GcsNodeInfo> Get(const ClientID &node_id) const = 0;

  /// Get information of all nodes from local cache.
  /// Non-thread safe.
  /// Note, the local cache is only available if `AsyncSubscribeToNodeChange`
  /// is called before.
  ///
  /// \return All nodes in cache.
  virtual const std::unordered_map<ClientID, rpc::GcsNodeInfo> &GetAll() const = 0;

  /// Search the local cache to find out if the given node is removed.
  /// Non-thread safe.
  /// Note, the local cache is only available if `AsyncSubscribeToNodeChange`
  /// is called before.
  ///
  /// \param node_id The id of the node to check.
  /// \return Whether the node is removed.
  virtual bool IsRemoved(const ClientID &node_id) const = 0;

  // TODO(micafan) Define ResourceMap in GCS proto.
  typedef std::unordered_map<std::string, std::shared_ptr<rpc::ResourceTableData>>
      ResourceMap;

  /// Get node's resources from GCS asynchronously.
  ///
  /// \param node_id The ID of node to lookup dynamic resources.
  /// \param callback Callback that will be called after lookup finishes.
  /// \return Status
  virtual Status AsyncGetResources(const ClientID &node_id,
                                   const OptionalItemCallback<ResourceMap> &callback) = 0;

  /// Update resources of node in GCS asynchronously.
  ///
  /// \param node_id The ID of node to update dynamic resources.
  /// \param resources The dynamic resources of node to be updated.
  /// \param callback Callback that will be called after update finishes.
  /// \return Status
  virtual Status AsyncUpdateResources(const ClientID &node_id,
                                      const ResourceMap &resources,
                                      const StatusCallback &callback) = 0;

  /// Delete resources of a node from GCS asynchronously.
  ///
  /// \param node_id The ID of node to delete resources from GCS.
  /// \param resource_names The names of resource to be deleted.
  /// \param callback Callback that will be called after delete finishes.
  /// \return Status
  virtual Status AsyncDeleteResources(const ClientID &node_id,
                                      const std::vector<std::string> &resource_names,
                                      const StatusCallback &callback) = 0;

  /// Subscribe to node resource changes.
  ///
  /// \param subscribe Callback that will be called when any resource is updated.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribeToResources(
      const SubscribeCallback<ClientID, ResourceChangeNotification> &subscribe,
      const StatusCallback &done) = 0;

  /// Report heartbeat of a node to GCS asynchronously.
  ///
  /// \param data_ptr The heartbeat that will be reported to GCS.
  /// \param callback Callback that will be called after report finishes.
  /// \return Status
  // TODO(micafan) NodeStateAccessor will call this method to report heartbeat.
  virtual Status AsyncReportHeartbeat(
      const std::shared_ptr<rpc::HeartbeatTableData> &data_ptr,
      const StatusCallback &callback) = 0;

  /// Subscribe to the heartbeat of each node from GCS.
  ///
  /// \param subscribe Callback that will be called each time when heartbeat is updated.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribeHeartbeat(
      const SubscribeCallback<ClientID, rpc::HeartbeatTableData> &subscribe,
      const StatusCallback &done) = 0;

  /// Report state of all nodes to GCS asynchronously.
  ///
  /// \param data_ptr The heartbeats that will be reported to GCS.
  /// \param callback Callback that will be called after report finishes.
  /// \return Status
  virtual Status AsyncReportBatchHeartbeat(
      const std::shared_ptr<rpc::HeartbeatBatchTableData> &data_ptr,
      const StatusCallback &callback) = 0;

  /// Subscribe batched state of all nodes from GCS.
  ///
  /// \param subscribe Callback that will be called each time when batch heartbeat is
  /// updated.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribeBatchHeartbeat(
      const ItemCallback<rpc::HeartbeatBatchTableData> &subscribe,
      const StatusCallback &done) = 0;

 protected:
  NodeInfoAccessor() = default;
};
/// \class ErrorInfoAccessor
/// `ErrorInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// error information in the GCS.
class ErrorInfoAccessor {
 public:
  virtual ~ErrorInfoAccessor() = default;

  /// Report a job error to GCS asynchronously.
  /// The error message will be pushed to the driver of a specific job if it is
  /// a job internal error, or broadcast to all drivers if it is a system error.
  ///
  /// TODO(rkn): We need to make sure that the errors are unique because
  /// duplicate messages currently cause failures (the GCS doesn't allow it). A
  /// natural way to do this is to have finer-grained time stamps.
  ///
  /// \param data_ptr The error message that will be reported to GCS.
  /// \param callback Callback that will be called when report is complete.
  /// \return Status
  virtual Status AsyncReportJobError(const std::shared_ptr<rpc::ErrorTableData> &data_ptr,
                                     const StatusCallback &callback) = 0;

 protected:
  ErrorInfoAccessor() = default;
};
/// \class StatsInfoAccessor
/// `StatsInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// stats in the GCS.
class StatsInfoAccessor {
 public:
  virtual ~StatsInfoAccessor() = default;

  /// Add profile data to GCS asynchronously.
  ///
  /// \param data_ptr The profile data that will be added to GCS.
  /// \param callback Callback that will be called when add is complete.
  /// \return Status
  virtual Status AsyncAddProfileData(
      const std::shared_ptr<rpc::ProfileTableData> &data_ptr,
      const StatusCallback &callback) = 0;

 protected:
  StatsInfoAccessor() = default;
};
/// \class WorkerInfoAccessor
/// `WorkerInfoAccessor` is a sub-interface of `GcsClient`.
/// This class includes all the methods that are related to accessing
/// worker information in the GCS.
class WorkerInfoAccessor {
 public:
  virtual ~WorkerInfoAccessor() = default;

  /// Subscribe to all unexpected failure of workers from GCS asynchronously.
  /// Note that this does not include workers that failed due to node failure.
  ///
  /// \param subscribe Callback that will be called each time when a worker failed.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  virtual Status AsyncSubscribeToWorkerFailures(
      const SubscribeCallback<WorkerID, rpc::WorkerFailureData> &subscribe,
      const StatusCallback &done) = 0;

  /// Report a worker failure to GCS asynchronously.
  ///
  /// \param data_ptr The worker failure information that will be reported to GCS.
  /// \param callback Callback that will be called when report is complete.
  /// \return Status
  virtual Status AsyncReportWorkerFailure(
      const std::shared_ptr<rpc::WorkerFailureData> &data_ptr,
      const StatusCallback &callback) = 0;

 protected:
  WorkerInfoAccessor() = default;
};
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_ACCESSOR_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/asio.cc
|
C++
|
#include "asio.h"
#include "ray/util/logging.h"
/// Bridge hiredis's async event hooks onto a boost::asio event loop.
/// Adopts the socket hiredis has already connected and installs the
/// extern "C" trampolines as hiredis's addRead/delRead/addWrite/delWrite/
/// cleanup callbacks, with `this` as the callback data.
RedisAsioClient::RedisAsioClient(boost::asio::io_service &io_service,
                                 ray::gcs::RedisAsyncContext &redis_async_context)
    : redis_async_context_(redis_async_context),
      io_service_(io_service),
      socket_(io_service),
      read_requested_(false),
      write_requested_(false),
      read_in_progress_(false),
      write_in_progress_(false) {
  redisAsyncContext *async_context = redis_async_context_.GetRawRedisAsyncContext();
  // gives access to c->fd
  redisContext *c = &(async_context->c);
  // hiredis is already connected
  // use the existing native socket
  // NOTE(review): assumes an IPv4 TCP connection; assign() would need the
  // matching protocol for IPv6/unix-socket redis endpoints — confirm.
  socket_.assign(boost::asio::ip::tcp::v4(), c->fd);
  // register hooks with the hiredis async context
  async_context->ev.addRead = call_C_addRead;
  async_context->ev.delRead = call_C_delRead;
  async_context->ev.addWrite = call_C_addWrite;
  async_context->ev.delWrite = call_C_delWrite;
  async_context->ev.cleanup = call_C_cleanup;
  // C wrapper functions will use this pointer to call class members.
  async_context->ev.data = this;
}
void RedisAsioClient::operate() {
if (read_requested_ && !read_in_progress_) {
read_in_progress_ = true;
socket_.async_read_some(boost::asio::null_buffers(),
boost::bind(&RedisAsioClient::handle_read, this,
boost::asio::placeholders::error));
}
if (write_requested_ && !write_in_progress_) {
write_in_progress_ = true;
socket_.async_write_some(boost::asio::null_buffers(),
boost::bind(&RedisAsioClient::handle_write, this,
boost::asio::placeholders::error));
}
}
/// Completion handler for a read-readiness wait: let hiredis consume whatever
/// is available, then re-arm the wait if the socket reported would_block.
void RedisAsioClient::handle_read(boost::system::error_code ec) {
  const bool should_rearm = (ec == boost::asio::error::would_block);
  // Only "success" and "would block" are expected outcomes here.
  RAY_CHECK(!ec || should_rearm);
  read_in_progress_ = false;
  redis_async_context_.RedisAsyncHandleRead();
  if (should_rearm) {
    operate();
  }
}
/// Completion handler for a write-readiness wait: let hiredis flush its output
/// buffer, then re-arm the wait if the socket reported would_block.
void RedisAsioClient::handle_write(boost::system::error_code ec) {
  const bool should_rearm = (ec == boost::asio::error::would_block);
  // Only "success" and "would block" are expected outcomes here.
  RAY_CHECK(!ec || should_rearm);
  write_in_progress_ = false;
  redis_async_context_.RedisAsyncHandleWrite();
  if (should_rearm) {
    operate();
  }
}
/// hiredis hook: it wants to be notified when the socket is readable.
void RedisAsioClient::add_read() {
  // Because redis commands are non-thread safe, dispatch the operation to backend thread.
  // NOTE: dispatch() may run the lambda inline when already on the io_service
  // thread; otherwise it is queued onto that thread.
  io_service_.dispatch([this]() {
    read_requested_ = true;
    operate();
  });
}
/// hiredis hook: it no longer wants read notifications. Clear the request
/// flag; a wait already in flight completes normally and is simply not
/// re-armed by operate().
/// NOTE(review): unlike add_read(), this is not dispatched to the io_service
/// thread — presumably hiredis only calls it from that thread; confirm.
void RedisAsioClient::del_read() {
  read_requested_ = false;
}
/// hiredis hook: it has buffered output and wants to be notified when the
/// socket is writable.
void RedisAsioClient::add_write() {
  // Because redis commands are non-thread safe, dispatch the operation to backend thread.
  // NOTE: dispatch() may run the lambda inline when already on the io_service
  // thread; otherwise it is queued onto that thread.
  io_service_.dispatch([this]() {
    write_requested_ = true;
    operate();
  });
}
/// hiredis hook: its output buffer is drained; stop requesting write
/// notifications. A wait already in flight completes normally and is simply
/// not re-armed by operate().
/// NOTE(review): unlike add_write(), this is not dispatched to the io_service
/// thread — presumably hiredis only calls it from that thread; confirm.
void RedisAsioClient::del_write() {
  write_requested_ = false;
}
// hiredis's ev.cleanup hook. Intentionally a no-op here — presumably teardown
// of the socket and async context is handled by their owners' destructors;
// NOTE(review): confirm the adopted fd is not double-closed on destruction.
void RedisAsioClient::cleanup() {}
/// Recover the RedisAsioClient installed as hiredis's ev.data pointer.
static inline RedisAsioClient *cast_to_client(void *private_data) {
  RAY_CHECK(private_data != nullptr);
  return static_cast<RedisAsioClient *>(private_data);
}

// C trampolines installed on async_context->ev by the RedisAsioClient
// constructor; hiredis invokes them with ev.data (the RedisAsioClient*) as
// private_data, and each simply forwards to the matching member function.
extern "C" void call_C_addRead(void *private_data) {
  cast_to_client(private_data)->add_read();
}

extern "C" void call_C_delRead(void *private_data) {
  cast_to_client(private_data)->del_read();
}

extern "C" void call_C_addWrite(void *private_data) {
  cast_to_client(private_data)->add_write();
}

extern "C" void call_C_delWrite(void *private_data) {
  cast_to_client(private_data)->del_write();
}

extern "C" void call_C_cleanup(void *private_data) {
  cast_to_client(private_data)->cleanup();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/asio.h
|
C/C++ Header
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// Adapted from https://github.com/ryangraham/hiredis-boostasio-adapter
// (Copyright 2018 Ryan Graham)
#ifndef RAY_GCS_ASIO_H
#define RAY_GCS_ASIO_H
#include <stdio.h>
#include <iostream>
#include <string>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/bind.hpp>
#include "hiredis/async.h"
#include "hiredis/hiredis.h"
#include "ray/gcs/redis_async_context.h"
class RedisAsioClient {
 public:
  /// Constructor of RedisAsioClient.
  /// Use single-threaded io_service as event loop (because the redis commands
  /// that will run in the event loop are non-thread safe).
  ///
  /// \param io_service The single-threaded event loop for this client.
  /// \param redis_async_context The redis async context used to execute redis commands
  /// for this client.
  RedisAsioClient(boost::asio::io_service &io_service,
                  ray::gcs::RedisAsyncContext &redis_async_context);

  // Start the socket operations currently requested by the *_requested_ flags.
  // NOTE(review): implementation not shown here — confirm behavior in asio.cc.
  void operate();

  // Async completion callbacks; `ec` carries the socket operation's result.
  void handle_read(boost::system::error_code ec);
  void handle_write(boost::system::error_code ec);

  // Hooks registered with hiredis (through the call_C_* trampolines below) so
  // that hiredis can request/cancel read and write interest on the socket.
  void add_read();
  void del_read();
  void add_write();
  void del_write();
  void cleanup();

 private:
  ray::gcs::RedisAsyncContext &redis_async_context_;
  boost::asio::io_service &io_service_;
  boost::asio::ip::tcp::socket socket_;
  // Hiredis wanted to add a read operation to the event loop
  // but the read might not have happened yet
  bool read_requested_;
  // Hiredis wanted to add a write operation to the event loop
  // but the write might not have happened yet
  bool write_requested_;
  // A read is currently in progress
  bool read_in_progress_;
  // A write is currently in progress
  bool write_in_progress_;
};
// C wrappers for class member functions
extern "C" void call_C_addRead(void *private_data);
extern "C" void call_C_delRead(void *private_data);
extern "C" void call_C_addWrite(void *private_data);
extern "C" void call_C_delWrite(void *private_data);
extern "C" void call_C_cleanup(void *private_data);
#endif // RAY_GCS_ASIO_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/callback.h
|
C/C++ Header
|
#ifndef RAY_GCS_CALLBACK_H
#define RAY_GCS_CALLBACK_H
#if defined(__clang__) && defined(_MSC_VER)
// TODO(mehrdadn): Remove this Windows (clang-cl) workaround once we upgrade to
// Boost > 1.68: https://lists.boost.org/Archives/boost/2018/09/243420.php
#include <boost/type_traits.hpp>
#endif
#include <boost/optional/optional.hpp>
#include <vector>
#include "ray/common/status.h"
namespace ray {
namespace gcs {
/// This callback is used to notify when a write/subscribe to GCS completes.
/// \param status Status indicates whether the write/subscribe was successful.
using StatusCallback = std::function<void(Status status)>;
/// This callback is used to receive one item from GCS when a read completes.
/// \param status Status indicates whether the read was successful.
/// \param result The item returned by GCS. If the item to read doesn't exist,
/// this optional object is empty.
template <typename Data>
using OptionalItemCallback =
std::function<void(Status status, const boost::optional<Data> &result)>;
/// This callback is used to receive multiple items from GCS when a read completes.
/// \param status Status indicates whether the read was successful.
/// \param result The items returned by GCS.
template <typename Data>
using MultiItemCallback =
std::function<void(Status status, const std::vector<Data> &result)>;
/// This callback is used to receive notifications of the subscribed items in the GCS.
/// \param id The id of the item.
/// \param result The notification message.
template <typename ID, typename Data>
using SubscribeCallback = std::function<void(const ID &id, const Data &result)>;
/// This callback is used to receive a single item from GCS.
/// \param result The item returned by GCS.
template <typename Data>
using ItemCallback = std::function<void(const Data &result)>;
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_CALLBACK_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/entry_change_notification.h
|
C/C++ Header
|
#ifndef RAY_GCS_ENTRY_CHANGE_NOTIFICATION_H
#define RAY_GCS_ENTRY_CHANGE_NOTIFICATION_H
#include <ray/protobuf/gcs.pb.h>
#include <vector>
namespace ray {
namespace gcs {
/// \class EntryChangeNotification
/// EntryChangeNotification class is a template class which represent
/// notification of entry change from GCS.
///
/// \tparam Data The payload type carried by the notification.
template <typename Data>
class EntryChangeNotification {
 public:
  /// \param change_mode Whether the entry was added/appended or removed.
  /// \param data The payload associated with the change (moved in).
  EntryChangeNotification(rpc::GcsChangeMode change_mode, Data data)
      : change_mode_(change_mode), data_(std::move(data)) {}

  EntryChangeNotification(EntryChangeNotification &&other) {
    change_mode_ = other.change_mode_;
    data_ = std::move(other.data_);
  }

  EntryChangeNotification &operator=(EntryChangeNotification &&other) {
    // FIX: the original had no return statement, which is undefined behavior
    // for a value-returning function. Also skip the work on self-move.
    if (this != &other) {
      change_mode_ = other.change_mode_;
      data_ = std::move(other.data_);
    }
    return *this;
  }

  /// Whether the entry data is removed from GCS.
  bool IsRemoved() const { return change_mode_ == rpc::GcsChangeMode::REMOVE; }

  /// Whether the entry data is added to GCS.
  bool IsAdded() const { return change_mode_ == rpc::GcsChangeMode::APPEND_OR_ADD; }

  /// Get change mode of this notification. For test only.
  ///
  /// \return rpc::GcsChangeMode
  rpc::GcsChangeMode GetGcsChangeMode() const { return change_mode_; }

  /// Get data of this notification.
  ///
  /// \return Data
  const Data &GetData() const { return data_; }

 private:
  rpc::GcsChangeMode change_mode_;
  Data data_;
};
template <typename Data>
using ArrayNotification = EntryChangeNotification<std::vector<Data>>;
typedef ArrayNotification<rpc::ObjectTableData> ObjectChangeNotification;
template <typename key, typename Value>
using MapNotification =
EntryChangeNotification<std::unordered_map<key, std::shared_ptr<Value>>>;
typedef MapNotification<std::string, rpc::ResourceTableData> ResourceChangeNotification;
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_ENTRY_CHANGE_NOTIFICATION_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_client.h
|
C/C++ Header
|
#ifndef RAY_GCS_GCS_CLIENT_H
#define RAY_GCS_GCS_CLIENT_H
#include <boost/asio.hpp>
#include <memory>
#include <string>
#include <vector>
#include "ray/common/status.h"
#include "ray/gcs/accessor.h"
#include "ray/util/logging.h"
namespace ray {
namespace gcs {
/// \class GcsClientOptions
/// GCS client's options (configuration items), such as service address, and service
/// password.
class GcsClientOptions {
 public:
  /// Constructor of GcsClientOptions.
  ///
  /// \param ip GCS service ip.
  /// \param port GCS service port.
  /// \param password GCS service password (may be empty if the backend does
  /// not require authentication — confirm against the connecting code).
  /// \param is_test_client Whether this client is used for tests.
  GcsClientOptions(const std::string &ip, int port, const std::string &password,
                   bool is_test_client = false)
      : server_ip_(ip),
        server_port_(port),
        password_(password),
        is_test_client_(is_test_client) {}

  // GCS server address
  std::string server_ip_;
  int server_port_;
  // Password of GCS server.
  std::string password_;
  // Whether this client is used for tests.
  bool is_test_client_{false};
};
/// \class GcsClient
/// Abstract interface of the GCS client.
///
/// To read and write from the GCS, `Connect()` must be called and return Status::OK.
/// Before exit, `Disconnect()` must be called.
class GcsClient : public std::enable_shared_from_this<GcsClient> {
 public:
  virtual ~GcsClient() {}

  /// Connect to GCS Service. Non-thread safe.
  /// This function must be called before calling other functions.
  ///
  /// \return Status
  virtual Status Connect(boost::asio::io_service &io_service) = 0;

  /// Disconnect with GCS Service. Non-thread safe.
  virtual void Disconnect() = 0;

  /// Return client information for debug.
  virtual std::string DebugString() const { return ""; }

  // NOTE(review): each accessor below RAY_CHECK-fails when its unique_ptr is
  // still null; subclasses presumably populate these during Connect() — do not
  // call an accessor on a client that has not successfully connected.

  /// Get the sub-interface for accessing actor information in GCS.
  /// This function is thread safe.
  ActorInfoAccessor &Actors() {
    RAY_CHECK(actor_accessor_ != nullptr);
    return *actor_accessor_;
  }

  /// Get the sub-interface for accessing job information in GCS.
  /// This function is thread safe.
  JobInfoAccessor &Jobs() {
    RAY_CHECK(job_accessor_ != nullptr);
    return *job_accessor_;
  }

  /// Get the sub-interface for accessing object information in GCS.
  /// This function is thread safe.
  ObjectInfoAccessor &Objects() {
    RAY_CHECK(object_accessor_ != nullptr);
    return *object_accessor_;
  }

  /// Get the sub-interface for accessing node information in GCS.
  /// This function is thread safe.
  NodeInfoAccessor &Nodes() {
    RAY_CHECK(node_accessor_ != nullptr);
    return *node_accessor_;
  }

  /// Get the sub-interface for accessing task information in GCS.
  /// This function is thread safe.
  TaskInfoAccessor &Tasks() {
    RAY_CHECK(task_accessor_ != nullptr);
    return *task_accessor_;
  }

  /// Get the sub-interface for accessing error information in GCS.
  /// This function is thread safe.
  ErrorInfoAccessor &Errors() {
    RAY_CHECK(error_accessor_ != nullptr);
    return *error_accessor_;
  }

  /// Get the sub-interface for accessing stats information in GCS.
  /// This function is thread safe.
  StatsInfoAccessor &Stats() {
    RAY_CHECK(stats_accessor_ != nullptr);
    return *stats_accessor_;
  }

  /// Get the sub-interface for accessing worker information in GCS.
  /// This function is thread safe.
  WorkerInfoAccessor &Workers() {
    RAY_CHECK(worker_accessor_ != nullptr);
    return *worker_accessor_;
  }

 protected:
  /// Constructor of GcsClient.
  ///
  /// \param options Options for client.
  GcsClient(const GcsClientOptions &options) : options_(options) {}

  GcsClientOptions options_;

  /// Whether this client is connected to GCS.
  bool is_connected_{false};

  std::unique_ptr<ActorInfoAccessor> actor_accessor_;
  std::unique_ptr<JobInfoAccessor> job_accessor_;
  std::unique_ptr<ObjectInfoAccessor> object_accessor_;
  std::unique_ptr<NodeInfoAccessor> node_accessor_;
  std::unique_ptr<TaskInfoAccessor> task_accessor_;
  std::unique_ptr<ErrorInfoAccessor> error_accessor_;
  std::unique_ptr<StatsInfoAccessor> stats_accessor_;
  std::unique_ptr<WorkerInfoAccessor> worker_accessor_;
};
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_GCS_CLIENT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/actor_info_handler_impl.cc
|
C++
|
#include "actor_info_handler_impl.h"
#include "ray/util/logging.h"
namespace ray {
namespace rpc {
/// Fetch the actor table entry for the requested actor and reply with it.
void DefaultActorInfoHandler::HandleGetActorInfo(
    const rpc::GetActorInfoRequest &request, rpc::GetActorInfoReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  const ActorID actor_id = ActorID::FromBinary(request.actor_id());
  RAY_LOG(DEBUG) << "Getting actor info, actor id = " << actor_id;
  // Copies the fetched record into the reply (or logs the failure), then
  // completes the RPC either way.
  auto done_callback = [actor_id, reply, send_reply_callback](
                           Status status,
                           const boost::optional<ActorTableData> &result) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to get actor info: " << status.ToString()
                     << ", actor id = " << actor_id;
    } else {
      RAY_DCHECK(result);
      reply->mutable_actor_table_data()->CopyFrom(*result);
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  const Status fetch_status = gcs_client_.Actors().AsyncGet(actor_id, done_callback);
  if (!fetch_status.ok()) {
    // The async request could not even be issued; complete the RPC inline.
    done_callback(fetch_status, boost::none);
  }
  RAY_LOG(DEBUG) << "Finished getting actor info, actor id = " << actor_id;
}
/// Store a new actor table entry and reply with the storage status.
void DefaultActorInfoHandler::HandleRegisterActorInfo(
    const rpc::RegisterActorInfoRequest &request, rpc::RegisterActorInfoReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  const ActorID actor_id = ActorID::FromBinary(request.actor_table_data().actor_id());
  RAY_LOG(DEBUG) << "Registering actor info, actor id = " << actor_id;
  // Take a shared copy of the table data so it can outlive this handler frame.
  auto actor_table_data = std::make_shared<ActorTableData>();
  actor_table_data->CopyFrom(request.actor_table_data());
  auto done_callback = [actor_id, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to register actor info: " << status.ToString()
                     << ", actor id = " << actor_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  const Status register_status =
      gcs_client_.Actors().AsyncRegister(actor_table_data, done_callback);
  if (!register_status.ok()) {
    // The async request was never issued; complete the RPC inline.
    done_callback(register_status);
  }
  RAY_LOG(DEBUG) << "Finished registering actor info, actor id = " << actor_id;
}
/// Overwrite the stored actor table entry for the requested actor, then
/// complete the RPC with the storage status.
void DefaultActorInfoHandler::HandleUpdateActorInfo(
    const rpc::UpdateActorInfoRequest &request, rpc::UpdateActorInfoReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  ActorID actor_id = ActorID::FromBinary(request.actor_id());
  RAY_LOG(DEBUG) << "Updating actor info, actor id = " << actor_id;
  // Copy into a shared_ptr: the async accessor may keep using the data after
  // this handler frame has returned.
  auto actor_table_data = std::make_shared<ActorTableData>();
  actor_table_data->CopyFrom(request.actor_table_data());
  auto on_done = [actor_id, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to update actor info: " << status.ToString()
                     << ", actor id = " << actor_id;
    }
    // Reply in both cases; the status carries any error back to the caller.
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Actors().AsyncUpdate(actor_id, actor_table_data, on_done);
  if (!status.ok()) {
    // The async call was never issued, so invoke the callback inline.
    on_done(status);
  }
  // NOTE: logged when the request has been issued, not when it completes.
  RAY_LOG(DEBUG) << "Finished updating actor info, actor id = " << actor_id;
}
/// Persist an actor checkpoint record and complete the RPC with the result.
void DefaultActorInfoHandler::HandleAddActorCheckpoint(
    const AddActorCheckpointRequest &request, AddActorCheckpointReply *reply,
    SendReplyCallback send_reply_callback) {
  ActorID actor_id = ActorID::FromBinary(request.checkpoint_data().actor_id());
  ActorCheckpointID checkpoint_id =
      ActorCheckpointID::FromBinary(request.checkpoint_data().checkpoint_id());
  RAY_LOG(DEBUG) << "Adding actor checkpoint, actor id = " << actor_id
                 << ", checkpoint id = " << checkpoint_id;
  // Shared copy so the data stays valid for the duration of the async write.
  auto actor_checkpoint_data = std::make_shared<ActorCheckpointData>();
  actor_checkpoint_data->CopyFrom(request.checkpoint_data());
  auto on_done = [actor_id, checkpoint_id, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to add actor checkpoint: " << status.ToString()
                     << ", actor id = " << actor_id
                     << ", checkpoint id = " << checkpoint_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Actors().AsyncAddCheckpoint(actor_checkpoint_data, on_done);
  if (!status.ok()) {
    // The async call was never issued; complete the RPC inline.
    on_done(status);
  }
  RAY_LOG(DEBUG) << "Finished adding actor checkpoint, actor id = " << actor_id
                 << ", checkpoint id = " << checkpoint_id;
}
/// Look up a checkpoint by id and copy it into the reply on success.
void DefaultActorInfoHandler::HandleGetActorCheckpoint(
    const GetActorCheckpointRequest &request, GetActorCheckpointReply *reply,
    SendReplyCallback send_reply_callback) {
  ActorCheckpointID checkpoint_id =
      ActorCheckpointID::FromBinary(request.checkpoint_id());
  RAY_LOG(DEBUG) << "Getting actor checkpoint, checkpoint id = " << checkpoint_id;
  auto on_done = [checkpoint_id, reply, send_reply_callback](
                     Status status, const boost::optional<ActorCheckpointData> &result) {
    if (status.ok()) {
      // A successful lookup is expected to carry a value.
      RAY_DCHECK(result);
      reply->mutable_checkpoint_data()->CopyFrom(*result);
    } else {
      RAY_LOG(ERROR) << "Failed to get actor checkpoint: " << status.ToString()
                     << ", checkpoint id = " << checkpoint_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Actors().AsyncGetCheckpoint(checkpoint_id, on_done);
  if (!status.ok()) {
    // The async call was never issued; complete the RPC inline with no data.
    on_done(status, boost::none);
  }
  RAY_LOG(DEBUG) << "Finished getting actor checkpoint, checkpoint id = "
                 << checkpoint_id;
}
/// Look up the checkpoint-id record for an actor and copy it into the reply.
void DefaultActorInfoHandler::HandleGetActorCheckpointID(
    const GetActorCheckpointIDRequest &request, GetActorCheckpointIDReply *reply,
    SendReplyCallback send_reply_callback) {
  ActorID actor_id = ActorID::FromBinary(request.actor_id());
  RAY_LOG(DEBUG) << "Getting actor checkpoint id, actor id = " << actor_id;
  auto on_done = [actor_id, reply, send_reply_callback](
                     Status status,
                     const boost::optional<ActorCheckpointIdData> &result) {
    if (status.ok()) {
      // A successful lookup is expected to carry a value.
      RAY_DCHECK(result);
      reply->mutable_checkpoint_id_data()->CopyFrom(*result);
    } else {
      RAY_LOG(ERROR) << "Failed to get actor checkpoint id: " << status.ToString()
                     << ", actor id = " << actor_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Actors().AsyncGetCheckpointID(actor_id, on_done);
  if (!status.ok()) {
    // The async call was never issued; complete the RPC inline with no data.
    on_done(status, boost::none);
  }
  RAY_LOG(DEBUG) << "Finished getting actor checkpoint id, actor id = " << actor_id;
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/actor_info_handler_impl.h
|
C/C++ Header
|
#ifndef RAY_GCS_ACTOR_INFO_HANDLER_IMPL_H
#define RAY_GCS_ACTOR_INFO_HANDLER_IMPL_H
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/gcs_server/gcs_rpc_server.h"
namespace ray {
namespace rpc {
/// Default implementation of `ActorInfoHandler`: forwards each actor-related
/// RPC to the Redis-backed GCS client supplied at construction.
class DefaultActorInfoHandler : public rpc::ActorInfoHandler {
 public:
  /// \param gcs_client Backend client; must outlive this handler.
  explicit DefaultActorInfoHandler(gcs::RedisGcsClient &gcs_client)
      : gcs_client_(gcs_client) {}

  void HandleGetActorInfo(const GetActorInfoRequest &request, GetActorInfoReply *reply,
                          SendReplyCallback send_reply_callback) override;

  void HandleRegisterActorInfo(const RegisterActorInfoRequest &request,
                               RegisterActorInfoReply *reply,
                               SendReplyCallback send_reply_callback) override;

  void HandleUpdateActorInfo(const UpdateActorInfoRequest &request,
                             UpdateActorInfoReply *reply,
                             SendReplyCallback send_reply_callback) override;

  void HandleAddActorCheckpoint(const AddActorCheckpointRequest &request,
                                AddActorCheckpointReply *reply,
                                SendReplyCallback send_reply_callback) override;

  void HandleGetActorCheckpoint(const GetActorCheckpointRequest &request,
                                GetActorCheckpointReply *reply,
                                SendReplyCallback send_reply_callback) override;

  void HandleGetActorCheckpointID(const GetActorCheckpointIDRequest &request,
                                  GetActorCheckpointIDReply *reply,
                                  SendReplyCallback send_reply_callback) override;

 private:
  // Reference to the shared backend client (owned by the server).
  gcs::RedisGcsClient &gcs_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_GCS_ACTOR_INFO_HANDLER_IMPL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/error_info_handler_impl.cc
|
C++
|
#include "error_info_handler_impl.h"
namespace ray {
namespace rpc {
void DefaultErrorInfoHandler::HandleReportJobError(
const ReportJobErrorRequest &request, ReportJobErrorReply *reply,
SendReplyCallback send_reply_callback) {
JobID job_id = JobID::FromBinary(request.error_data().job_id());
std::string type = request.error_data().type();
RAY_LOG(DEBUG) << "Reporting job error, job id = " << job_id << ", type = " << type;
auto error_table_data = std::make_shared<ErrorTableData>();
error_table_data->CopyFrom(request.error_data());
auto on_done = [job_id, type, send_reply_callback](Status status) {
if (!status.ok()) {
RAY_LOG(ERROR) << "Failed to report job error, job id = " << job_id
<< ", type = " << type;
}
send_reply_callback(status, nullptr, nullptr);
};
Status status = gcs_client_.Errors().AsyncReportJobError(error_table_data, on_done);
if (!status.ok()) {
on_done(status);
}
RAY_LOG(DEBUG) << "Finished reporting job error, job id = " << job_id
<< ", type = " << type;
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/error_info_handler_impl.h
|
C/C++ Header
|
#ifndef RAY_GCS_ERROR_INFO_HANDLER_IMPL_H
#define RAY_GCS_ERROR_INFO_HANDLER_IMPL_H
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/gcs_server/gcs_rpc_server.h"
namespace ray {
namespace rpc {
/// Default implementation of `ErrorInfoHandler`: forwards error-report RPCs to
/// the Redis-backed GCS client supplied at construction.
class DefaultErrorInfoHandler : public rpc::ErrorInfoHandler {
 public:
  /// \param gcs_client Backend client; must outlive this handler.
  explicit DefaultErrorInfoHandler(gcs::RedisGcsClient &gcs_client)
      : gcs_client_(gcs_client) {}

  void HandleReportJobError(const ReportJobErrorRequest &request,
                            ReportJobErrorReply *reply,
                            SendReplyCallback send_reply_callback) override;

 private:
  // Reference to the shared backend client (owned by the server).
  gcs::RedisGcsClient &gcs_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_GCS_ERROR_INFO_HANDLER_IMPL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/gcs_server.cc
|
C++
|
#include "gcs_server.h"

#include <memory>

#include "actor_info_handler_impl.h"
#include "error_info_handler_impl.h"
#include "job_info_handler_impl.h"
#include "node_info_handler_impl.h"
#include "object_info_handler_impl.h"
#include "stats_handler_impl.h"
#include "task_info_handler_impl.h"
#include "worker_info_handler_impl.h"
namespace ray {
namespace gcs {
/// Store the configuration and construct (but do not start) the grpc server
/// with the configured name, port, and thread count.
GcsServer::GcsServer(const ray::gcs::GcsServerConfig &config)
    : config_(config),
      rpc_server_(config.grpc_server_name, config.grpc_server_port,
                  config.grpc_server_thread_num) {}

/// Ensure the rpc server and event loop are shut down on destruction.
GcsServer::~GcsServer() { Stop(); }
/// Connect the backend, register all RPC services, start the grpc server, and
/// then block running the event loop until Stop() is called.
void GcsServer::Start() {
  // Init backend client.
  InitBackendClient();

  // Register rpc service. Each handler is created first and must stay alive
  // for as long as its grpc service is registered.
  job_info_handler_ = InitJobInfoHandler();
  job_info_service_.reset(new rpc::JobInfoGrpcService(main_service_, *job_info_handler_));
  rpc_server_.RegisterService(*job_info_service_);

  actor_info_handler_ = InitActorInfoHandler();
  actor_info_service_.reset(
      new rpc::ActorInfoGrpcService(main_service_, *actor_info_handler_));
  rpc_server_.RegisterService(*actor_info_service_);

  node_info_handler_ = InitNodeInfoHandler();
  node_info_service_.reset(
      new rpc::NodeInfoGrpcService(main_service_, *node_info_handler_))
;
  rpc_server_.RegisterService(*node_info_service_);

  object_info_handler_ = InitObjectInfoHandler();
  object_info_service_.reset(
      new rpc::ObjectInfoGrpcService(main_service_, *object_info_handler_));
  rpc_server_.RegisterService(*object_info_service_);

  task_info_handler_ = InitTaskInfoHandler();
  task_info_service_.reset(
      new rpc::TaskInfoGrpcService(main_service_, *task_info_handler_));
  rpc_server_.RegisterService(*task_info_service_);

  stats_handler_ = InitStatsHandler();
  stats_service_.reset(new rpc::StatsGrpcService(main_service_, *stats_handler_));
  rpc_server_.RegisterService(*stats_service_);

  error_info_handler_ = InitErrorInfoHandler();
  error_info_service_.reset(
      new rpc::ErrorInfoGrpcService(main_service_, *error_info_handler_));
  rpc_server_.RegisterService(*error_info_service_);

  worker_info_handler_ = InitWorkerInfoHandler();
  worker_info_service_.reset(
      new rpc::WorkerInfoGrpcService(main_service_, *worker_info_handler_));
  rpc_server_.RegisterService(*worker_info_service_);

  // Run rpc server.
  rpc_server_.Run();

  // Run the event loop.
  // Using boost::asio::io_context::work to avoid ending the event loop when
  // there are no events to handle. run() blocks here until Stop() calls
  // main_service_.stop().
  boost::asio::io_context::work worker(main_service_);
  main_service_.run();
}
/// Stop serving: shut down grpc first, then halt the event loop so the
/// blocking main_service_.run() in Start() returns.
void GcsServer::Stop() {
  // Shutdown the rpc server
  rpc_server_.Shutdown();

  // Stop the event loop.
  main_service_.stop();
}
/// Create and connect the Redis-backed GCS client used by every RPC handler.
/// RAY_CHECK-fails (aborting startup) if the connection cannot be made.
void GcsServer::InitBackendClient() {
  GcsClientOptions options(config_.redis_address, config_.redis_port,
                           config_.redis_password, config_.is_test);
  redis_gcs_client_ = std::make_shared<RedisGcsClient>(options);
  auto status = redis_gcs_client_->Connect(main_service_);
  RAY_CHECK(status.ok()) << "Failed to init redis gcs client as " << status;
}
std::unique_ptr<rpc::JobInfoHandler> GcsServer::InitJobInfoHandler() {
return std::unique_ptr<rpc::DefaultJobInfoHandler>(
new rpc::DefaultJobInfoHandler(*redis_gcs_client_));
}
std::unique_ptr<rpc::ActorInfoHandler> GcsServer::InitActorInfoHandler() {
return std::unique_ptr<rpc::DefaultActorInfoHandler>(
new rpc::DefaultActorInfoHandler(*redis_gcs_client_));
}
std::unique_ptr<rpc::NodeInfoHandler> GcsServer::InitNodeInfoHandler() {
return std::unique_ptr<rpc::DefaultNodeInfoHandler>(
new rpc::DefaultNodeInfoHandler(*redis_gcs_client_));
}
std::unique_ptr<rpc::ObjectInfoHandler> GcsServer::InitObjectInfoHandler() {
return std::unique_ptr<rpc::DefaultObjectInfoHandler>(
new rpc::DefaultObjectInfoHandler(*redis_gcs_client_));
}
std::unique_ptr<rpc::TaskInfoHandler> GcsServer::InitTaskInfoHandler() {
return std::unique_ptr<rpc::DefaultTaskInfoHandler>(
new rpc::DefaultTaskInfoHandler(*redis_gcs_client_));
}
std::unique_ptr<rpc::StatsHandler> GcsServer::InitStatsHandler() {
return std::unique_ptr<rpc::DefaultStatsHandler>(
new rpc::DefaultStatsHandler(*redis_gcs_client_));
}
std::unique_ptr<rpc::ErrorInfoHandler> GcsServer::InitErrorInfoHandler() {
return std::unique_ptr<rpc::DefaultErrorInfoHandler>(
new rpc::DefaultErrorInfoHandler(*redis_gcs_client_));
}
std::unique_ptr<rpc::WorkerInfoHandler> GcsServer::InitWorkerInfoHandler() {
return std::unique_ptr<rpc::DefaultWorkerInfoHandler>(
new rpc::DefaultWorkerInfoHandler(*redis_gcs_client_));
}
} // namespace gcs
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/gcs_server.h
|
C/C++ Header
|
#ifndef RAY_GCS_GCS_SERVER_H
#define RAY_GCS_GCS_SERVER_H
#include <ray/gcs/redis_gcs_client.h>
#include <ray/rpc/gcs_server/gcs_rpc_server.h>
namespace ray {
namespace gcs {
/// Startup configuration for the GCS server.
struct GcsServerConfig {
  std::string grpc_server_name = "GcsServer";
  // Port 0 presumably lets grpc pick a free port; the chosen port can be read
  // back via GcsServer::GetPort().
  uint16_t grpc_server_port = 0;
  uint16_t grpc_server_thread_num = 1;
  std::string redis_password;
  std::string redis_address;
  uint16_t redis_port = 6379;
  bool retry_redis = true;
  bool is_test = false;
};
/// The GcsServer will take over all requests from ServiceBasedGcsClient and transparent
/// transmit the command to the backend reliable storage for the time being.
/// In the future, GCS server's main responsibility is to manage meta data
/// and the management of actor creation.
/// For more details, please see the design document.
/// https://docs.google.com/document/d/1d-9qBlsh2UQHo-AWMWR0GptI_Ajwu4SKx0Q0LHKPpeI/edit#heading=h.csi0gaglj2pv
class GcsServer {
 public:
  explicit GcsServer(const GcsServerConfig &config);
  virtual ~GcsServer();

  /// Start gcs server. Blocks running the event loop until Stop() is called.
  void Start();

  /// Stop gcs server.
  void Stop();

  /// Get the port of this gcs server.
  int GetPort() const { return rpc_server_.GetPort(); }

 protected:
  /// Initialize the backend storage client
  /// The gcs server is just the proxy between the gcs client and reliable storage
  /// for the time being, so we need a backend client to connect to the storage.
  virtual void InitBackendClient();

  // Handler factories are virtual so tests/subclasses can substitute their own
  // implementations.

  /// The job info handler
  virtual std::unique_ptr<rpc::JobInfoHandler> InitJobInfoHandler();

  /// The actor info handler
  virtual std::unique_ptr<rpc::ActorInfoHandler> InitActorInfoHandler();

  /// The node info handler
  virtual std::unique_ptr<rpc::NodeInfoHandler> InitNodeInfoHandler();

  /// The object info handler
  virtual std::unique_ptr<rpc::ObjectInfoHandler> InitObjectInfoHandler();

  /// The task info handler
  virtual std::unique_ptr<rpc::TaskInfoHandler> InitTaskInfoHandler();

  /// The stats handler
  virtual std::unique_ptr<rpc::StatsHandler> InitStatsHandler();

  /// The error info handler
  virtual std::unique_ptr<rpc::ErrorInfoHandler> InitErrorInfoHandler();

  /// The worker info handler
  virtual std::unique_ptr<rpc::WorkerInfoHandler> InitWorkerInfoHandler();

 private:
  /// Gcs server configuration
  GcsServerConfig config_;
  /// The grpc server
  rpc::GrpcServer rpc_server_;
  /// The main io service to drive event posted from grpc threads.
  boost::asio::io_context main_service_;

  // Each handler is declared before its service, so it is destroyed after the
  // service that references it.

  /// Job info handler and service
  std::unique_ptr<rpc::JobInfoHandler> job_info_handler_;
  std::unique_ptr<rpc::JobInfoGrpcService> job_info_service_;
  /// Actor info handler and service
  std::unique_ptr<rpc::ActorInfoHandler> actor_info_handler_;
  std::unique_ptr<rpc::ActorInfoGrpcService> actor_info_service_;
  /// Node info handler and service
  std::unique_ptr<rpc::NodeInfoHandler> node_info_handler_;
  std::unique_ptr<rpc::NodeInfoGrpcService> node_info_service_;
  /// Object info handler and service
  std::unique_ptr<rpc::ObjectInfoHandler> object_info_handler_;
  std::unique_ptr<rpc::ObjectInfoGrpcService> object_info_service_;
  /// Task info handler and service
  std::unique_ptr<rpc::TaskInfoHandler> task_info_handler_;
  std::unique_ptr<rpc::TaskInfoGrpcService> task_info_service_;
  /// Stats handler and service
  std::unique_ptr<rpc::StatsHandler> stats_handler_;
  std::unique_ptr<rpc::StatsGrpcService> stats_service_;
  /// Error info handler and service
  std::unique_ptr<rpc::ErrorInfoHandler> error_info_handler_;
  std::unique_ptr<rpc::ErrorInfoGrpcService> error_info_service_;
  /// Worker info handler and service
  std::unique_ptr<rpc::WorkerInfoHandler> worker_info_handler_;
  std::unique_ptr<rpc::WorkerInfoGrpcService> worker_info_service_;
  /// Backend client
  std::shared_ptr<RedisGcsClient> redis_gcs_client_;
};
} // namespace gcs
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/gcs_server_main.cc
|
C++
|
#include <iostream>
#include "ray/common/ray_config.h"
#include "ray/gcs/gcs_server/gcs_server.h"
#include "ray/util/util.h"
#include "gflags/gflags.h"
DEFINE_string(redis_address, "", "The ip address of redis.");
DEFINE_int32(redis_port, -1, "The port of redis.");
DEFINE_string(config_list, "", "The config list of raylet.");
DEFINE_string(redis_password, "", "The password of redis.");
DEFINE_bool(retry_redis, false, "Whether we retry to connect to the redis.");
/// GCS server entry point: parse flags and the raylet config list, then start
/// the server (Start() blocks for the process lifetime).
int main(int argc, char *argv[]) {
  // RAII wrapper: starts logging now and shuts it down when main exits.
  InitShutdownRAII ray_log_shutdown_raii(ray::RayLog::StartRayLog,
                                         ray::RayLog::ShutDownRayLog, argv[0],
                                         ray::RayLogLevel::INFO, /*log_dir=*/"");
  ray::RayLog::InstallFailureSignalHandler();

  gflags::ParseCommandLineFlags(&argc, &argv, true);
  const std::string redis_address = FLAGS_redis_address;
  const int redis_port = static_cast<int>(FLAGS_redis_port);
  const std::string config_list = FLAGS_config_list;
  const std::string redis_password = FLAGS_redis_password;
  const bool retry_redis = FLAGS_retry_redis;
  gflags::ShutDownCommandLineFlags();

  std::unordered_map<std::string, std::string> config_map;

  // Parse the configuration list: a flat comma-separated sequence of
  // name,value pairs. A trailing name with no value fails the RAY_CHECK.
  std::istringstream config_string(config_list);
  std::string config_name;
  std::string config_value;

  while (std::getline(config_string, config_name, ',')) {
    RAY_CHECK(std::getline(config_string, config_value, ','));
    config_map[config_name] = config_value;
  }

  RayConfig::instance().initialize(config_map);

  ray::gcs::GcsServerConfig gcs_server_config;
  gcs_server_config.grpc_server_name = "GcsServer";
  gcs_server_config.grpc_server_port = 0;
  gcs_server_config.grpc_server_thread_num = 1;
  gcs_server_config.redis_address = redis_address;
  gcs_server_config.redis_port = redis_port;
  gcs_server_config.redis_password = redis_password;
  gcs_server_config.retry_redis = retry_redis;
  ray::gcs::GcsServer gcs_server(gcs_server_config);
  // Blocks: runs the server's event loop until Stop() is invoked.
  gcs_server.Start();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/job_info_handler_impl.cc
|
C++
|
#include "job_info_handler_impl.h"
namespace ray {
namespace rpc {
/// Persist a new job table entry; the reply's `success` flag mirrors the
/// storage status.
void DefaultJobInfoHandler::HandleAddJob(const rpc::AddJobRequest &request,
                                         rpc::AddJobReply *reply,
                                         rpc::SendReplyCallback send_reply_callback) {
  JobID job_id = JobID::FromBinary(request.data().job_id());
  // Read the pid once up front. The previous version captured the whole
  // `request` by value in the callback, copying the entire protobuf just to
  // log this one field.
  const auto driver_pid = request.data().driver_pid();
  RAY_LOG(DEBUG) << "Adding job, job id = " << job_id
                 << ", driver pid = " << driver_pid;
  // Shared copy so the payload stays valid for the duration of the async add.
  auto job_table_data = std::make_shared<JobTableData>();
  job_table_data->CopyFrom(request.data());
  auto on_done = [job_id, driver_pid, reply, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to add job, job id = " << job_id
                     << ", driver pid = " << driver_pid;
    }
    reply->set_success(status.ok());
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Jobs().AsyncAdd(job_table_data, on_done);
  if (!status.ok()) {
    // The async call was never issued; complete the RPC inline.
    on_done(status);
  }
  RAY_LOG(DEBUG) << "Finished adding job, job id = " << job_id
                 << ", driver pid = " << driver_pid;
}
/// Mark a job as finished in the backend; the reply's `success` flag mirrors
/// the storage status.
void DefaultJobInfoHandler::HandleMarkJobFinished(
    const rpc::MarkJobFinishedRequest &request, rpc::MarkJobFinishedReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  const JobID job_id = JobID::FromBinary(request.job_id());
  RAY_LOG(DEBUG) << "Marking job state, job id = " << job_id;
  // Records the outcome on the reply and completes the RPC.
  auto finish = [job_id, reply, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to mark job state, job id = " << job_id;
    }
    reply->set_success(status.ok());
    send_reply_callback(status, nullptr, nullptr);
  };
  const Status mark_status = gcs_client_.Jobs().AsyncMarkFinished(job_id, finish);
  if (!mark_status.ok()) {
    // The async request was never issued; complete the RPC inline.
    finish(mark_status);
  }
  RAY_LOG(DEBUG) << "Finished marking job state, job id = " << job_id;
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/job_info_handler_impl.h
|
C/C++ Header
|
#ifndef RAY_GCS_JOB_INFO_HANDLER_IMPL_H
#define RAY_GCS_JOB_INFO_HANDLER_IMPL_H
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/gcs_server/gcs_rpc_server.h"
namespace ray {
namespace rpc {
/// Default implementation of `JobInfoHandler`: forwards job-related RPCs to
/// the Redis-backed GCS client supplied at construction.
class DefaultJobInfoHandler : public rpc::JobInfoHandler {
 public:
  /// \param gcs_client Backend client; must outlive this handler.
  explicit DefaultJobInfoHandler(gcs::RedisGcsClient &gcs_client)
      : gcs_client_(gcs_client) {}

  void HandleAddJob(const AddJobRequest &request, AddJobReply *reply,
                    SendReplyCallback send_reply_callback) override;

  void HandleMarkJobFinished(const MarkJobFinishedRequest &request,
                             MarkJobFinishedReply *reply,
                             SendReplyCallback send_reply_callback) override;

 private:
  // Reference to the shared backend client (owned by the server).
  gcs::RedisGcsClient &gcs_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_GCS_JOB_INFO_HANDLER_IMPL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/node_info_handler_impl.cc
|
C++
|
#include "node_info_handler_impl.h"
#include "ray/util/logging.h"
namespace ray {
namespace rpc {
void DefaultNodeInfoHandler::HandleRegisterNode(
    const rpc::RegisterNodeRequest &request, rpc::RegisterNodeReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  // Persist the node's registration info in the GCS and reply when done.
  const ClientID node_id = ClientID::FromBinary(request.node_info().node_id());
  RAY_LOG(DEBUG) << "Registering node info, node id = " << node_id;
  auto reply_with_status = [node_id, send_reply_callback](const Status &status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to register node info: " << status.ToString()
                     << ", node id = " << node_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  const Status submit_status =
      gcs_client_.Nodes().AsyncRegister(request.node_info(), reply_with_status);
  if (!submit_status.ok()) {
    // Submission itself failed; report the error to the caller immediately.
    reply_with_status(submit_status);
  }
  RAY_LOG(DEBUG) << "Finished registering node info, node id = " << node_id;
}
void DefaultNodeInfoHandler::HandleUnregisterNode(
    const rpc::UnregisterNodeRequest &request, rpc::UnregisterNodeReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  // Remove the node's registration entry from the GCS.
  const ClientID node_id = ClientID::FromBinary(request.node_id());
  RAY_LOG(DEBUG) << "Unregistering node info, node id = " << node_id;
  auto reply_with_status = [node_id, send_reply_callback](const Status &status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to unregister node info: " << status.ToString()
                     << ", node id = " << node_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  const Status submit_status =
      gcs_client_.Nodes().AsyncUnregister(node_id, reply_with_status);
  if (!submit_status.ok()) {
    // Submission itself failed; report the error to the caller immediately.
    reply_with_status(submit_status);
  }
  RAY_LOG(DEBUG) << "Finished unregistering node info, node id = " << node_id;
}
void DefaultNodeInfoHandler::HandleGetAllNodeInfo(
    const rpc::GetAllNodeInfoRequest &request, rpc::GetAllNodeInfoReply *reply,
    rpc::SendReplyCallback send_reply_callback) {
  // Fetch the full node table from the GCS and copy it into the reply.
  RAY_LOG(DEBUG) << "Getting all nodes info.";
  auto reply_with_nodes = [reply, send_reply_callback](
                              const Status &status,
                              const std::vector<rpc::GcsNodeInfo> &nodes) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to get all nodes info: " << status.ToString();
    } else {
      for (const auto &node_info : nodes) {
        reply->add_node_info_list()->CopyFrom(node_info);
      }
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  const Status submit_status = gcs_client_.Nodes().AsyncGetAll(reply_with_nodes);
  if (!submit_status.ok()) {
    // Submission itself failed; reply immediately with an empty node list.
    reply_with_nodes(submit_status, {});
  }
  RAY_LOG(DEBUG) << "Finished getting all node info.";
}
void DefaultNodeInfoHandler::HandleReportHeartbeat(
const ReportHeartbeatRequest &request, ReportHeartbeatReply *reply,
SendReplyCallback send_reply_callback) {
ClientID node_id = ClientID::FromBinary(request.heartbeat().client_id());
RAY_LOG(DEBUG) << "Reporting heartbeat, node id = " << node_id;
auto on_done = [node_id, send_reply_callback](Status status) {
if (!status.ok()) {
RAY_LOG(ERROR) << "Failed to report heartbeat: " << status.ToString()
<< ", node id = " << node_id;
}
send_reply_callback(status, nullptr, nullptr);
};
auto heartbeat_data = std::make_shared<rpc::HeartbeatTableData>();
heartbeat_data->CopyFrom(request.heartbeat());
Status status = gcs_client_.Nodes().AsyncReportHeartbeat(heartbeat_data, on_done);
if (!status.ok()) {
on_done(status);
}
RAY_LOG(DEBUG) << "Finished reporting heartbeat, node id = " << node_id;
}
void DefaultNodeInfoHandler::HandleReportBatchHeartbeat(
    const ReportBatchHeartbeatRequest &request, ReportBatchHeartbeatReply *reply,
    SendReplyCallback send_reply_callback) {
  // Forward a batch of node heartbeats to the GCS.
  RAY_LOG(DEBUG) << "Reporting batch heartbeat, batch size = "
                 << request.heartbeat_batch().batch_size();
  // Capture the batch size by value instead of `&request`: `on_done` is an
  // asynchronous completion callback that can run after this handler has
  // returned, at which point the `request` reference would be dangling.
  // (All sibling handlers in this file capture by value for this reason.)
  const int batch_size = request.heartbeat_batch().batch_size();
  auto on_done = [batch_size, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to report batch heartbeat: " << status.ToString()
                     << ", batch size = " << batch_size;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  // The accessor takes shared ownership of the batch payload.
  auto heartbeat_batch_data = std::make_shared<rpc::HeartbeatBatchTableData>();
  heartbeat_batch_data->CopyFrom(request.heartbeat_batch());
  Status status =
      gcs_client_.Nodes().AsyncReportBatchHeartbeat(heartbeat_batch_data, on_done);
  if (!status.ok()) {
    on_done(status);
  }
  RAY_LOG(DEBUG) << "Finished reporting batch heartbeat, batch size = "
                 << request.heartbeat_batch().batch_size();
}
void DefaultNodeInfoHandler::HandleGetResources(const GetResourcesRequest &request,
GetResourcesReply *reply,
SendReplyCallback send_reply_callback) {
ClientID node_id = ClientID::FromBinary(request.node_id());
RAY_LOG(DEBUG) << "Getting node resources, node id = " << node_id;
auto on_done = [node_id, reply, send_reply_callback](
Status status,
const boost::optional<gcs::NodeInfoAccessor::ResourceMap> &result) {
if (status.ok()) {
if (result) {
for (auto &resource : *result) {
(*reply->mutable_resources())[resource.first] = *resource.second;
}
}
} else {
RAY_LOG(ERROR) << "Failed to get node resources: " << status.ToString()
<< ", node id = " << node_id;
}
send_reply_callback(status, nullptr, nullptr);
};
Status status = gcs_client_.Nodes().AsyncGetResources(node_id, on_done);
if (!status.ok()) {
on_done(status, boost::none);
}
RAY_LOG(DEBUG) << "Finished getting node resources, node id = " << node_id;
}
void DefaultNodeInfoHandler::HandleUpdateResources(
    const UpdateResourcesRequest &request, UpdateResourcesReply *reply,
    SendReplyCallback send_reply_callback) {
  // Create or overwrite resource entries of a node in the GCS.
  ClientID node_id = ClientID::FromBinary(request.node_id());
  gcs::NodeInfoAccessor::ResourceMap resources;
  // Iterate by const reference: the original `auto resource` copied every
  // (name, ResourceTableData) pair on each iteration
  // (clang-tidy: performance-for-range-copy).
  for (const auto &resource : request.resources()) {
    resources[resource.first] =
        std::make_shared<rpc::ResourceTableData>(resource.second);
  }
  RAY_LOG(DEBUG) << "Updating node resources, node id = " << node_id;
  // Completion callback: logs failures and relays the status to the caller.
  auto on_done = [node_id, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to update node resources: " << status.ToString()
                     << ", node id = " << node_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Nodes().AsyncUpdateResources(node_id, resources, on_done);
  if (!status.ok()) {
    on_done(status);
  }
  RAY_LOG(DEBUG) << "Finished updating node resources, node id = " << node_id;
}
void DefaultNodeInfoHandler::HandleDeleteResources(
const DeleteResourcesRequest &request, DeleteResourcesReply *reply,
SendReplyCallback send_reply_callback) {
ClientID node_id = ClientID::FromBinary(request.node_id());
auto resource_names = VectorFromProtobuf(request.resource_name_list());
RAY_LOG(DEBUG) << "Deleting node resources, node id = " << node_id;
auto on_done = [node_id, send_reply_callback](Status status) {
if (!status.ok()) {
RAY_LOG(ERROR) << "Failed to delete node resources: " << status.ToString()
<< ", node id = " << node_id;
}
send_reply_callback(status, nullptr, nullptr);
};
Status status =
gcs_client_.Nodes().AsyncDeleteResources(node_id, resource_names, on_done);
if (!status.ok()) {
on_done(status);
}
RAY_LOG(DEBUG) << "Finished deleting node resources, node id = " << node_id;
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/node_info_handler_impl.h
|
C/C++ Header
|
#ifndef RAY_GCS_NODE_INFO_HANDLER_IMPL_H
#define RAY_GCS_NODE_INFO_HANDLER_IMPL_H
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/gcs_server/gcs_rpc_server.h"
namespace ray {
namespace rpc {
/// This implementation class of `NodeInfoHandler`.
/// Serves node-table RPCs (register/unregister, heartbeats, resource CRUD)
/// by forwarding each request to the Redis-backed GCS client and replying
/// asynchronously once the corresponding GCS operation completes.
class DefaultNodeInfoHandler : public rpc::NodeInfoHandler {
 public:
  /// \param gcs_client The Redis GCS client used for all node-table access.
  /// The caller retains ownership; it must outlive this handler.
  explicit DefaultNodeInfoHandler(gcs::RedisGcsClient &gcs_client)
      : gcs_client_(gcs_client) {}

  /// Register a new node with the GCS.
  void HandleRegisterNode(const RegisterNodeRequest &request, RegisterNodeReply *reply,
                          SendReplyCallback send_reply_callback) override;

  /// Unregister a node from the GCS.
  void HandleUnregisterNode(const UnregisterNodeRequest &request,
                            UnregisterNodeReply *reply,
                            SendReplyCallback send_reply_callback) override;

  /// Get information about all registered nodes.
  void HandleGetAllNodeInfo(const GetAllNodeInfoRequest &request,
                            GetAllNodeInfoReply *reply,
                            SendReplyCallback send_reply_callback) override;

  /// Report a single node's heartbeat.
  void HandleReportHeartbeat(const ReportHeartbeatRequest &request,
                             ReportHeartbeatReply *reply,
                             SendReplyCallback send_reply_callback) override;

  /// Report a batch of heartbeats collected from multiple nodes.
  void HandleReportBatchHeartbeat(const ReportBatchHeartbeatRequest &request,
                                  ReportBatchHeartbeatReply *reply,
                                  SendReplyCallback send_reply_callback) override;

  /// Get the resource map of a single node.
  void HandleGetResources(const GetResourcesRequest &request, GetResourcesReply *reply,
                          SendReplyCallback send_reply_callback) override;

  /// Create or overwrite resource entries of a node.
  void HandleUpdateResources(const UpdateResourcesRequest &request,
                             UpdateResourcesReply *reply,
                             SendReplyCallback send_reply_callback) override;

  /// Delete named resource entries of a node.
  void HandleDeleteResources(const DeleteResourcesRequest &request,
                             DeleteResourcesReply *reply,
                             SendReplyCallback send_reply_callback) override;

 private:
  /// Reference to the shared Redis GCS client (not owned).
  gcs::RedisGcsClient &gcs_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_GCS_NODE_INFO_HANDLER_IMPL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/object_info_handler_impl.cc
|
C++
|
#include "object_info_handler_impl.h"
#include "ray/util/logging.h"
namespace ray {
namespace rpc {
void DefaultObjectInfoHandler::HandleGetObjectLocations(
const GetObjectLocationsRequest &request, GetObjectLocationsReply *reply,
SendReplyCallback send_reply_callback) {
ObjectID object_id = ObjectID::FromBinary(request.object_id());
RAY_LOG(DEBUG) << "Getting object locations, object id = " << object_id;
auto on_done = [reply, object_id, send_reply_callback](
Status status, const std::vector<rpc::ObjectTableData> &result) {
if (status.ok()) {
for (const rpc::ObjectTableData &object_table_data : result) {
reply->add_object_table_data_list()->CopyFrom(object_table_data);
}
} else {
RAY_LOG(ERROR) << "Failed to get object locations: " << status.ToString()
<< ", object id = " << object_id;
}
send_reply_callback(status, nullptr, nullptr);
};
Status status = gcs_client_.Objects().AsyncGetLocations(object_id, on_done);
if (!status.ok()) {
on_done(status, std::vector<rpc::ObjectTableData>());
}
RAY_LOG(DEBUG) << "Finished getting object locations, object id = " << object_id;
}
void DefaultObjectInfoHandler::HandleAddObjectLocation(
const AddObjectLocationRequest &request, AddObjectLocationReply *reply,
SendReplyCallback send_reply_callback) {
ObjectID object_id = ObjectID::FromBinary(request.object_id());
ClientID node_id = ClientID::FromBinary(request.node_id());
RAY_LOG(DEBUG) << "Adding object location, object id = " << object_id
<< ", node id = " << node_id;
auto on_done = [object_id, node_id, send_reply_callback](Status status) {
if (!status.ok()) {
RAY_LOG(ERROR) << "Failed to add object location: " << status.ToString()
<< ", object id = " << object_id << ", node id = " << node_id;
}
send_reply_callback(status, nullptr, nullptr);
};
Status status = gcs_client_.Objects().AsyncAddLocation(object_id, node_id, on_done);
if (!status.ok()) {
on_done(status);
}
RAY_LOG(DEBUG) << "Finished adding object location, object id = " << object_id
<< ", node id = " << node_id;
}
void DefaultObjectInfoHandler::HandleRemoveObjectLocation(
const RemoveObjectLocationRequest &request, RemoveObjectLocationReply *reply,
SendReplyCallback send_reply_callback) {
ObjectID object_id = ObjectID::FromBinary(request.object_id());
ClientID node_id = ClientID::FromBinary(request.node_id());
RAY_LOG(DEBUG) << "Removing object location, object id = " << object_id
<< ", node id = " << node_id;
auto on_done = [object_id, node_id, send_reply_callback](Status status) {
if (!status.ok()) {
RAY_LOG(ERROR) << "Failed to add object location: " << status.ToString()
<< ", object id = " << object_id << ", node id = " << node_id;
}
send_reply_callback(status, nullptr, nullptr);
};
Status status = gcs_client_.Objects().AsyncRemoveLocation(object_id, node_id, on_done);
if (!status.ok()) {
on_done(status);
}
RAY_LOG(DEBUG) << "Finished removing object location, object id = " << object_id
<< ", node id = " << node_id;
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/object_info_handler_impl.h
|
C/C++ Header
|
#ifndef RAY_GCS_OBJECT_INFO_HANDLER_IMPL_H
#define RAY_GCS_OBJECT_INFO_HANDLER_IMPL_H
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/gcs_server/gcs_rpc_server.h"
namespace ray {
namespace rpc {
/// This implementation class of `ObjectInfoHandler`.
/// Serves object-location RPCs by forwarding each request to the
/// Redis-backed GCS client and replying asynchronously once the
/// corresponding GCS operation completes.
class DefaultObjectInfoHandler : public rpc::ObjectInfoHandler {
 public:
  /// \param gcs_client The Redis GCS client used for object-table access.
  /// The caller retains ownership; it must outlive this handler.
  explicit DefaultObjectInfoHandler(gcs::RedisGcsClient &gcs_client)
      : gcs_client_(gcs_client) {}

  /// Get all known locations (nodes) of an object.
  void HandleGetObjectLocations(const GetObjectLocationsRequest &request,
                                GetObjectLocationsReply *reply,
                                SendReplyCallback send_reply_callback) override;

  /// Record that a node holds a copy of an object.
  void HandleAddObjectLocation(const AddObjectLocationRequest &request,
                               AddObjectLocationReply *reply,
                               SendReplyCallback send_reply_callback) override;

  /// Remove a node from an object's location set.
  void HandleRemoveObjectLocation(const RemoveObjectLocationRequest &request,
                                  RemoveObjectLocationReply *reply,
                                  SendReplyCallback send_reply_callback) override;

 private:
  /// Reference to the shared Redis GCS client (not owned).
  gcs::RedisGcsClient &gcs_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_GCS_OBJECT_INFO_HANDLER_IMPL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/stats_handler_impl.cc
|
C++
|
#include "stats_handler_impl.h"
namespace ray {
namespace rpc {
void DefaultStatsHandler::HandleAddProfileData(const AddProfileDataRequest &request,
AddProfileDataReply *reply,
SendReplyCallback send_reply_callback) {
ClientID node_id = ClientID::FromBinary(request.profile_data().component_id());
RAY_LOG(DEBUG) << "Adding profile data, component type = "
<< request.profile_data().component_type() << ", node id = " << node_id;
auto profile_table_data = std::make_shared<ProfileTableData>();
profile_table_data->CopyFrom(request.profile_data());
auto on_done = [node_id, request, send_reply_callback](Status status) {
if (!status.ok()) {
RAY_LOG(ERROR) << "Failed to add profile data, component type = "
<< request.profile_data().component_type()
<< ", node id = " << node_id;
}
send_reply_callback(status, nullptr, nullptr);
};
Status status = gcs_client_.Stats().AsyncAddProfileData(profile_table_data, on_done);
if (!status.ok()) {
on_done(status);
}
RAY_LOG(DEBUG) << "Finished adding profile data, component type = "
<< request.profile_data().component_type() << ", node id = " << node_id;
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/stats_handler_impl.h
|
C/C++ Header
|
#ifndef RAY_GCS_STATS_HANDLER_IMPL_H
#define RAY_GCS_STATS_HANDLER_IMPL_H
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/gcs_server/gcs_rpc_server.h"
namespace ray {
namespace rpc {
/// This implementation class of `StatsHandler`.
/// Serves profiling-stats RPCs by forwarding each request to the
/// Redis-backed GCS client and replying asynchronously.
class DefaultStatsHandler : public rpc::StatsHandler {
 public:
  /// \param gcs_client The Redis GCS client used for stats-table access.
  /// The caller retains ownership; it must outlive this handler.
  explicit DefaultStatsHandler(gcs::RedisGcsClient &gcs_client)
      : gcs_client_(gcs_client) {}

  /// Append one profile-data entry to the GCS stats table.
  void HandleAddProfileData(const AddProfileDataRequest &request,
                            AddProfileDataReply *reply,
                            SendReplyCallback send_reply_callback) override;

 private:
  /// Reference to the shared Redis GCS client (not owned).
  gcs::RedisGcsClient &gcs_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_GCS_STATS_HANDLER_IMPL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/task_info_handler_impl.cc
|
C++
|
#include "task_info_handler_impl.h"
namespace ray {
namespace rpc {
void DefaultTaskInfoHandler::HandleAddTask(const AddTaskRequest &request,
                                           AddTaskReply *reply,
                                           SendReplyCallback send_reply_callback) {
  // Persist a task-table entry in the GCS.
  JobID job_id = JobID::FromBinary(request.task_data().task().task_spec().job_id());
  TaskID task_id = TaskID::FromBinary(request.task_data().task().task_spec().task_id());
  RAY_LOG(DEBUG) << "Adding task, task id = " << task_id << ", job id = " << job_id;
  auto task_table_data = std::make_shared<TaskTableData>();
  task_table_data->CopyFrom(request.task_data());
  // `request` is intentionally not captured: it was unused in the callback,
  // and capturing it by value copied the entire task protobuf.
  auto on_done = [job_id, task_id, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to add task, task id = " << task_id
                     << ", job id = " << job_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Tasks().AsyncAdd(task_table_data, on_done);
  if (!status.ok()) {
    on_done(status);
  }
  RAY_LOG(DEBUG) << "Finished adding task, task id = " << task_id
                 << ", job id = " << job_id;
}
void DefaultTaskInfoHandler::HandleGetTask(const GetTaskRequest &request,
                                           GetTaskReply *reply,
                                           SendReplyCallback send_reply_callback) {
  // Fetch a single task-table entry from the GCS by task id.
  TaskID task_id = TaskID::FromBinary(request.task_id());
  RAY_LOG(DEBUG) << "Getting task, task id = " << task_id;
  // `request` is intentionally not captured: it was unused in the callback,
  // and capturing it by value copied the whole protobuf message.
  auto on_done = [task_id, reply, send_reply_callback](
                     Status status, const boost::optional<TaskTableData> &result) {
    if (status.ok()) {
      RAY_DCHECK(result);
      reply->mutable_task_data()->CopyFrom(*result);
    } else {
      RAY_LOG(ERROR) << "Failed to get task, task id = " << task_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Tasks().AsyncGet(task_id, on_done);
  if (!status.ok()) {
    on_done(status, boost::none);
  }
  RAY_LOG(DEBUG) << "Finished getting task, task id = " << task_id;
}
void DefaultTaskInfoHandler::HandleDeleteTasks(const DeleteTasksRequest &request,
                                               DeleteTasksReply *reply,
                                               SendReplyCallback send_reply_callback) {
  // Delete a batch of entries from the GCS task table.
  std::vector<TaskID> task_ids = IdVectorFromProtobuf<TaskID>(request.task_id_list());
  RAY_LOG(DEBUG) << "Deleting tasks, task id list size = " << task_ids.size();
  // `request` is intentionally not captured: it was unused in the callback,
  // and capturing it by value copied the whole protobuf message. The size
  // needed for logging comes from the already-captured `task_ids`.
  auto on_done = [task_ids, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to delete tasks, task id list size = " << task_ids.size();
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Tasks().AsyncDelete(task_ids, on_done);
  if (!status.ok()) {
    on_done(status);
  }
  RAY_LOG(DEBUG) << "Finished deleting tasks, task id list size = " << task_ids.size();
}
void DefaultTaskInfoHandler::HandleAddTaskLease(const AddTaskLeaseRequest &request,
                                                AddTaskLeaseReply *reply,
                                                SendReplyCallback send_reply_callback) {
  // Record a task lease (task id -> node manager) in the GCS.
  TaskID task_id = TaskID::FromBinary(request.task_lease_data().task_id())
      ;
  ClientID node_id = ClientID::FromBinary(request.task_lease_data().node_manager_id());
  RAY_LOG(DEBUG) << "Adding task lease, task id = " << task_id
                 << ", node id = " << node_id;
  auto task_lease_data = std::make_shared<TaskLeaseData>();
  task_lease_data->CopyFrom(request.task_lease_data());
  // `request` is intentionally not captured: it was unused in the callback,
  // and capturing it by value copied the whole protobuf message.
  auto on_done = [task_id, node_id, send_reply_callback](Status status) {
    if (!status.ok()) {
      RAY_LOG(ERROR) << "Failed to add task lease, task id = " << task_id
                     << ", node id = " << node_id;
    }
    send_reply_callback(status, nullptr, nullptr);
  };
  Status status = gcs_client_.Tasks().AsyncAddTaskLease(task_lease_data, on_done);
  if (!status.ok()) {
    on_done(status);
  }
  RAY_LOG(DEBUG) << "Finished adding task lease, task id = " << task_id
                 << ", node id = " << node_id;
}
void DefaultTaskInfoHandler::HandleAttemptTaskReconstruction(
const AttemptTaskReconstructionRequest &request,
AttemptTaskReconstructionReply *reply, SendReplyCallback send_reply_callback) {
ClientID node_id =
ClientID::FromBinary(request.task_reconstruction().node_manager_id());
RAY_LOG(DEBUG) << "Reconstructing task, reconstructions num = "
<< request.task_reconstruction().num_reconstructions()
<< ", node id = " << node_id;
auto task_reconstruction_data = std::make_shared<TaskReconstructionData>();
task_reconstruction_data->CopyFrom(request.task_reconstruction());
auto on_done = [node_id, request, send_reply_callback](Status status) {
if (!status.ok()) {
RAY_LOG(ERROR) << "Failed to reconstruct task, reconstructions num = "
<< request.task_reconstruction().num_reconstructions()
<< ", node id = " << node_id;
}
send_reply_callback(status, nullptr, nullptr);
};
Status status =
gcs_client_.Tasks().AttemptTaskReconstruction(task_reconstruction_data, on_done);
if (!status.ok()) {
on_done(status);
}
RAY_LOG(DEBUG) << "Finished reconstructing task, reconstructions num = "
<< request.task_reconstruction().num_reconstructions()
<< ", node id = " << node_id;
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/task_info_handler_impl.h
|
C/C++ Header
|
#ifndef RAY_GCS_TASK_INFO_HANDLER_IMPL_H
#define RAY_GCS_TASK_INFO_HANDLER_IMPL_H
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/gcs_server/gcs_rpc_server.h"
namespace ray {
namespace rpc {
/// This implementation class of `TaskInfoHandler`.
/// Serves task-table RPCs (add/get/delete tasks, task leases, reconstruction
/// attempts) by forwarding each request to the Redis-backed GCS client and
/// replying asynchronously once the corresponding GCS operation completes.
class DefaultTaskInfoHandler : public rpc::TaskInfoHandler {
 public:
  /// \param gcs_client The Redis GCS client used for task-table access.
  /// The caller retains ownership; it must outlive this handler.
  explicit DefaultTaskInfoHandler(gcs::RedisGcsClient &gcs_client)
      : gcs_client_(gcs_client) {}

  /// Add a task entry to the GCS task table.
  void HandleAddTask(const AddTaskRequest &request, AddTaskReply *reply,
                     SendReplyCallback send_reply_callback) override;

  /// Fetch a single task entry by task id.
  void HandleGetTask(const GetTaskRequest &request, GetTaskReply *reply,
                     SendReplyCallback send_reply_callback) override;

  /// Delete a batch of task entries.
  void HandleDeleteTasks(const DeleteTasksRequest &request, DeleteTasksReply *reply,
                         SendReplyCallback send_reply_callback) override;

  /// Record a task lease (task -> node manager).
  void HandleAddTaskLease(const AddTaskLeaseRequest &request, AddTaskLeaseReply *reply,
                          SendReplyCallback send_reply_callback) override;

  /// Record a task reconstruction attempt.
  void HandleAttemptTaskReconstruction(const AttemptTaskReconstructionRequest &request,
                                       AttemptTaskReconstructionReply *reply,
                                       SendReplyCallback send_reply_callback) override;

 private:
  /// Reference to the shared Redis GCS client (not owned).
  gcs::RedisGcsClient &gcs_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_GCS_TASK_INFO_HANDLER_IMPL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/test/gcs_server_rpc_test.cc
|
C++
|
#include "gtest/gtest.h"
#include "ray/gcs/gcs_server/gcs_server.h"
#include "ray/rpc/gcs_server/gcs_rpc_client.h"
#include "ray/util/test_util.h"
namespace ray {
static std::string redis_server_executable;
static std::string redis_client_executable;
static std::string libray_redis_module_path;
class GcsServerTest : public RedisServiceManagerForTest {
public:
void SetUp() override {
gcs::GcsServerConfig config;
config.grpc_server_port = 0;
config.grpc_server_name = "MockedGcsServer";
config.grpc_server_thread_num = 1;
config.redis_address = "127.0.0.1";
config.is_test = true;
config.redis_port = REDIS_SERVER_PORT;
gcs_server_.reset(new gcs::GcsServer(config));
thread_io_service_.reset(new std::thread([this] {
std::unique_ptr<boost::asio::io_service::work> work(
new boost::asio::io_service::work(io_service_));
io_service_.run();
}));
thread_gcs_server_.reset(new std::thread([this] { gcs_server_->Start(); }));
// Wait until server starts listening.
while (gcs_server_->GetPort() == 0) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
// Create gcs rpc client
client_call_manager_.reset(new rpc::ClientCallManager(io_service_));
client_.reset(
new rpc::GcsRpcClient("0.0.0.0", gcs_server_->GetPort(), *client_call_manager_));
}
void TearDown() override {
gcs_server_->Stop();
io_service_.stop();
thread_io_service_->join();
thread_gcs_server_->join();
}
bool AddJob(const rpc::AddJobRequest &request) {
std::promise<bool> promise;
client_->AddJob(request,
[&promise](const Status &status, const rpc::AddJobReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool MarkJobFinished(const rpc::MarkJobFinishedRequest &request) {
std::promise<bool> promise;
client_->MarkJobFinished(request, [&promise](const Status &status,
const rpc::MarkJobFinishedReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool RegisterActorInfo(const rpc::RegisterActorInfoRequest &request) {
std::promise<bool> promise;
client_->RegisterActorInfo(
request,
[&promise](const Status &status, const rpc::RegisterActorInfoReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool UpdateActorInfo(const rpc::UpdateActorInfoRequest &request) {
std::promise<bool> promise;
client_->UpdateActorInfo(request, [&promise](const Status &status,
const rpc::UpdateActorInfoReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
rpc::ActorTableData GetActorInfo(const std::string &actor_id) {
rpc::GetActorInfoRequest request;
request.set_actor_id(actor_id);
rpc::ActorTableData actor_table_data;
std::promise<bool> promise;
client_->GetActorInfo(
request, [&actor_table_data, &promise](const Status &status,
const rpc::GetActorInfoReply &reply) {
RAY_CHECK_OK(status);
actor_table_data.CopyFrom(reply.actor_table_data());
promise.set_value(true);
});
EXPECT_TRUE(WaitReady(promise.get_future(), timeout_ms_));
return actor_table_data;
}
bool AddActorCheckpoint(const rpc::AddActorCheckpointRequest &request) {
std::promise<bool> promise;
client_->AddActorCheckpoint(
request,
[&promise](const Status &status, const rpc::AddActorCheckpointReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
rpc::ActorCheckpointData GetActorCheckpoint(const std::string &checkpoint_id) {
rpc::GetActorCheckpointRequest request;
request.set_checkpoint_id(checkpoint_id);
rpc::ActorCheckpointData checkpoint_data;
std::promise<bool> promise;
client_->GetActorCheckpoint(
request, [&checkpoint_data, &promise](const Status &status,
const rpc::GetActorCheckpointReply &reply) {
RAY_CHECK_OK(status);
checkpoint_data.CopyFrom(reply.checkpoint_data());
promise.set_value(true);
});
EXPECT_TRUE(WaitReady(promise.get_future(), timeout_ms_));
return checkpoint_data;
}
rpc::ActorCheckpointIdData GetActorCheckpointID(const std::string &actor_id) {
rpc::GetActorCheckpointIDRequest request;
request.set_actor_id(actor_id);
rpc::ActorCheckpointIdData checkpoint_id_data;
std::promise<bool> promise;
client_->GetActorCheckpointID(
request, [&checkpoint_id_data, &promise](
const Status &status, const rpc::GetActorCheckpointIDReply &reply) {
RAY_CHECK_OK(status);
checkpoint_id_data.CopyFrom(reply.checkpoint_id_data());
promise.set_value(true);
});
EXPECT_TRUE(WaitReady(promise.get_future(), timeout_ms_));
return checkpoint_id_data;
}
bool RegisterNode(const rpc::RegisterNodeRequest &request) {
std::promise<bool> promise;
client_->RegisterNode(
request, [&promise](const Status &status, const rpc::RegisterNodeReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool UnregisterNode(const rpc::UnregisterNodeRequest &request) {
std::promise<bool> promise;
client_->UnregisterNode(
request, [&promise](const Status &status, const rpc::UnregisterNodeReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
std::vector<rpc::GcsNodeInfo> GetAllNodeInfo() {
std::vector<rpc::GcsNodeInfo> node_info_list;
rpc::GetAllNodeInfoRequest request;
std::promise<bool> promise;
client_->GetAllNodeInfo(
request, [&node_info_list, &promise](const Status &status,
const rpc::GetAllNodeInfoReply &reply) {
RAY_CHECK_OK(status);
for (int index = 0; index < reply.node_info_list_size(); ++index) {
node_info_list.push_back(reply.node_info_list(index));
}
promise.set_value(true);
});
EXPECT_TRUE(WaitReady(promise.get_future(), timeout_ms_));
return node_info_list;
}
bool ReportHeartbeat(const rpc::ReportHeartbeatRequest &request) {
std::promise<bool> promise;
client_->ReportHeartbeat(request, [&promise](const Status &status,
const rpc::ReportHeartbeatReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool ReportBatchHeartbeat(const rpc::ReportBatchHeartbeatRequest &request) {
std::promise<bool> promise;
client_->ReportBatchHeartbeat(
request,
[&promise](const Status &status, const rpc::ReportBatchHeartbeatReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool UpdateResources(const rpc::UpdateResourcesRequest &request) {
std::promise<bool> promise;
client_->UpdateResources(request, [&promise](const Status &status,
const rpc::UpdateResourcesReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool DeleteResources(const rpc::DeleteResourcesRequest &request) {
std::promise<bool> promise;
client_->DeleteResources(request, [&promise](const Status &status,
const rpc::DeleteResourcesReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
std::map<std::string, gcs::ResourceTableData> GetResources(const std::string &node_id) {
rpc::GetResourcesRequest request;
request.set_node_id(node_id);
std::map<std::string, gcs::ResourceTableData> resources;
std::promise<bool> promise;
client_->GetResources(request,
[&resources, &promise](const Status &status,
const rpc::GetResourcesReply &reply) {
RAY_CHECK_OK(status);
for (auto resource : reply.resources()) {
resources[resource.first] = resource.second;
}
promise.set_value(true);
});
EXPECT_TRUE(WaitReady(promise.get_future(), timeout_ms_));
return resources;
}
bool AddObjectLocation(const rpc::AddObjectLocationRequest &request) {
std::promise<bool> promise;
client_->AddObjectLocation(
request,
[&promise](const Status &status, const rpc::AddObjectLocationReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool RemoveObjectLocation(const rpc::RemoveObjectLocationRequest &request) {
std::promise<bool> promise;
client_->RemoveObjectLocation(
request,
[&promise](const Status &status, const rpc::RemoveObjectLocationReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
std::vector<rpc::ObjectTableData> GetObjectLocations(const std::string &object_id) {
std::vector<rpc::ObjectTableData> object_locations;
rpc::GetObjectLocationsRequest request;
request.set_object_id(object_id);
std::promise<bool> promise;
client_->GetObjectLocations(
request, [&object_locations, &promise](
const Status &status, const rpc::GetObjectLocationsReply &reply) {
RAY_CHECK_OK(status);
for (int index = 0; index < reply.object_table_data_list_size(); ++index) {
object_locations.push_back(reply.object_table_data_list(index));
}
promise.set_value(true);
});
EXPECT_TRUE(WaitReady(promise.get_future(), timeout_ms_));
return object_locations;
}
bool AddTask(const rpc::AddTaskRequest &request) {
std::promise<bool> promise;
client_->AddTask(request,
[&promise](const Status &status, const rpc::AddTaskReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
rpc::TaskTableData GetTask(const std::string &task_id) {
rpc::TaskTableData task_data;
rpc::GetTaskRequest request;
request.set_task_id(task_id);
std::promise<bool> promise;
client_->GetTask(request, [&task_data, &promise](const Status &status,
const rpc::GetTaskReply &reply) {
if (status.ok()) {
task_data.CopyFrom(reply.task_data());
}
promise.set_value(true);
});
EXPECT_TRUE(WaitReady(promise.get_future(), timeout_ms_));
return task_data;
}
bool DeleteTasks(const rpc::DeleteTasksRequest &request) {
std::promise<bool> promise;
client_->DeleteTasks(
request, [&promise](const Status &status, const rpc::DeleteTasksReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
bool AddTaskLease(const rpc::AddTaskLeaseRequest &request) {
std::promise<bool> promise;
client_->AddTaskLease(
request, [&promise](const Status &status, const rpc::AddTaskLeaseReply &reply) {
RAY_CHECK_OK(status);
promise.set_value(true);
});
return WaitReady(promise.get_future(), timeout_ms_);
}
/// Synchronously record a task reconstruction attempt.
/// \return true iff the server replied within timeout_ms_.
bool AttemptTaskReconstruction(const rpc::AttemptTaskReconstructionRequest &request) {
  std::promise<bool> done;
  auto on_reply = [&done](const Status &status,
                          const rpc::AttemptTaskReconstructionReply &reply) {
    RAY_CHECK_OK(status);
    done.set_value(true);
  };
  client_->AttemptTaskReconstruction(request, on_reply);
  return WaitReady(done.get_future(), timeout_ms_);
}
/// Synchronously add a profiling-data entry.
/// \return true iff the server replied within timeout_ms_.
bool AddProfileData(const rpc::AddProfileDataRequest &request) {
  std::promise<bool> done;
  auto on_reply = [&done](const Status &status, const rpc::AddProfileDataReply &reply) {
    RAY_CHECK_OK(status);
    done.set_value(true);
  };
  client_->AddProfileData(request, on_reply);
  return WaitReady(done.get_future(), timeout_ms_);
}
/// Synchronously report a job-level error.
/// \return true iff the server replied within timeout_ms_.
bool ReportJobError(const rpc::ReportJobErrorRequest &request) {
  std::promise<bool> done;
  auto on_reply = [&done](const Status &status, const rpc::ReportJobErrorReply &reply) {
    RAY_CHECK_OK(status);
    done.set_value(true);
  };
  client_->ReportJobError(request, on_reply);
  return WaitReady(done.get_future(), timeout_ms_);
}
/// Synchronously report a worker failure.
/// \return true iff the server replied within timeout_ms_.
bool ReportWorkerFailure(const rpc::ReportWorkerFailureRequest &request) {
  std::promise<bool> done;
  auto on_reply = [&done](const Status &status,
                          const rpc::ReportWorkerFailureReply &reply) {
    RAY_CHECK_OK(status);
    done.set_value(true);
  };
  client_->ReportWorkerFailure(request, on_reply);
  return WaitReady(done.get_future(), timeout_ms_);
}
/// Block on `future` for at most `timeout_ms` milliseconds.
/// \return true if the future became ready before the deadline, false on timeout.
bool WaitReady(const std::future<bool> &future, uint64_t timeout_ms) {
  const auto wait_result = future.wait_for(std::chrono::milliseconds(timeout_ms));
  return wait_result == std::future_status::ready;
}
/// Build a JobTableData describing a live driver on localhost with a fixed pid.
rpc::JobTableData GenJobTableData(JobID job_id) {
  rpc::JobTableData data;
  data.set_job_id(job_id.Binary());
  data.set_is_dead(false);
  // Timestamp is wall-clock seconds; exact value is irrelevant to the tests.
  data.set_timestamp(std::time(nullptr));
  data.set_node_manager_address("127.0.0.1");
  data.set_driver_pid(5667L);
  return data;
}
/// Build an ALIVE actor entry with a freshly generated actor id under `job_id`.
rpc::ActorTableData GenActorTableData(const JobID &job_id) {
  const ActorID actor_id = ActorID::Of(job_id, RandomTaskId(), 0);
  rpc::ActorTableData data;
  data.set_actor_id(actor_id.Binary());
  data.set_job_id(job_id.Binary());
  data.set_state(rpc::ActorTableData_ActorState::ActorTableData_ActorState_ALIVE);
  data.set_max_reconstructions(1);
  data.set_remaining_reconstructions(1);
  return data;
}
/// Build a GcsNodeInfo in the ALIVE state for the given binary node id.
rpc::GcsNodeInfo GenGcsNodeInfo(const std::string &node_id) {
  rpc::GcsNodeInfo node_info;
  node_info.set_node_id(node_id);
  node_info.set_state(rpc::GcsNodeInfo_GcsNodeState_ALIVE);
  return node_info;
}
/// Build a TaskTableData whose embedded TaskSpec carries the given binary
/// job id and task id; all other spec fields are left at their defaults.
rpc::TaskTableData GenTaskTableData(const std::string &job_id,
                                    const std::string &task_id) {
  rpc::TaskSpec spec;
  spec.set_job_id(job_id);
  spec.set_task_id(task_id);
  rpc::Task task;
  task.mutable_task_spec()->CopyFrom(spec);
  rpc::TaskTableData data;
  data.mutable_task()->CopyFrom(task);
  return data;
}
/// Build a TaskLeaseData binding `task_id` to the node manager `node_id`.
rpc::TaskLeaseData GenTaskLeaseData(const std::string &task_id,
                                    const std::string &node_id) {
  rpc::TaskLeaseData lease;
  lease.set_task_id(task_id);
  lease.set_node_manager_id(node_id);
  return lease;
}
protected:
// The in-process GCS server under test.
std::unique_ptr<gcs::GcsServer> gcs_server_;
// Threads driving the two event loops; presumably started in the fixture's
// SetUp (outside this chunk) — TODO confirm.
std::unique_ptr<std::thread> thread_io_service_;
std::unique_ptr<std::thread> thread_gcs_server_;
// Event loop used on the client side for RPC completion callbacks.
boost::asio::io_service io_service_;
// RPC client connected to gcs_server_, plus the call manager that tracks its
// in-flight calls.
std::unique_ptr<rpc::GcsRpcClient> client_;
std::unique_ptr<rpc::ClientCallManager> client_call_manager_;
// Timeout waiting for a GCS server reply; default is 2s.
const uint64_t timeout_ms_ = 2000;
};
// End-to-end actor flow: register -> read back -> update state -> checkpoint.
// The steps are order-dependent; each read verifies the preceding write.
TEST_F(GcsServerTest, TestActorInfo) {
// Create actor_table_data (GenActorTableData produces an ALIVE entry).
JobID job_id = JobID::FromInt(1);
rpc::ActorTableData actor_table_data = GenActorTableData(job_id);
// Register actor, then confirm the stored entry is ALIVE.
rpc::RegisterActorInfoRequest register_actor_info_request;
register_actor_info_request.mutable_actor_table_data()->CopyFrom(actor_table_data);
ASSERT_TRUE(RegisterActorInfo(register_actor_info_request));
rpc::ActorTableData result = GetActorInfo(actor_table_data.actor_id());
ASSERT_TRUE(result.state() ==
rpc::ActorTableData_ActorState::ActorTableData_ActorState_ALIVE);
// Update actor state to DEAD and confirm the stored entry reflects it.
rpc::UpdateActorInfoRequest update_actor_info_request;
actor_table_data.set_state(
rpc::ActorTableData_ActorState::ActorTableData_ActorState_DEAD);
update_actor_info_request.set_actor_id(actor_table_data.actor_id());
update_actor_info_request.mutable_actor_table_data()->CopyFrom(actor_table_data);
ASSERT_TRUE(UpdateActorInfo(update_actor_info_request));
result = GetActorInfo(actor_table_data.actor_id());
ASSERT_TRUE(result.state() ==
rpc::ActorTableData_ActorState::ActorTableData_ActorState_DEAD);
// Add an actor checkpoint. The checkpoint id doubles as the execution
// dependency here — arbitrary bytes are fine for this test.
ActorCheckpointID checkpoint_id = ActorCheckpointID::FromRandom();
rpc::ActorCheckpointData checkpoint;
checkpoint.set_actor_id(actor_table_data.actor_id());
checkpoint.set_checkpoint_id(checkpoint_id.Binary());
checkpoint.set_execution_dependency(checkpoint_id.Binary());
rpc::AddActorCheckpointRequest add_actor_checkpoint_request;
add_actor_checkpoint_request.mutable_checkpoint_data()->CopyFrom(checkpoint);
ASSERT_TRUE(AddActorCheckpoint(add_actor_checkpoint_request));
// Read the checkpoint back by id, then confirm the actor's checkpoint-id
// index lists exactly the one checkpoint we added.
rpc::ActorCheckpointData checkpoint_result = GetActorCheckpoint(checkpoint_id.Binary());
ASSERT_TRUE(checkpoint_result.actor_id() == actor_table_data.actor_id());
ASSERT_TRUE(checkpoint_result.checkpoint_id() == checkpoint_id.Binary());
rpc::ActorCheckpointIdData checkpoint_id_result =
GetActorCheckpointID(actor_table_data.actor_id());
ASSERT_TRUE(checkpoint_id_result.actor_id() == actor_table_data.actor_id());
ASSERT_TRUE(checkpoint_id_result.checkpoint_ids_size() == 1);
}
// Job flow: register a job, then mark it finished.
TEST_F(GcsServerTest, TestJobInfo) {
  // Build a live-job entry.
  const JobID job_id = JobID::FromInt(1);
  rpc::JobTableData job_data = GenJobTableData(job_id);
  // Register the job.
  rpc::AddJobRequest add_request;
  add_request.mutable_data()->CopyFrom(job_data);
  ASSERT_TRUE(AddJob(add_request));
  // Mark the same job as finished.
  rpc::MarkJobFinishedRequest finish_request;
  finish_request.set_job_id(job_data.job_id());
  ASSERT_TRUE(MarkJobFinished(finish_request));
}
// Node flow: register -> heartbeat -> unregister -> resource update/delete.
// Steps are order-dependent; each read verifies the preceding write.
TEST_F(GcsServerTest, TestNodeInfo) {
// Create gcs node info (ALIVE).
ClientID node_id = ClientID::FromRandom();
rpc::GcsNodeInfo gcs_node_info = GenGcsNodeInfo(node_id.Binary());
// Register node info and confirm it shows up as the single ALIVE node.
rpc::RegisterNodeRequest register_node_info_request;
register_node_info_request.mutable_node_info()->CopyFrom(gcs_node_info);
ASSERT_TRUE(RegisterNode(register_node_info_request));
std::vector<rpc::GcsNodeInfo> node_info_list = GetAllNodeInfo();
ASSERT_TRUE(node_info_list.size() == 1);
ASSERT_TRUE(node_info_list[0].state() ==
rpc::GcsNodeInfo_GcsNodeState::GcsNodeInfo_GcsNodeState_ALIVE);
// Report a single heartbeat, then a batch heartbeat, for the same node.
rpc::ReportHeartbeatRequest report_heartbeat_request;
report_heartbeat_request.mutable_heartbeat()->set_client_id(node_id.Binary());
ASSERT_TRUE(ReportHeartbeat(report_heartbeat_request));
rpc::ReportBatchHeartbeatRequest report_batch_heartbeat_request;
report_batch_heartbeat_request.mutable_heartbeat_batch()->add_batch()->set_client_id(
node_id.Binary());
ASSERT_TRUE(ReportBatchHeartbeat(report_batch_heartbeat_request));
// Unregister node info. Note: the node entry is retained but marked DEAD,
// not removed — the list size stays 1.
rpc::UnregisterNodeRequest unregister_node_info_request;
unregister_node_info_request.set_node_id(node_id.Binary());
ASSERT_TRUE(UnregisterNode(unregister_node_info_request));
node_info_list = GetAllNodeInfo();
ASSERT_TRUE(node_info_list.size() == 1);
ASSERT_TRUE(node_info_list[0].state() ==
rpc::GcsNodeInfo_GcsNodeState::GcsNodeInfo_GcsNodeState_DEAD);
// Update node resources: one CPU with capacity 1.0.
rpc::UpdateResourcesRequest update_resources_request;
update_resources_request.set_node_id(node_id.Binary());
rpc::ResourceTableData resource_table_data;
resource_table_data.set_resource_capacity(1.0);
std::string resource_name = "CPU";
(*update_resources_request.mutable_resources())[resource_name] = resource_table_data;
ASSERT_TRUE(UpdateResources(update_resources_request));
auto resources = GetResources(node_id.Binary());
ASSERT_TRUE(resources.size() == 1);
// Delete node resources and confirm none remain.
rpc::DeleteResourcesRequest delete_resources_request;
delete_resources_request.set_node_id(node_id.Binary());
delete_resources_request.add_resource_name_list(resource_name);
ASSERT_TRUE(DeleteResources(delete_resources_request));
resources = GetResources(node_id.Binary());
ASSERT_TRUE(resources.size() == 0);
}
// Object location flow: add locations on two nodes, then remove one and
// verify the remaining location is the other node.
TEST_F(GcsServerTest, TestObjectInfo) {
// Create object table data: one object, two candidate nodes.
ObjectID object_id = ObjectID::FromRandom();
ClientID node1_id = ClientID::FromRandom();
ClientID node2_id = ClientID::FromRandom();
// Add the first object location and confirm it is the only one recorded.
rpc::AddObjectLocationRequest add_object_location_request;
add_object_location_request.set_object_id(object_id.Binary());
add_object_location_request.set_node_id(node1_id.Binary());
ASSERT_TRUE(AddObjectLocation(add_object_location_request));
std::vector<rpc::ObjectTableData> object_locations =
GetObjectLocations(object_id.Binary());
ASSERT_TRUE(object_locations.size() == 1);
ASSERT_TRUE(object_locations[0].manager() == node1_id.Binary());
// Reuse the same request for the second node; both locations should now exist.
add_object_location_request.set_node_id(node2_id.Binary());
ASSERT_TRUE(AddObjectLocation(add_object_location_request));
object_locations = GetObjectLocations(object_id.Binary());
ASSERT_TRUE(object_locations.size() == 2);
// Remove the first node's location; only node2 should remain.
rpc::RemoveObjectLocationRequest remove_object_location_request;
remove_object_location_request.set_object_id(object_id.Binary());
remove_object_location_request.set_node_id(node1_id.Binary());
ASSERT_TRUE(RemoveObjectLocation(remove_object_location_request));
object_locations = GetObjectLocations(object_id.Binary());
ASSERT_TRUE(object_locations.size() == 1);
ASSERT_TRUE(object_locations[0].manager() == node2_id.Binary());
}
// Task flow: add -> read back -> delete -> add lease -> attempt reconstruction.
TEST_F(GcsServerTest, TestTaskInfo) {
  // Create task_table_data. NOTE: the local was previously misnamed
  // `job_table_data` even though it holds a TaskTableData; renamed for clarity.
  JobID job_id = JobID::FromInt(1);
  TaskID task_id = TaskID::ForDriverTask(job_id);
  rpc::TaskTableData task_table_data =
      GenTaskTableData(job_id.Binary(), task_id.Binary());
  // Add the task and confirm the stored spec carries our job id.
  rpc::AddTaskRequest add_task_request;
  add_task_request.mutable_task_data()->CopyFrom(task_table_data);
  ASSERT_TRUE(AddTask(add_task_request));
  rpc::TaskTableData result = GetTask(task_id.Binary());
  ASSERT_TRUE(result.task().task_spec().job_id() == job_id.Binary());
  // Delete the task; a subsequent lookup returns an empty entry.
  rpc::DeleteTasksRequest delete_tasks_request;
  delete_tasks_request.add_task_id_list(task_id.Binary());
  ASSERT_TRUE(DeleteTasks(delete_tasks_request));
  result = GetTask(task_id.Binary());
  ASSERT_TRUE(!result.has_task());
  // Add a task lease binding the task to a random node.
  ClientID node_id = ClientID::FromRandom();
  rpc::TaskLeaseData task_lease_data =
      GenTaskLeaseData(task_id.Binary(), node_id.Binary());
  rpc::AddTaskLeaseRequest add_task_lease_request;
  add_task_lease_request.mutable_task_lease_data()->CopyFrom(task_lease_data);
  ASSERT_TRUE(AddTaskLease(add_task_lease_request));
  // Attempt a (first) task reconstruction on the same node.
  rpc::AttemptTaskReconstructionRequest attempt_task_reconstruction_request;
  rpc::TaskReconstructionData task_reconstruction_data;
  task_reconstruction_data.set_task_id(task_id.Binary());
  task_reconstruction_data.set_node_manager_id(node_id.Binary());
  task_reconstruction_data.set_num_reconstructions(0);
  attempt_task_reconstruction_request.mutable_task_reconstruction()->CopyFrom(
      task_reconstruction_data);
  ASSERT_TRUE(AttemptTaskReconstruction(attempt_task_reconstruction_request));
}
// Stats flow: a profile entry needs only a component id to be accepted.
TEST_F(GcsServerTest, TestStats) {
  rpc::ProfileTableData profile_data;
  profile_data.set_component_id(ClientID::FromRandom().Binary());
  rpc::AddProfileDataRequest request;
  request.mutable_profile_data()->CopyFrom(profile_data);
  ASSERT_TRUE(AddProfileData(request));
}
// Error flow: report a job-scoped error and expect the server to accept it.
TEST_F(GcsServerTest, TestErrorInfo) {
  const JobID job_id = JobID::FromInt(1);
  rpc::ErrorTableData error_data;
  error_data.set_job_id(job_id.Binary());
  rpc::ReportJobErrorRequest request;
  request.mutable_error_data()->CopyFrom(error_data);
  ASSERT_TRUE(ReportJobError(request));
}
// Worker flow: report a worker failure identified only by its RPC address.
TEST_F(GcsServerTest, TestWorkerInfo) {
  rpc::WorkerFailureData failure_data;
  failure_data.mutable_worker_address()->set_ip_address("127.0.0.1");
  failure_data.mutable_worker_address()->set_port(5566);
  rpc::ReportWorkerFailureRequest request;
  request.mutable_worker_failure()->CopyFrom(failure_data);
  ASSERT_TRUE(ReportWorkerFailure(request));
}
} // namespace ray
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
// The binary requires exactly three positional arguments: the paths to the
// redis-server executable, the redis client executable, and the Ray redis
// module shared library, which the test harness uses to launch Redis.
RAY_CHECK(argc == 4);
ray::REDIS_SERVER_EXEC_PATH = argv[1];
ray::REDIS_CLIENT_EXEC_PATH = argv[2];
ray::REDIS_MODULE_LIBRARY_PATH = argv[3];
return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/worker_info_handler_impl.cc
|
C++
|
#include "worker_info_handler_impl.h"
namespace ray {
namespace rpc {
/// Persist a reported worker failure via the GCS client and reply to the caller.
/// The reply is sent from `on_done` once the async write completes (or inline
/// if the write cannot even be dispatched).
void DefaultWorkerInfoHandler::HandleReportWorkerFailure(
const ReportWorkerFailureRequest &request, ReportWorkerFailureReply *reply,
SendReplyCallback send_reply_callback) {
Address worker_address = request.worker_failure().worker_address();
RAY_LOG(DEBUG) << "Reporting worker failure, " << worker_address.DebugString();
// Copy the payload into a shared_ptr so it outlives this handler frame while
// the async write is in flight.
auto worker_failure_data = std::make_shared<WorkerFailureData>();
worker_failure_data->CopyFrom(request.worker_failure());
// Completion callback: log on error, then reply with the final status either way.
auto on_done = [worker_address, send_reply_callback](Status status) {
if (!status.ok()) {
RAY_LOG(ERROR) << "Failed to report worker failure, "
<< worker_address.DebugString();
}
send_reply_callback(status, nullptr, nullptr);
};
Status status =
gcs_client_.Workers().AsyncReportWorkerFailure(worker_failure_data, on_done);
// A non-OK status here means the call was never dispatched; complete inline.
if (!status.ok()) {
on_done(status);
}
// NOTE(review): this fires right after dispatch, before on_done runs — the
// write may not actually have finished yet.
RAY_LOG(DEBUG) << "Finished reporting worker failure, " << worker_address.DebugString();
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/gcs_server/worker_info_handler_impl.h
|
C/C++ Header
|
#ifndef RAY_GCS_WORKER_INFO_HANDLER_IMPL_H
#define RAY_GCS_WORKER_INFO_HANDLER_IMPL_H
#include "ray/gcs/redis_gcs_client.h"
#include "ray/rpc/gcs_server/gcs_rpc_server.h"
namespace ray {
namespace rpc {
/// Default implementation of `WorkerInfoHandler`: serves worker-related GCS
/// RPCs (currently only worker-failure reports) by delegating to a
/// Redis-backed GCS client.
class DefaultWorkerInfoHandler : public rpc::WorkerInfoHandler {
public:
/// \param gcs_client Backing GCS client. Held by reference, so the caller
/// must keep it alive for the lifetime of this handler.
explicit DefaultWorkerInfoHandler(gcs::RedisGcsClient &gcs_client)
: gcs_client_(gcs_client) {}
/// Persist a reported worker failure and send the reply via
/// `send_reply_callback`.
void HandleReportWorkerFailure(const ReportWorkerFailureRequest &request,
ReportWorkerFailureReply *reply,
SendReplyCallback send_reply_callback) override;
private:
gcs::RedisGcsClient &gcs_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_GCS_WORKER_INFO_HANDLER_IMPL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.