file_path
stringlengths 3
280
| file_language
stringclasses 66
values | content
stringlengths 1
1.04M
| repo_name
stringlengths 5
92
| repo_stars
int64 0
154k
| repo_description
stringlengths 0
402
| repo_primary_language
stringclasses 108
values | developer_username
stringlengths 1
25
| developer_name
stringlengths 0
30
| developer_company
stringlengths 0
82
|
|---|---|---|---|---|---|---|---|---|---|
src/ray/gcs/pb_util.h
|
C/C++ Header
|
#ifndef RAY_GCS_PB_UTIL_H
#define RAY_GCS_PB_UTIL_H
#include <memory>
#include "ray/common/id.h"
#include "ray/common/task/task_spec.h"
#include "ray/protobuf/gcs.pb.h"
namespace ray {
namespace gcs {
/// Helper function to produce job table data (for newly created job or updated job).
///
/// \param job_id The ID of job that need to be registered or updated.
/// \param is_dead Whether the driver of this job is dead.
/// \param timestamp The UNIX timestamp of corresponding to this event.
/// \param node_manager_address Address of the node this job was started on.
/// \param driver_pid Process ID of the driver running this job.
/// \return The job table data created by this method.
/// Build a JobTableData message describing a new or updated job.
///
/// \param job_id ID of the job being registered or updated.
/// \param is_dead Whether the driver of this job has exited.
/// \param timestamp UNIX timestamp of the corresponding event.
/// \param node_manager_address Address of the node the job was started on.
/// \param driver_pid Process ID of the driver running this job.
/// \return Shared pointer to the populated protobuf message.
inline std::shared_ptr<ray::rpc::JobTableData> CreateJobTableData(
    const ray::JobID &job_id, bool is_dead, int64_t timestamp,
    const std::string &node_manager_address, int64_t driver_pid) {
  auto data = std::make_shared<ray::rpc::JobTableData>();
  data->set_job_id(job_id.Binary());
  data->set_driver_pid(driver_pid);
  data->set_node_manager_address(node_manager_address);
  data->set_timestamp(timestamp);
  data->set_is_dead(is_dead);
  return data;
}
/// Helper function to produce error table data.
/// Build an ErrorTableData message for an error report.
///
/// \param error_type Category of the error.
/// \param error_msg Human-readable error message.
/// \param timestamp Time the error occurred.
/// \param job_id Job the error belongs to; defaults to the nil job id.
/// \return Shared pointer to the populated protobuf message.
inline std::shared_ptr<ray::rpc::ErrorTableData> CreateErrorTableData(
    const std::string &error_type, const std::string &error_msg, double timestamp,
    const JobID &job_id = JobID::Nil()) {
  auto data = std::make_shared<ray::rpc::ErrorTableData>();
  data->set_job_id(job_id.Binary());
  data->set_type(error_type);
  data->set_error_message(error_msg);
  data->set_timestamp(timestamp);
  return data;
}
/// Helper function to produce actor table data.
/// Build an ActorTableData entry from an actor-creation task.
///
/// \param task_spec The task specification; must be an actor creation task.
/// \param address The RPC address to record for the actor.
/// \param state The actor state to record.
/// \param remaining_reconstructions Reconstruction budget left for the actor.
/// \return Shared pointer to the populated protobuf message.
inline std::shared_ptr<ray::rpc::ActorTableData> CreateActorTableData(
    const TaskSpecification &task_spec, const ray::rpc::Address &address,
    ray::rpc::ActorTableData::ActorState state, uint64_t remaining_reconstructions) {
  RAY_CHECK(task_spec.IsActorCreationTask());
  auto data = std::make_shared<ray::rpc::ActorTableData>();
  // Static fields: fixed for the lifetime of the actor, even across
  // failures and reconstructions.
  data->set_actor_id(task_spec.ActorCreationId().Binary());
  data->set_parent_id(task_spec.CallerId().Binary());
  data->set_actor_creation_dummy_object_id(task_spec.ActorDummyObject().Binary());
  data->set_job_id(task_spec.JobId().Binary());
  data->set_max_reconstructions(task_spec.MaxActorReconstructions());
  data->set_is_detached(task_spec.IsDetachedActor());
  // Dynamic fields: refreshed each time the actor is restarted.
  data->set_remaining_reconstructions(remaining_reconstructions);
  data->set_is_direct_call(task_spec.IsDirectCall());
  data->mutable_address()->CopyFrom(address);
  data->mutable_owner_address()->CopyFrom(task_spec.GetMessage().caller_address());
  data->set_state(state);
  return data;
}
/// Helper function to produce worker failure data.
/// Build a WorkerFailureData message describing a failed worker.
///
/// \param raylet_id ID of the raylet the worker belonged to.
/// \param worker_id ID of the failed worker.
/// \param address IP address of the worker.
/// \param port Port the worker was listening on.
/// \param timestamp Failure time; defaults to "now".
/// \return Shared pointer to the populated protobuf message.
inline std::shared_ptr<ray::rpc::WorkerFailureData> CreateWorkerFailureData(
    const ClientID &raylet_id, const WorkerID &worker_id, const std::string &address,
    int32_t port, int64_t timestamp = std::time(nullptr)) {
  auto data = std::make_shared<ray::rpc::WorkerFailureData>();
  // Fill in the nested worker address once via a cached pointer.
  auto *worker_address = data->mutable_worker_address();
  worker_address->set_raylet_id(raylet_id.Binary());
  worker_address->set_worker_id(worker_id.Binary());
  worker_address->set_ip_address(address);
  worker_address->set_port(port);
  data->set_timestamp(timestamp);
  return data;
}
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_PB_UTIL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_accessor.cc
|
C++
|
#include "ray/gcs/redis_accessor.h"
#include <boost/none.hpp>
#include "ray/gcs/pb_util.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/util/logging.h"
namespace ray {
namespace gcs {
// Redis-backed actor accessor. `client_impl` must outlive this object; all
// reads/writes are delegated to tables it owns.
RedisActorInfoAccessor::RedisActorInfoAccessor(RedisGcsClient *client_impl)
    : client_impl_(client_impl), actor_sub_executor_(client_impl_->actor_table()) {}
// Look up an actor's current entry. The actor table is a log of state
// entries; the newest append (data.back()) is the current state.
Status RedisActorInfoAccessor::AsyncGet(
    const ActorID &actor_id, const OptionalItemCallback<ActorTableData> &callback) {
  RAY_CHECK(callback != nullptr);
  auto on_done = [callback](RedisGcsClient *client, const ActorID &actor_id,
                            const std::vector<ActorTableData> &data) {
    boost::optional<ActorTableData> result;
    if (!data.empty()) {
      // Report only the most recent log entry.
      result = data.back();
    }
    // A miss is reported as OK + empty optional, not as an error status.
    callback(Status::OK(), result);
  };
  // JobID::Nil(): the actor table appears to be keyed by actor id alone —
  // TODO confirm against tables.h.
  return client_impl_->actor_table().Lookup(JobID::Nil(), actor_id, on_done);
}
// Register a new actor by appending the first entry to its log.
// `callback` is optional; when null, completion is not reported.
Status RedisActorInfoAccessor::AsyncRegister(
    const std::shared_ptr<ActorTableData> &data_ptr, const StatusCallback &callback) {
  auto on_success = [callback](RedisGcsClient *client, const ActorID &actor_id,
                               const ActorTableData &data) {
    if (callback != nullptr) {
      callback(Status::OK());
    }
  };
  auto on_failure = [callback](RedisGcsClient *client, const ActorID &actor_id,
                               const ActorTableData &data) {
    if (callback != nullptr) {
      callback(Status::Invalid("Adding actor failed."));
    }
  };
  ActorID actor_id = ActorID::FromBinary(data_ptr->actor_id());
  // AppendAt with log_length 0 presumably succeeds only when the log is
  // still empty, so a duplicate registration takes the failure path —
  // TODO confirm AppendAt's contract in tables.h.
  return client_impl_->actor_table().AppendAt(JobID::Nil(), actor_id, data_ptr,
                                              on_success, on_failure,
                                              /*log_length*/ 0);
}
// Append an actor state transition at the log position implied by the
// reconstruction counters; a mismatched position makes the append fail, so
// two racing updaters cannot both win.
Status RedisActorInfoAccessor::AsyncUpdate(
    const ActorID &actor_id, const std::shared_ptr<ActorTableData> &data_ptr,
    const StatusCallback &callback) {
  // The actor log starts with an ALIVE entry. This is followed by 0 to N pairs
  // of (RECONSTRUCTING, ALIVE) entries, where N is the maximum number of
  // reconstructions. This is followed optionally by a DEAD entry.
  int log_length =
      2 * (data_ptr->max_reconstructions() - data_ptr->remaining_reconstructions());
  if (data_ptr->state() != ActorTableData::ALIVE) {
    // RECONSTRUCTING or DEAD entries have an odd index.
    log_length += 1;
  }
  RAY_LOG(DEBUG) << "AsyncUpdate actor state to " << data_ptr->state()
                 << ", actor id: " << actor_id << ", log_length: " << log_length;
  auto on_success = [callback](RedisGcsClient *client, const ActorID &actor_id,
                               const ActorTableData &data) {
    // If we successfully appended a record to the GCS table of the actor that
    // has died, signal this to anyone receiving signals from this actor.
    if (data.state() == ActorTableData::DEAD ||
        data.state() == ActorTableData::RECONSTRUCTING) {
      // Publish via a Redis stream keyed by the actor's hex id.
      std::vector<std::string> args = {"XADD", actor_id.Hex(), "*", "signal",
                                       "ACTOR_DIED_SIGNAL"};
      auto redis_context = client->primary_context();
      RAY_CHECK_OK(redis_context->RunArgvAsync(args));
    }
    if (callback != nullptr) {
      callback(Status::OK());
    }
  };
  auto on_failure = [callback](RedisGcsClient *client, const ActorID &actor_id,
                               const ActorTableData &data) {
    if (callback != nullptr) {
      callback(Status::Invalid("Updating actor failed."));
    }
  };
  return client_impl_->actor_table().AppendAt(JobID::Nil(), actor_id, data_ptr,
                                              on_success, on_failure, log_length);
}
// Subscribe to updates of all actors; ClientID::Nil() selects the broadcast
// channel rather than a per-client one — TODO confirm in subscription_executor.h.
Status RedisActorInfoAccessor::AsyncSubscribeAll(
    const SubscribeCallback<ActorID, ActorTableData> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  return actor_sub_executor_.AsyncSubscribeAll(ClientID::Nil(), subscribe, done);
}
// Subscribe to a single actor's updates under this accessor's random
// subscribe id (see the comment on subscribe_id_ in the header).
Status RedisActorInfoAccessor::AsyncSubscribe(
    const ActorID &actor_id, const SubscribeCallback<ActorID, ActorTableData> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  return actor_sub_executor_.AsyncSubscribe(subscribe_id_, actor_id, subscribe, done);
}
// Cancel a per-actor subscription made with AsyncSubscribe.
Status RedisActorInfoAccessor::AsyncUnsubscribe(const ActorID &actor_id,
                                                const StatusCallback &done) {
  return actor_sub_executor_.AsyncUnsubscribe(subscribe_id_, actor_id, done);
}
// Persist an actor checkpoint in two phases: first write the checkpoint data
// keyed by checkpoint id, then (in the completion callback) record the
// checkpoint id under the actor. `callback` may be nullptr, consistent with
// the other write paths in this file.
Status RedisActorInfoAccessor::AsyncAddCheckpoint(
    const std::shared_ptr<ActorCheckpointData> &data_ptr,
    const StatusCallback &callback) {
  auto on_add_data_done = [callback, data_ptr, this](
                              RedisGcsClient *client,
                              const ActorCheckpointID &checkpoint_id,
                              const ActorCheckpointData &data) {
    ActorID actor_id = ActorID::FromBinary(data_ptr->actor_id());
    Status status = AsyncAddCheckpointID(actor_id, checkpoint_id, callback);
    // BUGFIX: `callback` is optional (nullable) throughout this file, but the
    // original invoked it unconditionally here; calling an empty
    // std::function throws std::bad_function_call.
    if (!status.ok() && callback != nullptr) {
      callback(status);
    }
  };
  ActorCheckpointID checkpoint_id =
      ActorCheckpointID::FromBinary(data_ptr->checkpoint_id());
  ActorCheckpointTable &actor_cp_table = client_impl_->actor_checkpoint_table();
  return actor_cp_table.Add(JobID::Nil(), checkpoint_id, data_ptr, on_add_data_done);
}
// Fetch checkpoint data by checkpoint id.
Status RedisActorInfoAccessor::AsyncGetCheckpoint(
    const ActorCheckpointID &checkpoint_id,
    const OptionalItemCallback<ActorCheckpointData> &callback) {
  RAY_CHECK(callback != nullptr);
  auto on_success = [callback](RedisGcsClient *client,
                               const ActorCheckpointID &checkpoint_id,
                               const ActorCheckpointData &checkpoint_data) {
    boost::optional<ActorCheckpointData> optional(checkpoint_data);
    callback(Status::OK(), std::move(optional));
  };
  auto on_failure = [callback](RedisGcsClient *client,
                               const ActorCheckpointID &checkpoint_id) {
    // Miss: error status plus an empty optional.
    boost::optional<ActorCheckpointData> optional;
    callback(Status::Invalid("Invalid checkpoint id."), std::move(optional));
  };
  ActorCheckpointTable &actor_cp_table = client_impl_->actor_checkpoint_table();
  return actor_cp_table.Lookup(JobID::Nil(), checkpoint_id, on_success, on_failure);
}
// Fetch the checkpoint-id record associated with an actor.
Status RedisActorInfoAccessor::AsyncGetCheckpointID(
    const ActorID &actor_id,
    const OptionalItemCallback<ActorCheckpointIdData> &callback) {
  RAY_CHECK(callback != nullptr);
  auto on_success = [callback](RedisGcsClient *client, const ActorID &actor_id,
                               const ActorCheckpointIdData &data) {
    boost::optional<ActorCheckpointIdData> optional(data);
    callback(Status::OK(), std::move(optional));
  };
  auto on_failure = [callback](RedisGcsClient *client, const ActorID &actor_id) {
    boost::optional<ActorCheckpointIdData> optional;
    callback(Status::Invalid("Checkpoint not found."), std::move(optional));
  };
  ActorCheckpointIdTable &cp_id_table = client_impl_->actor_checkpoint_id_table();
  return cp_id_table.Lookup(JobID::Nil(), actor_id, on_success, on_failure);
}
// Record a checkpoint id under its actor. `callback` may be nullptr, in
// which case completion is not reported.
Status RedisActorInfoAccessor::AsyncAddCheckpointID(
    const ActorID &actor_id, const ActorCheckpointID &checkpoint_id,
    const StatusCallback &callback) {
  ActorCheckpointIdTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ActorID &actor_id,
                         const ActorCheckpointIdData &data) { callback(Status::OK()); };
  }
  ActorCheckpointIdTable &cp_id_table = client_impl_->actor_checkpoint_id_table();
  return cp_id_table.AddCheckpointId(JobID::Nil(), actor_id, checkpoint_id, on_done);
}
RedisJobInfoAccessor::RedisJobInfoAccessor(RedisGcsClient *client_impl)
    : client_impl_(client_impl), job_sub_executor_(client_impl->job_table()) {}
// Add (or update) a job entry by appending to the job log.
Status RedisJobInfoAccessor::AsyncAdd(const std::shared_ptr<JobTableData> &data_ptr,
                                      const StatusCallback &callback) {
  return DoAsyncAppend(data_ptr, callback);
}
// Mark a job finished by appending an is_dead entry with the current time
// and placeholder address/pid fields.
Status RedisJobInfoAccessor::AsyncMarkFinished(const JobID &job_id,
                                               const StatusCallback &callback) {
  std::shared_ptr<JobTableData> data_ptr =
      CreateJobTableData(job_id, /*is_dead*/ true, /*time_stamp*/ std::time(nullptr),
                         /*node_manager_address*/ "", /*driver_pid*/ -1);
  return DoAsyncAppend(data_ptr, callback);
}
// Shared append path for AsyncAdd / AsyncMarkFinished. `callback` may be
// nullptr, in which case completion is not reported.
Status RedisJobInfoAccessor::DoAsyncAppend(const std::shared_ptr<JobTableData> &data_ptr,
                                           const StatusCallback &callback) {
  JobTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const JobID &job_id,
                         const JobTableData &data) { callback(Status::OK()); };
  }
  JobID job_id = JobID::FromBinary(data_ptr->job_id());
  // The job's own id serves as both the driver id and the table key.
  return client_impl_->job_table().Append(job_id, job_id, data_ptr, on_done);
}
// Subscribe to job updates, forwarding only entries whose is_dead flag is
// set — i.e. finished-job notifications.
Status RedisJobInfoAccessor::AsyncSubscribeToFinishedJobs(
    const SubscribeCallback<JobID, JobTableData> &subscribe, const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  auto on_subscribe = [subscribe](const JobID &job_id, const JobTableData &job_data) {
    if (job_data.is_dead()) {
      subscribe(job_id, job_data);
    }
  };
  return job_sub_executor_.AsyncSubscribeAll(ClientID::Nil(), on_subscribe, done);
}
RedisTaskInfoAccessor::RedisTaskInfoAccessor(RedisGcsClient *client_impl)
    : client_impl_(client_impl),
      task_sub_executor_(client_impl->raylet_task_table()),
      task_lease_sub_executor_(client_impl->task_lease_table()) {}
// Add a task entry; the key is the task id embedded in the task spec.
// `callback` may be nullptr.
Status RedisTaskInfoAccessor::AsyncAdd(const std::shared_ptr<TaskTableData> &data_ptr,
                                       const StatusCallback &callback) {
  raylet::TaskTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const TaskID &task_id,
                         const TaskTableData &data) { callback(Status::OK()); };
  }
  TaskID task_id = TaskID::FromBinary(data_ptr->task().task_spec().task_id());
  raylet::TaskTable &task_table = client_impl_->raylet_task_table();
  return task_table.Add(JobID::Nil(), task_id, data_ptr, on_done);
}
// Look up a task entry; a miss is reported as an Invalid status with an
// empty optional.
Status RedisTaskInfoAccessor::AsyncGet(
    const TaskID &task_id, const OptionalItemCallback<TaskTableData> &callback) {
  RAY_CHECK(callback != nullptr);
  auto on_success = [callback](RedisGcsClient *client, const TaskID &task_id,
                               const TaskTableData &data) {
    boost::optional<TaskTableData> result(data);
    callback(Status::OK(), result);
  };
  auto on_failure = [callback](RedisGcsClient *client, const TaskID &task_id) {
    boost::optional<TaskTableData> result;
    callback(Status::Invalid("Task not exist."), result);
  };
  raylet::TaskTable &task_table = client_impl_->raylet_task_table();
  return task_table.Lookup(JobID::Nil(), task_id, on_success, on_failure);
}
// Delete a batch of task entries. The delete is fire-and-forget: the
// callback (if any) is invoked immediately with OK, not on backend ack.
Status RedisTaskInfoAccessor::AsyncDelete(const std::vector<TaskID> &task_ids,
                                          const StatusCallback &callback) {
  raylet::TaskTable &task_table = client_impl_->raylet_task_table();
  task_table.Delete(JobID::Nil(), task_ids);
  if (callback) {
    callback(Status::OK());
  }
  // TODO(micafan) Always return OK here.
  // Confirm if we need to handle the deletion failure and how to handle it.
  return Status::OK();
}
// Subscribe to a single task's updates under this accessor's random
// subscribe id.
Status RedisTaskInfoAccessor::AsyncSubscribe(
    const TaskID &task_id, const SubscribeCallback<TaskID, TaskTableData> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  return task_sub_executor_.AsyncSubscribe(subscribe_id_, task_id, subscribe, done);
}
// Cancel a per-task subscription.
Status RedisTaskInfoAccessor::AsyncUnsubscribe(const TaskID &task_id,
                                               const StatusCallback &done) {
  return task_sub_executor_.AsyncUnsubscribe(subscribe_id_, task_id, done);
}
// Record a task lease, keyed by the task id in the payload. `callback` may
// be nullptr.
Status RedisTaskInfoAccessor::AsyncAddTaskLease(
    const std::shared_ptr<TaskLeaseData> &data_ptr, const StatusCallback &callback) {
  TaskLeaseTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const TaskID &id,
                         const TaskLeaseData &data) { callback(Status::OK()); };
  }
  TaskID task_id = TaskID::FromBinary(data_ptr->task_id());
  TaskLeaseTable &task_lease_table = client_impl_->task_lease_table();
  return task_lease_table.Add(JobID::Nil(), task_id, data_ptr, on_done);
}
// Subscribe to lease changes for one task; the optional payload presumably
// signals lease expiry when empty — TODO confirm against tables.h.
Status RedisTaskInfoAccessor::AsyncSubscribeTaskLease(
    const TaskID &task_id,
    const SubscribeCallback<TaskID, boost::optional<TaskLeaseData>> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  return task_lease_sub_executor_.AsyncSubscribe(subscribe_id_, task_id, subscribe, done);
}
// Cancel a task-lease subscription.
Status RedisTaskInfoAccessor::AsyncUnsubscribeTaskLease(const TaskID &task_id,
                                                        const StatusCallback &done) {
  return task_lease_sub_executor_.AsyncUnsubscribe(subscribe_id_, task_id, done);
}
// Claim a reconstruction attempt by appending at the position given by the
// attempt number; losing a race to another reconstructor takes the failure
// path. `callback` may be nullptr.
Status RedisTaskInfoAccessor::AttemptTaskReconstruction(
    const std::shared_ptr<TaskReconstructionData> &data_ptr,
    const StatusCallback &callback) {
  TaskReconstructionLog::WriteCallback on_success = nullptr;
  TaskReconstructionLog::WriteCallback on_failure = nullptr;
  if (callback != nullptr) {
    on_success = [callback](RedisGcsClient *client, const TaskID &id,
                            const TaskReconstructionData &data) {
      callback(Status::OK());
    };
    on_failure = [callback](RedisGcsClient *client, const TaskID &id,
                            const TaskReconstructionData &data) {
      callback(Status::Invalid("Updating task reconstruction failed."));
    };
  }
  TaskID task_id = TaskID::FromBinary(data_ptr->task_id());
  int reconstruction_attempt = data_ptr->num_reconstructions();
  TaskReconstructionLog &task_reconstruction_log =
      client_impl_->task_reconstruction_log();
  return task_reconstruction_log.AppendAt(JobID::Nil(), task_id, data_ptr, on_success,
                                          on_failure, reconstruction_attempt);
}
RedisObjectInfoAccessor::RedisObjectInfoAccessor(RedisGcsClient *client_impl)
    : client_impl_(client_impl), object_sub_executor_(client_impl->object_table()) {}
// Fetch all recorded locations of an object.
Status RedisObjectInfoAccessor::AsyncGetLocations(
    const ObjectID &object_id, const MultiItemCallback<ObjectTableData> &callback) {
  RAY_CHECK(callback != nullptr);
  auto on_done = [callback](RedisGcsClient *client, const ObjectID &object_id,
                            const std::vector<ObjectTableData> &data) {
    callback(Status::OK(), data);
  };
  ObjectTable &object_table = client_impl_->object_table();
  return object_table.Lookup(JobID::Nil(), object_id, on_done);
}
// Record that `node_id` holds a copy of `object_id`. `callback` may be
// nullptr.
Status RedisObjectInfoAccessor::AsyncAddLocation(const ObjectID &object_id,
                                                 const ClientID &node_id,
                                                 const StatusCallback &callback) {
  std::function<void(RedisGcsClient * client, const ObjectID &id,
                     const ObjectTableData &data)>
      on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ObjectID &object_id,
                         const ObjectTableData &data) { callback(Status::OK()); };
  }
  // A location record carries only the manager (node) field.
  std::shared_ptr<ObjectTableData> data_ptr = std::make_shared<ObjectTableData>();
  data_ptr->set_manager(node_id.Binary());
  ObjectTable &object_table = client_impl_->object_table();
  return object_table.Add(JobID::Nil(), object_id, data_ptr, on_done);
}
// Remove `node_id` from `object_id`'s location set. `callback` may be
// nullptr.
Status RedisObjectInfoAccessor::AsyncRemoveLocation(const ObjectID &object_id,
                                                    const ClientID &node_id,
                                                    const StatusCallback &callback) {
  std::function<void(RedisGcsClient * client, const ObjectID &id,
                     const ObjectTableData &data)>
      on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ObjectID &object_id,
                         const ObjectTableData &data) { callback(Status::OK()); };
  }
  std::shared_ptr<ObjectTableData> data_ptr = std::make_shared<ObjectTableData>();
  data_ptr->set_manager(node_id.Binary());
  ObjectTable &object_table = client_impl_->object_table();
  return object_table.Remove(JobID::Nil(), object_id, data_ptr, on_done);
}
// Subscribe to location changes of one object under this accessor's random
// subscribe id.
Status RedisObjectInfoAccessor::AsyncSubscribeToLocations(
    const ObjectID &object_id,
    const SubscribeCallback<ObjectID, ObjectChangeNotification> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  return object_sub_executor_.AsyncSubscribe(subscribe_id_, object_id, subscribe, done);
}
// Cancel an object-location subscription.
Status RedisObjectInfoAccessor::AsyncUnsubscribeToLocations(const ObjectID &object_id,
                                                            const StatusCallback &done) {
  return object_sub_executor_.AsyncUnsubscribe(subscribe_id_, object_id, done);
}
RedisNodeInfoAccessor::RedisNodeInfoAccessor(RedisGcsClient *client_impl)
    : client_impl_(client_impl),
      resource_sub_executor_(client_impl_->resource_table()),
      heartbeat_sub_executor_(client_impl->heartbeat_table()),
      heartbeat_batch_sub_executor_(client_impl->heartbeat_batch_table()) {}
// Register the local node by connecting it to the client table.
Status RedisNodeInfoAccessor::RegisterSelf(const GcsNodeInfo &local_node_info) {
  ClientTable &client_table = client_impl_->client_table();
  return client_table.Connect(local_node_info);
}
// Unregister the local node (client-table disconnect).
Status RedisNodeInfoAccessor::UnregisterSelf() {
  ClientTable &client_table = client_impl_->client_table();
  return client_table.Disconnect();
}
// ID of the locally connected node, as held by the client table.
const ClientID &RedisNodeInfoAccessor::GetSelfId() const {
  ClientTable &client_table = client_impl_->client_table();
  return client_table.GetLocalClientId();
}
// Full info of the locally connected node.
const GcsNodeInfo &RedisNodeInfoAccessor::GetSelfInfo() const {
  ClientTable &client_table = client_impl_->client_table();
  return client_table.GetLocalClient();
}
// Mark a node as connected. `callback` may be nullptr.
Status RedisNodeInfoAccessor::AsyncRegister(const GcsNodeInfo &node_info,
                                            const StatusCallback &callback) {
  ClientTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ClientID &id,
                         const GcsNodeInfo &data) { callback(Status::OK()); };
  }
  ClientTable &client_table = client_impl_->client_table();
  return client_table.MarkConnected(node_info, on_done);
}
// Mark a node as disconnected. `callback` may be nullptr.
Status RedisNodeInfoAccessor::AsyncUnregister(const ClientID &node_id,
                                              const StatusCallback &callback) {
  ClientTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ClientID &id,
                         const GcsNodeInfo &data) { callback(Status::OK()); };
  }
  ClientTable &client_table = client_impl_->client_table();
  return client_table.MarkDisconnected(node_id, on_done);
}
// Subscribe to node membership changes via the client table's own channel.
Status RedisNodeInfoAccessor::AsyncSubscribeToNodeChange(
    const SubscribeCallback<ClientID, GcsNodeInfo> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  ClientTable &client_table = client_impl_->client_table();
  return client_table.SubscribeToNodeChange(subscribe, done);
}
/// Fetch every node's info from the client table, deduplicated by node id.
///
/// The returned log can carry multiple entries per node (presumably
/// connect/disconnect records); the log is scanned newest-first and only the
/// most recent entry for each node id is kept.
Status RedisNodeInfoAccessor::AsyncGetAll(
    const MultiItemCallback<GcsNodeInfo> &callback) {
  RAY_CHECK(callback != nullptr);
  auto on_done = [callback](RedisGcsClient *client, const ClientID &id,
                            const std::vector<GcsNodeInfo> &data) {
    std::set<std::string> seen_node_ids;
    std::vector<GcsNodeInfo> unique_nodes;
    for (auto it = data.rbegin(); it != data.rend(); ++it) {
      const bool first_occurrence = seen_node_ids.insert(it->node_id()).second;
      if (first_occurrence) {
        unique_nodes.push_back(*it);
      }
    }
    callback(Status::OK(), unique_nodes);
  };
  ClientTable &client_table = client_impl_->client_table();
  return client_table.Lookup(on_done);
}
// Synchronous lookup of a node in the local client-table cache; returns an
// empty optional when the node is unknown.
boost::optional<GcsNodeInfo> RedisNodeInfoAccessor::Get(const ClientID &node_id) const {
  GcsNodeInfo node_info;
  ClientTable &client_table = client_impl_->client_table();
  bool found = client_table.GetClient(node_id, &node_info);
  boost::optional<GcsNodeInfo> optional_node;
  if (found) {
    optional_node = std::move(node_info);
  }
  return optional_node;
}
// All cached nodes, keyed by node id. The reference is owned by the client
// table inside client_impl_.
const std::unordered_map<ClientID, GcsNodeInfo> &RedisNodeInfoAccessor::GetAll() const {
  ClientTable &client_table = client_impl_->client_table();
  return client_table.GetAllClients();
}
// Whether the node has been marked removed in the client table.
bool RedisNodeInfoAccessor::IsRemoved(const ClientID &node_id) const {
  ClientTable &client_table = client_impl_->client_table();
  return client_table.IsRemoved(node_id);
}
// Write a node heartbeat, keyed by the client id inside the payload.
// `callback` may be nullptr.
Status RedisNodeInfoAccessor::AsyncReportHeartbeat(
    const std::shared_ptr<HeartbeatTableData> &data_ptr, const StatusCallback &callback) {
  HeartbeatTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ClientID &node_id,
                         const HeartbeatTableData &data) { callback(Status::OK()); };
  }
  ClientID node_id = ClientID::FromBinary(data_ptr->client_id());
  HeartbeatTable &heartbeat_table = client_impl_->heartbeat_table();
  return heartbeat_table.Add(JobID::Nil(), node_id, data_ptr, on_done);
}
// Subscribe to every node's heartbeats.
Status RedisNodeInfoAccessor::AsyncSubscribeHeartbeat(
    const SubscribeCallback<ClientID, HeartbeatTableData> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  auto on_subscribe = [subscribe](const ClientID &node_id,
                                  const HeartbeatTableData &data) {
    subscribe(node_id, data);
  };
  return heartbeat_sub_executor_.AsyncSubscribeAll(ClientID::Nil(), on_subscribe, done);
}
// Write an aggregated heartbeat batch under the nil client id (a single
// shared key). `callback` may be nullptr.
Status RedisNodeInfoAccessor::AsyncReportBatchHeartbeat(
    const std::shared_ptr<HeartbeatBatchTableData> &data_ptr,
    const StatusCallback &callback) {
  HeartbeatBatchTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ClientID &node_id,
                         const HeartbeatBatchTableData &data) { callback(Status::OK()); };
  }
  HeartbeatBatchTable &hb_batch_table = client_impl_->heartbeat_batch_table();
  return hb_batch_table.Add(JobID::Nil(), ClientID::Nil(), data_ptr, on_done);
}
// Subscribe to heartbeat batches; the node-id key is dropped before
// forwarding because batches are not per-node.
Status RedisNodeInfoAccessor::AsyncSubscribeBatchHeartbeat(
    const ItemCallback<HeartbeatBatchTableData> &subscribe, const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  auto on_subscribe = [subscribe](const ClientID &node_id,
                                  const HeartbeatBatchTableData &data) {
    subscribe(data);
  };
  return heartbeat_batch_sub_executor_.AsyncSubscribeAll(ClientID::Nil(), on_subscribe,
                                                         done);
}
// Fetch a node's dynamic resource map; an empty map is reported as "no
// value" (empty optional with OK status).
Status RedisNodeInfoAccessor::AsyncGetResources(
    const ClientID &node_id, const OptionalItemCallback<ResourceMap> &callback) {
  RAY_CHECK(callback != nullptr);
  auto on_done = [callback](RedisGcsClient *client, const ClientID &id,
                            const ResourceMap &data) {
    boost::optional<ResourceMap> result;
    if (!data.empty()) {
      result = data;
    }
    callback(Status::OK(), result);
  };
  DynamicResourceTable &resource_table = client_impl_->resource_table();
  return resource_table.Lookup(JobID::Nil(), node_id, on_done);
}
// Upsert resource entries for a node. `callback` may be nullptr.
Status RedisNodeInfoAccessor::AsyncUpdateResources(const ClientID &node_id,
                                                   const ResourceMap &resources,
                                                   const StatusCallback &callback) {
  Hash<ClientID, ResourceTableData>::HashCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ClientID &node_id,
                         const ResourceMap &resources) { callback(Status::OK()); };
  }
  DynamicResourceTable &resource_table = client_impl_->resource_table();
  return resource_table.Update(JobID::Nil(), node_id, resources, on_done);
}
// Remove the named resource entries from a node. `callback` may be nullptr.
Status RedisNodeInfoAccessor::AsyncDeleteResources(
    const ClientID &node_id, const std::vector<std::string> &resource_names,
    const StatusCallback &callback) {
  Hash<ClientID, ResourceTableData>::HashRemoveCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const ClientID &node_id,
                         const std::vector<std::string> &resource_names) {
      callback(Status::OK());
    };
  }
  DynamicResourceTable &resource_table = client_impl_->resource_table();
  return resource_table.RemoveEntries(JobID::Nil(), node_id, resource_names, on_done);
}
// Subscribe to resource-change notifications for all nodes.
Status RedisNodeInfoAccessor::AsyncSubscribeToResources(
    const SubscribeCallback<ClientID, ResourceChangeNotification> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  return resource_sub_executor_.AsyncSubscribeAll(ClientID::Nil(), subscribe, done);
}
RedisErrorInfoAccessor::RedisErrorInfoAccessor(RedisGcsClient *client_impl)
    : client_impl_(client_impl) {}
// Append an error record to the per-job error log. `callback` may be
// nullptr.
Status RedisErrorInfoAccessor::AsyncReportJobError(
    const std::shared_ptr<ErrorTableData> &data_ptr, const StatusCallback &callback) {
  ErrorTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const JobID &job_id,
                         const ErrorTableData &data) { callback(Status::OK()); };
  }
  JobID job_id = JobID::FromBinary(data_ptr->job_id());
  ErrorTable &error_table = client_impl_->error_table();
  // The job id is used both as the table prefix and the key.
  return error_table.Append(job_id, job_id, data_ptr, on_done);
}
RedisStatsInfoAccessor::RedisStatsInfoAccessor(RedisGcsClient *client_impl)
    : client_impl_(client_impl) {}
// Append profile events under a fresh random key, so entries are never
// overwritten. `callback` may be nullptr.
Status RedisStatsInfoAccessor::AsyncAddProfileData(
    const std::shared_ptr<ProfileTableData> &data_ptr, const StatusCallback &callback) {
  ProfileTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const UniqueID &id,
                         const ProfileTableData &data) { callback(Status::OK()); };
  }
  ProfileTable &profile_table = client_impl_->profile_table();
  return profile_table.Append(JobID::Nil(), UniqueID::FromRandom(), data_ptr, on_done);
}
RedisWorkerInfoAccessor::RedisWorkerInfoAccessor(RedisGcsClient *client_impl)
    : client_impl_(client_impl),
      worker_failure_sub_executor_(client_impl->worker_failure_table()) {}
// Subscribe to failure notifications for all workers.
Status RedisWorkerInfoAccessor::AsyncSubscribeToWorkerFailures(
    const SubscribeCallback<WorkerID, WorkerFailureData> &subscribe,
    const StatusCallback &done) {
  RAY_CHECK(subscribe != nullptr);
  return worker_failure_sub_executor_.AsyncSubscribeAll(ClientID::Nil(), subscribe, done);
}
// Record a worker failure, keyed by the worker id taken from the embedded
// worker address. `callback` may be nullptr.
Status RedisWorkerInfoAccessor::AsyncReportWorkerFailure(
    const std::shared_ptr<WorkerFailureData> &data_ptr, const StatusCallback &callback) {
  WorkerFailureTable::WriteCallback on_done = nullptr;
  if (callback != nullptr) {
    on_done = [callback](RedisGcsClient *client, const WorkerID &id,
                         const WorkerFailureData &data) { callback(Status::OK()); };
  }
  WorkerID worker_id = WorkerID::FromBinary(data_ptr->worker_address().worker_id());
  WorkerFailureTable &worker_failure_table = client_impl_->worker_failure_table();
  return worker_failure_table.Add(JobID::Nil(), worker_id, data_ptr, on_done);
}
} // namespace gcs
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_accessor.h
|
C/C++ Header
|
#ifndef RAY_GCS_REDIS_ACCESSOR_H
#define RAY_GCS_REDIS_ACCESSOR_H
#include "ray/common/id.h"
#include "ray/gcs/accessor.h"
#include "ray/gcs/callback.h"
#include "ray/gcs/subscription_executor.h"
#include "ray/gcs/tables.h"
namespace ray {
namespace gcs {
class RedisGcsClient;
/// \class RedisActorInfoAccessor
/// `RedisActorInfoAccessor` is an implementation of `ActorInfoAccessor`
/// that uses Redis as the backend storage.
class RedisActorInfoAccessor : public ActorInfoAccessor {
 public:
  explicit RedisActorInfoAccessor(RedisGcsClient *client_impl);
  virtual ~RedisActorInfoAccessor() {}
  /// Look up an actor's latest table entry.
  Status AsyncGet(const ActorID &actor_id,
                  const OptionalItemCallback<ActorTableData> &callback) override;
  /// Register a new actor (appends the first log entry).
  Status AsyncRegister(const std::shared_ptr<ActorTableData> &data_ptr,
                       const StatusCallback &callback) override;
  /// Append an actor state transition at the expected log position.
  Status AsyncUpdate(const ActorID &actor_id,
                     const std::shared_ptr<ActorTableData> &data_ptr,
                     const StatusCallback &callback) override;
  /// Subscribe to updates of all actors.
  Status AsyncSubscribeAll(const SubscribeCallback<ActorID, ActorTableData> &subscribe,
                           const StatusCallback &done) override;
  /// Subscribe to one actor's updates.
  Status AsyncSubscribe(const ActorID &actor_id,
                        const SubscribeCallback<ActorID, ActorTableData> &subscribe,
                        const StatusCallback &done) override;
  /// Cancel a per-actor subscription.
  Status AsyncUnsubscribe(const ActorID &actor_id, const StatusCallback &done) override;
  /// Persist checkpoint data, then record its id under the actor.
  Status AsyncAddCheckpoint(const std::shared_ptr<ActorCheckpointData> &data_ptr,
                            const StatusCallback &callback) override;
  /// Fetch checkpoint data by checkpoint id.
  Status AsyncGetCheckpoint(
      const ActorCheckpointID &checkpoint_id,
      const OptionalItemCallback<ActorCheckpointData> &callback) override;
  /// Fetch the checkpoint-id record for an actor.
  Status AsyncGetCheckpointID(
      const ActorID &actor_id,
      const OptionalItemCallback<ActorCheckpointIdData> &callback) override;

 private:
  /// Add checkpoint id to GCS asynchronously.
  ///
  /// \param actor_id The ID of actor that the checkpoint belongs to.
  /// \param checkpoint_id The ID of checkpoint that will be added to GCS.
  /// \param callback Callback invoked on completion; may be nullptr.
  /// \return Status
  Status AsyncAddCheckpointID(const ActorID &actor_id,
                              const ActorCheckpointID &checkpoint_id,
                              const StatusCallback &callback);

 private:
  RedisGcsClient *client_impl_{nullptr};
  // Use a random ClientID for actor subscription. Because:
  // If we use ClientID::Nil, GCS will still send all actors' updates to this GCS Client.
  // Even we can filter out irrelevant updates, but there will be extra overhead.
  // And because the new GCS Client will no longer hold the local ClientID, so we use
  // random ClientID instead.
  // TODO(micafan): Remove this random id, once GCS becomes a service.
  ClientID subscribe_id_{ClientID::FromRandom()};
  typedef SubscriptionExecutor<ActorID, ActorTableData, ActorTable>
      ActorSubscriptionExecutor;
  ActorSubscriptionExecutor actor_sub_executor_;
};
/// \class RedisJobInfoAccessor
/// RedisJobInfoAccessor is an implementation of `JobInfoAccessor`
/// that uses Redis as the backend storage.
class RedisJobInfoAccessor : public JobInfoAccessor {
 public:
  explicit RedisJobInfoAccessor(RedisGcsClient *client_impl);
  virtual ~RedisJobInfoAccessor() {}
  /// Append a job entry to the job table.
  Status AsyncAdd(const std::shared_ptr<JobTableData> &data_ptr,
                  const StatusCallback &callback) override;
  /// Append a finished (is_dead) entry for the job.
  Status AsyncMarkFinished(const JobID &job_id, const StatusCallback &callback) override;
  /// Subscribe to job updates, delivering only finished-job entries.
  Status AsyncSubscribeToFinishedJobs(
      const SubscribeCallback<JobID, JobTableData> &subscribe,
      const StatusCallback &done) override;

 private:
  /// Append job information to GCS asynchronously.
  ///
  /// \param data_ptr The job information that will be appended to GCS.
  /// \param callback Callback that will be called after append done.
  /// \return Status
  Status DoAsyncAppend(const std::shared_ptr<JobTableData> &data_ptr,
                       const StatusCallback &callback);
  RedisGcsClient *client_impl_{nullptr};
  typedef SubscriptionExecutor<JobID, JobTableData, JobTable> JobSubscriptionExecutor;
  JobSubscriptionExecutor job_sub_executor_;
};
/// \class RedisTaskInfoAccessor
/// `RedisTaskInfoAccessor` is an implementation of `TaskInfoAccessor`
/// that uses Redis as the backend storage.
class RedisTaskInfoAccessor : public TaskInfoAccessor {
 public:
  /// \param client_impl Non-owning pointer to the backing Redis GCS client;
  /// must outlive this accessor.
  explicit RedisTaskInfoAccessor(RedisGcsClient *client_impl);

  virtual ~RedisTaskInfoAccessor() {}

  /// Add task data to GCS asynchronously.
  Status AsyncAdd(const std::shared_ptr<TaskTableData> &data_ptr,
                  const StatusCallback &callback) override;

  /// Look up the data for one task asynchronously.
  Status AsyncGet(const TaskID &task_id,
                  const OptionalItemCallback<TaskTableData> &callback) override;

  /// Delete the entries for a batch of tasks asynchronously.
  Status AsyncDelete(const std::vector<TaskID> &task_ids,
                     const StatusCallback &callback) override;

  /// Subscribe to updates of a single task.
  Status AsyncSubscribe(const TaskID &task_id,
                        const SubscribeCallback<TaskID, TaskTableData> &subscribe,
                        const StatusCallback &done) override;

  /// Cancel a previous per-task subscription.
  Status AsyncUnsubscribe(const TaskID &task_id, const StatusCallback &done) override;

  /// Add a task lease entry to GCS asynchronously.
  Status AsyncAddTaskLease(const std::shared_ptr<TaskLeaseData> &data_ptr,
                           const StatusCallback &callback) override;

  /// Subscribe to lease notifications for a task. The payload is an optional
  /// (presumably empty when the lease entry disappears -- confirm in the .cc).
  Status AsyncSubscribeTaskLease(
      const TaskID &task_id,
      const SubscribeCallback<TaskID, boost::optional<TaskLeaseData>> &subscribe,
      const StatusCallback &done) override;

  /// Cancel a previous task-lease subscription.
  Status AsyncUnsubscribeTaskLease(const TaskID &task_id,
                                   const StatusCallback &done) override;

  /// Append a reconstruction-attempt entry for a task.
  Status AttemptTaskReconstruction(
      const std::shared_ptr<TaskReconstructionData> &data_ptr,
      const StatusCallback &callback) override;

 private:
  /// Non-owning handle to the backing Redis client.
  RedisGcsClient *client_impl_{nullptr};
  // Use a random ClientID for task subscription. Because:
  // If we use ClientID::Nil, GCS will still send all tasks' updates to this GCS Client.
  // Even we can filter out irrelevant updates, but there will be extra overhead.
  // And because the new GCS Client will no longer hold the local ClientID, so we use
  // random ClientID instead.
  // TODO(micafan): Remove this random id, once GCS becomes a service.
  ClientID subscribe_id_{ClientID::FromRandom()};

  typedef SubscriptionExecutor<TaskID, TaskTableData, raylet::TaskTable>
      TaskSubscriptionExecutor;
  TaskSubscriptionExecutor task_sub_executor_;

  typedef SubscriptionExecutor<TaskID, boost::optional<TaskLeaseData>, TaskLeaseTable>
      TaskLeaseSubscriptionExecutor;
  TaskLeaseSubscriptionExecutor task_lease_sub_executor_;
};
/// \class RedisObjectInfoAccessor
/// RedisObjectInfoAccessor is an implementation of `ObjectInfoAccessor`
/// that uses Redis as the backend storage.
class RedisObjectInfoAccessor : public ObjectInfoAccessor {
 public:
  /// \param client_impl Non-owning pointer to the backing Redis GCS client;
  /// must outlive this accessor.
  explicit RedisObjectInfoAccessor(RedisGcsClient *client_impl);

  virtual ~RedisObjectInfoAccessor() {}

  /// Fetch all recorded locations of an object asynchronously.
  Status AsyncGetLocations(const ObjectID &object_id,
                           const MultiItemCallback<ObjectTableData> &callback) override;

  /// Add `node_id` to the location set of `object_id`, asynchronously.
  Status AsyncAddLocation(const ObjectID &object_id, const ClientID &node_id,
                          const StatusCallback &callback) override;

  /// Remove `node_id` from the location set of `object_id`, asynchronously.
  Status AsyncRemoveLocation(const ObjectID &object_id, const ClientID &node_id,
                             const StatusCallback &callback) override;

  /// Subscribe to location-change notifications for one object.
  Status AsyncSubscribeToLocations(
      const ObjectID &object_id,
      const SubscribeCallback<ObjectID, ObjectChangeNotification> &subscribe,
      const StatusCallback &done) override;

  /// Cancel a previous location subscription for one object.
  Status AsyncUnsubscribeToLocations(const ObjectID &object_id,
                                     const StatusCallback &done) override;

 private:
  /// Non-owning handle to the backing Redis client.
  RedisGcsClient *client_impl_{nullptr};
  // Use a random ClientID for object subscription. Because:
  // If we use ClientID::Nil, GCS will still send all objects' updates to this GCS Client.
  // Even we can filter out irrelevant updates, but there will be extra overhead.
  // And because the new GCS Client will no longer hold the local ClientID, so we use
  // random ClientID instead.
  // TODO(micafan): Remove this random id, once GCS becomes a service.
  ClientID subscribe_id_{ClientID::FromRandom()};

  typedef SubscriptionExecutor<ObjectID, ObjectChangeNotification, ObjectTable>
      ObjectSubscriptionExecutor;
  ObjectSubscriptionExecutor object_sub_executor_;
};
/// \class RedisNodeInfoAccessor
/// RedisNodeInfoAccessor is an implementation of `NodeInfoAccessor`
/// that uses Redis as the backend storage.
class RedisNodeInfoAccessor : public NodeInfoAccessor {
 public:
  /// \param client_impl Non-owning pointer to the backing Redis GCS client;
  /// must outlive this accessor.
  explicit RedisNodeInfoAccessor(RedisGcsClient *client_impl);

  virtual ~RedisNodeInfoAccessor() {}

  // --- Local-node registration (no callback parameter, so presumably
  // synchronous -- confirm in the .cc). ---

  Status RegisterSelf(const GcsNodeInfo &local_node_info) override;
  Status UnregisterSelf() override;
  const ClientID &GetSelfId() const override;
  const GcsNodeInfo &GetSelfInfo() const override;

  // --- Node membership (asynchronous). ---

  Status AsyncRegister(const GcsNodeInfo &node_info,
                       const StatusCallback &callback) override;
  Status AsyncUnregister(const ClientID &node_id,
                         const StatusCallback &callback) override;
  Status AsyncGetAll(const MultiItemCallback<GcsNodeInfo> &callback) override;
  Status AsyncSubscribeToNodeChange(
      const SubscribeCallback<ClientID, GcsNodeInfo> &subscribe,
      const StatusCallback &done) override;

  // --- Cached node views (NOTE(review): likely populated by the node-change
  // subscription -- confirm in the .cc). ---

  boost::optional<GcsNodeInfo> Get(const ClientID &node_id) const override;
  const std::unordered_map<ClientID, GcsNodeInfo> &GetAll() const override;
  bool IsRemoved(const ClientID &node_id) const override;

  // --- Dynamic node resources. ---

  Status AsyncGetResources(const ClientID &node_id,
                           const OptionalItemCallback<ResourceMap> &callback) override;
  Status AsyncUpdateResources(const ClientID &node_id, const ResourceMap &resources,
                              const StatusCallback &callback) override;
  Status AsyncDeleteResources(const ClientID &node_id,
                              const std::vector<std::string> &resource_names,
                              const StatusCallback &callback) override;
  Status AsyncSubscribeToResources(
      const SubscribeCallback<ClientID, ResourceChangeNotification> &subscribe,
      const StatusCallback &done) override;

  // --- Heartbeats (single and batched). ---

  Status AsyncReportHeartbeat(const std::shared_ptr<HeartbeatTableData> &data_ptr,
                              const StatusCallback &callback) override;
  Status AsyncSubscribeHeartbeat(
      const SubscribeCallback<ClientID, HeartbeatTableData> &subscribe,
      const StatusCallback &done) override;
  Status AsyncReportBatchHeartbeat(
      const std::shared_ptr<HeartbeatBatchTableData> &data_ptr,
      const StatusCallback &callback) override;
  Status AsyncSubscribeBatchHeartbeat(
      const ItemCallback<HeartbeatBatchTableData> &subscribe,
      const StatusCallback &done) override;

 private:
  /// Non-owning handle to the backing Redis client.
  RedisGcsClient *client_impl_{nullptr};

  typedef SubscriptionExecutor<ClientID, ResourceChangeNotification, DynamicResourceTable>
      DynamicResourceSubscriptionExecutor;
  DynamicResourceSubscriptionExecutor resource_sub_executor_;

  typedef SubscriptionExecutor<ClientID, HeartbeatTableData, HeartbeatTable>
      HeartbeatSubscriptionExecutor;
  HeartbeatSubscriptionExecutor heartbeat_sub_executor_;

  typedef SubscriptionExecutor<ClientID, HeartbeatBatchTableData, HeartbeatBatchTable>
      HeartbeatBatchSubscriptionExecutor;
  HeartbeatBatchSubscriptionExecutor heartbeat_batch_sub_executor_;
};
/// \class RedisErrorInfoAccessor
/// RedisErrorInfoAccessor is an implementation of `ErrorInfoAccessor`
/// that uses Redis as the backend storage.
class RedisErrorInfoAccessor : public ErrorInfoAccessor {
 public:
  /// \param client_impl Non-owning pointer to the backing Redis GCS client;
  /// must outlive this accessor.
  explicit RedisErrorInfoAccessor(RedisGcsClient *client_impl);

  virtual ~RedisErrorInfoAccessor() = default;

  /// Report an error belonging to a job to GCS asynchronously.
  Status AsyncReportJobError(const std::shared_ptr<ErrorTableData> &data_ptr,
                             const StatusCallback &callback) override;

 private:
  /// Non-owning handle to the backing Redis client.
  RedisGcsClient *client_impl_{nullptr};
};
/// \class RedisStatsInfoAccessor
/// RedisStatsInfoAccessor is an implementation of `StatsInfoAccessor`
/// that uses Redis as the backend storage.
class RedisStatsInfoAccessor : public StatsInfoAccessor {
 public:
  /// \param client_impl Non-owning pointer to the backing Redis GCS client;
  /// must outlive this accessor.
  explicit RedisStatsInfoAccessor(RedisGcsClient *client_impl);

  virtual ~RedisStatsInfoAccessor() = default;

  /// Add profiling data to GCS asynchronously.
  Status AsyncAddProfileData(const std::shared_ptr<ProfileTableData> &data_ptr,
                             const StatusCallback &callback) override;

 private:
  /// Non-owning handle to the backing Redis client.
  RedisGcsClient *client_impl_{nullptr};
};
/// \class RedisWorkerInfoAccessor
/// RedisWorkerInfoAccessor is an implementation of `WorkerInfoAccessor`
/// that uses Redis as the backend storage.
class RedisWorkerInfoAccessor : public WorkerInfoAccessor {
 public:
  /// \param client_impl Non-owning pointer to the backing Redis GCS client;
  /// must outlive this accessor.
  explicit RedisWorkerInfoAccessor(RedisGcsClient *client_impl);

  virtual ~RedisWorkerInfoAccessor() = default;

  /// Subscribe to notifications about failed workers.
  Status AsyncSubscribeToWorkerFailures(
      const SubscribeCallback<WorkerID, WorkerFailureData> &subscribe,
      const StatusCallback &done) override;

  /// Report a worker failure to GCS asynchronously.
  Status AsyncReportWorkerFailure(const std::shared_ptr<WorkerFailureData> &data_ptr,
                                  const StatusCallback &callback) override;

 private:
  /// Non-owning handle to the backing Redis client.
  RedisGcsClient *client_impl_{nullptr};

  typedef SubscriptionExecutor<WorkerID, WorkerFailureData, WorkerFailureTable>
      WorkerFailureSubscriptionExecutor;
  WorkerFailureSubscriptionExecutor worker_failure_sub_executor_;
};
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_REDIS_ACCESSOR_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_async_context.cc
|
C++
|
#include "ray/gcs/redis_async_context.h"
extern "C" {
#include "hiredis/async.h"
#include "hiredis/hiredis.h"
}
namespace ray {
namespace gcs {
// Wrap and take ownership of a raw hiredis async context (the destructor
// frees it). The context must be non-null.
RedisAsyncContext::RedisAsyncContext(redisAsyncContext *redis_async_context)
    : redis_async_context_(redis_async_context) {
  RAY_CHECK(redis_async_context_ != nullptr);
}
RedisAsyncContext::~RedisAsyncContext() {
  // Free the underlying hiredis context unless it has already been released
  // elsewhere (ResetRawRedisAsyncContext() nulls the pointer in that case).
  if (redis_async_context_) {
    redisAsyncFree(redis_async_context_);
    redis_async_context_ = nullptr;
  }
}
// Expose the raw hiredis context. The caller must not free it; ownership
// stays with this wrapper (or with hiredis after a disconnect).
redisAsyncContext *RedisAsyncContext::GetRawRedisAsyncContext() {
  return redis_async_context_;
}
// Drop our pointer without freeing: called when hiredis itself has released
// the context (see the disconnect callback in redis_context.cc), so freeing
// here would be a double free.
void RedisAsyncContext::ResetRawRedisAsyncContext() {
  // Reset redis_async_context_ to nullptr because hiredis has released this context.
  redis_async_context_ = nullptr;
}
void RedisAsyncContext::RedisAsyncHandleRead() {
  // Reading mutates `redis_async_context_` and fires the callbacks that were
  // registered through `redisvAsyncCommand` / `redisAsyncCommandArgv`, so the
  // whole call is serialized behind the mutex.
  std::lock_guard<std::mutex> guard(mutex_);
  redisAsyncHandleRead(redis_async_context_);
}
void RedisAsyncContext::RedisAsyncHandleWrite() {
  // Writing mutates `redis_async_context_`; serialize behind the mutex like
  // the read path.
  std::lock_guard<std::mutex> guard(mutex_);
  redisAsyncHandleWrite(redis_async_context_);
}
/// Perform `redisvAsyncCommand`. Thread-safe.
///
/// Fixes over the previous version:
///  - `va_end(ap)` now runs on the early-return path too (the old code
///    returned after `va_start` without calling `va_end`, which is undefined
///    behavior per the C standard).
///  - `errstr` is copied while the mutex is still held; the old code read
///    `redis_async_context_->errstr` after releasing the lock, racing with
///    the read/write handlers (and with the pointer being reset).
Status RedisAsyncContext::RedisAsyncCommand(redisCallbackFn *fn, void *privdata,
                                            const char *format, ...) {
  va_list ap;
  va_start(ap, format);
  int ret_code = 0;
  std::string error_message;
  {
    // `redisvAsyncCommand` will mutate `redis_async_context_`, use a lock to protect it.
    std::lock_guard<std::mutex> lock(mutex_);
    if (!redis_async_context_) {
      // The raw context has been released (e.g. after a disconnect).
      va_end(ap);
      return Status::NotImplemented("Redis async context is not available.");
    }
    ret_code = redisvAsyncCommand(redis_async_context_, fn, privdata, format, ap);
    if (ret_code == REDIS_ERR) {
      error_message = std::string(redis_async_context_->errstr);
    }
  }
  va_end(ap);
  if (ret_code == REDIS_ERR) {
    return Status::RedisError(error_message);
  }
  RAY_CHECK(ret_code == REDIS_OK);
  return Status::OK();
}
/// Perform `redisAsyncCommandArgv`. Thread-safe.
///
/// Fixes over the previous version:
///  - Guards against a null `redis_async_context_` like RedisAsyncCommand()
///    already did: the disconnect callback resets the pointer to nullptr, so
///    calling into hiredis here would dereference null.
///  - `errstr` is copied while the mutex is held (the old code read it after
///    releasing the lock).
Status RedisAsyncContext::RedisAsyncCommandArgv(redisCallbackFn *fn, void *privdata,
                                                int argc, const char **argv,
                                                const size_t *argvlen) {
  int ret_code = 0;
  std::string error_message;
  {
    // `redisAsyncCommandArgv` will mutate `redis_async_context_`, use a lock to
    // protect it.
    std::lock_guard<std::mutex> lock(mutex_);
    if (!redis_async_context_) {
      return Status::NotImplemented("Redis async context is not available.");
    }
    ret_code =
        redisAsyncCommandArgv(redis_async_context_, fn, privdata, argc, argv, argvlen);
    if (ret_code == REDIS_ERR) {
      error_message = std::string(redis_async_context_->errstr);
    }
  }
  if (ret_code == REDIS_ERR) {
    return Status::RedisError(error_message);
  }
  RAY_CHECK(ret_code == REDIS_OK);
  return Status::OK();
}
} // namespace gcs
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_async_context.h
|
C/C++ Header
|
#ifndef RAY_GCS_REDIS_ASYNC_CONTEXT_H
#define RAY_GCS_REDIS_ASYNC_CONTEXT_H
#include <stdarg.h>
#include <mutex>
#include "ray/common/status.h"
extern "C" {
#include "hiredis/async.h"
#include "hiredis/hiredis.h"
}
namespace ray {
namespace gcs {
/// \class RedisAsyncContext
/// RedisAsyncContext class is a wrapper of hiredis `asyncRedisContext`, providing
/// C++ style and thread-safe API.
class RedisAsyncContext {
 public:
  /// Takes the raw context; it must be non-null. The wrapper frees it in the
  /// destructor unless ResetRawRedisAsyncContext() was called first.
  explicit RedisAsyncContext(redisAsyncContext *redis_async_context);

  ~RedisAsyncContext();

  /// Get the raw 'redisAsyncContext' pointer.
  ///
  /// \return redisAsyncContext *
  redisAsyncContext *GetRawRedisAsyncContext();

  /// Reset the raw 'redisAsyncContext' pointer to nullptr.
  /// Called when hiredis has already released the context, so the destructor
  /// must not free it again.
  void ResetRawRedisAsyncContext();

  /// Perform command 'redisAsyncHandleRead'. Thread-safe.
  void RedisAsyncHandleRead();

  /// Perform command 'redisAsyncHandleWrite'. Thread-safe.
  void RedisAsyncHandleWrite();

  /// Perform command 'redisvAsyncCommand'. Thread-safe.
  ///
  /// \param fn Callback that will be called after the command finishes.
  /// \param privdata User-defined pointer.
  /// \param format Command format.
  /// \param ... Command list.
  /// \return Status
  Status RedisAsyncCommand(redisCallbackFn *fn, void *privdata, const char *format, ...);

  /// Perform command 'redisAsyncCommandArgv'. Thread-safe.
  ///
  /// \param fn Callback that will be called after the command finishes.
  /// \param privdata User-defined pointer.
  /// \param argc Number of arguments.
  /// \param argv Array with arguments.
  /// \param argvlen Array with each argument's length.
  /// \return Status
  Status RedisAsyncCommandArgv(redisCallbackFn *fn, void *privdata, int argc,
                               const char **argv, const size_t *argvlen);

 private:
  /// This mutex is used to protect `redis_async_context`.
  /// NOTE(micafan): All the `redisAsyncContext`-related functions only manipulate memory
  /// data and don't actually do any IO operations. So the perf impact of adding the lock
  /// should be minimum.
  std::mutex mutex_;
  redisAsyncContext *redis_async_context_{nullptr};
};
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_REDIS_ASYNC_CONTEXT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_context.cc
|
C++
|
#include "ray/gcs/redis_context.h"
#include <unistd.h>
#include <sstream>
#include "ray/stats/stats.h"
#include "ray/util/util.h"
extern "C" {
#include "hiredis/async.h"
#include "hiredis/hiredis.h"
}
// TODO(pcm): Integrate into the C++ tree.
#include "ray/common/ray_config.h"
namespace {
/// A helper function to call the callback and delete it from the callback
/// manager if necessary.
/// A helper function to call the callback and delete it from the callback
/// manager if necessary.
///
/// \param callback_index Index previously returned by RedisCallbackManager::add;
/// must be non-negative.
/// \param callback_reply Parsed reply to hand to the user callback.
void ProcessCallback(int64_t callback_index,
                     std::shared_ptr<ray::gcs::CallbackReply> callback_reply) {
  // Fixed message: the check accepts index 0 (`>= 0`), but the old message
  // claimed the index "must be greater than 0".
  RAY_CHECK(callback_index >= 0) << "The callback index must be greater than or "
                                 << "equal to 0, but it actually is " << callback_index;
  auto callback_item = ray::gcs::RedisCallbackManager::instance().get(callback_index);
  if (!callback_item->is_subscription_) {
    // Record the redis latency for non-subscription redis operations.
    auto end_time = absl::GetCurrentTimeNanos() / 1000;
    ray::stats::RedisLatency().Record(end_time - callback_item->start_time_);
  }
  // Dispatch the callback.
  callback_item->Dispatch(callback_reply);
  if (!callback_item->is_subscription_) {
    // Delete the callback if it's not a subscription callback; one-shot
    // callbacks would otherwise leak in the manager's map.
    ray::gcs::RedisCallbackManager::instance().remove(callback_index);
  }
}
} // namespace
namespace ray {
namespace gcs {
/// Parse a raw hiredis reply into this wrapper.
///
/// Fix: the old version initialized `reply_type_` from `redis_reply->type`
/// in the member-init list, which dereferenced the pointer *before* the
/// RAY_CHECK null check in the body could run. Check first, then read.
CallbackReply::CallbackReply(redisReply *redis_reply) {
  RAY_CHECK(nullptr != redis_reply);
  reply_type_ = redis_reply->type;
  switch (reply_type_) {
  case REDIS_REPLY_NIL: {
    break;
  }
  case REDIS_REPLY_ERROR: {
    RAY_CHECK(false) << "Got an error in redis reply: " << redis_reply->str;
    break;
  }
  case REDIS_REPLY_INTEGER: {
    int_reply_ = static_cast<int64_t>(redis_reply->integer);
    break;
  }
  case REDIS_REPLY_STATUS: {
    const std::string status_str(redis_reply->str, redis_reply->len);
    if (status_str == "OK") {
      status_reply_ = Status::OK();
    } else {
      status_reply_ = Status::RedisError(status_str);
    }
    break;
  }
  case REDIS_REPLY_STRING: {
    string_reply_ = std::string(redis_reply->str, redis_reply->len);
    break;
  }
  case REDIS_REPLY_ARRAY: {
    // Array replies are only used for pub-sub messages. Parse the published message.
    redisReply *message_type = redis_reply->element[0];
    if (strcmp(message_type->str, "subscribe") == 0) {
      // If the message is for the initial subscription call, return the empty
      // string as a response to signify that subscription was successful.
    } else if (strcmp(message_type->str, "message") == 0) {
      // If the message is from a PUBLISH, make sure the data is nonempty.
      redisReply *message = redis_reply->element[redis_reply->elements - 1];
      // data is a notification message.
      string_reply_ = std::string(message->str, message->len);
      RAY_CHECK(!string_reply_.empty()) << "Empty message received on subscribe channel.";
    } else {
      RAY_LOG(FATAL) << "This is not a pubsub reply: data=" << message_type->str;
    }
    break;
  }
  default: {
    RAY_LOG(WARNING) << "Encountered unexpected redis reply type: " << reply_type_;
  }
  }
}
// True iff the underlying redis reply was a `nil`.
bool CallbackReply::IsNil() const { return reply_type_ == REDIS_REPLY_NIL; }
// Only valid when the reply was an integer; asserts otherwise.
int64_t CallbackReply::ReadAsInteger() const {
  RAY_CHECK(REDIS_REPLY_INTEGER == reply_type_) << "Unexpected type: " << reply_type_;
  return int_reply_;
}
// Only valid when the reply was a status; asserts otherwise.
Status CallbackReply::ReadAsStatus() const {
  RAY_CHECK(REDIS_REPLY_STATUS == reply_type_) << "Unexpected type: " << reply_type_;
  return status_reply_;
}
// Only valid when the reply was a bulk string; asserts otherwise.
std::string CallbackReply::ReadAsString() const {
  RAY_CHECK(REDIS_REPLY_STRING == reply_type_) << "Unexpected type: " << reply_type_;
  return string_reply_;
}
// Only valid when the reply was a pub-sub array reply; asserts otherwise.
// The constructor stores the published payload in `string_reply_`.
std::string CallbackReply::ReadAsPubsubData() const {
  RAY_CHECK(REDIS_REPLY_ARRAY == reply_type_) << "Unexpected type: " << reply_type_;
  return string_reply_;
}
// This is a global redis callback which will be registered for every
// asynchronous redis call. It dispatches the appropriate callback
// that was registered with the RedisCallbackManager.
void GlobalRedisCallback(void *c, void *r, void *privdata) {
  // hiredis hands us a null reply when the connection is going away.
  if (!r) {
    return;
  }
  // `privdata` carries the integer callback index, not a real pointer.
  const int64_t callback_index = reinterpret_cast<int64_t>(privdata);
  auto *reply = reinterpret_cast<redisReply *>(r);
  ProcessCallback(callback_index, std::make_shared<CallbackReply>(reply));
}
// Register a callback and return its index (used later as hiredis privdata).
int64_t RedisCallbackManager::add(const RedisCallback &function, bool is_subscription,
                                  boost::asio::io_service &io_service) {
  // Stamp the registration time (microseconds) before taking the lock.
  const auto start_time = absl::GetCurrentTimeNanos() / 1000;
  std::lock_guard<std::mutex> guard(mutex_);
  const int64_t index = num_callbacks_++;
  callback_items_.emplace(index, std::make_shared<CallbackItem>(
                                     function, is_subscription, start_time, io_service));
  return index;
}
/// Look up a registered callback by index; the index must exist.
///
/// Improvement: single hash lookup. The old code did `find()` for the check
/// and then `operator[]` for the read -- a second lookup, and `operator[]`
/// would default-insert if the key were ever missing.
std::shared_ptr<RedisCallbackManager::CallbackItem> RedisCallbackManager::get(
    int64_t callback_index) {
  std::lock_guard<std::mutex> lock(mutex_);
  auto it = callback_items_.find(callback_index);
  RAY_CHECK(it != callback_items_.end());
  return it->second;
}
// Remove a callback by index. Erasing an index that is not present is a
// harmless no-op.
void RedisCallbackManager::remove(int64_t callback_index) {
  std::lock_guard<std::mutex> lock(mutex_);
  callback_items_.erase(callback_index);
}
#define REDIS_CHECK_ERROR(CONTEXT, REPLY) \
if (REPLY == nullptr || REPLY->type == REDIS_REPLY_ERROR) { \
return Status::RedisError(CONTEXT->errstr); \
}
RedisContext::~RedisContext() {
  // Only the synchronous context needs an explicit free; the two async
  // contexts are owned by unique_ptr<RedisAsyncContext> members.
  if (context_ != nullptr) {
    redisFree(context_);
  }
}
/// Authenticate the synchronous connection.
///
/// \param context Connected synchronous hiredis context.
/// \param password Server password; empty means auth is disabled.
/// \return Status::OK on success, RedisError on AUTH failure.
///
/// Fixes: idiomatic `empty()` instead of `== ""`, and the error reply is
/// freed before returning (the REDIS_CHECK_ERROR macro returned without
/// freeing a non-null error reply, leaking it).
Status AuthenticateRedis(redisContext *context, const std::string &password) {
  if (password.empty()) {
    return Status::OK();
  }
  redisReply *reply =
      reinterpret_cast<redisReply *>(redisCommand(context, "AUTH %s", password.c_str()));
  if (reply == nullptr || reply->type == REDIS_REPLY_ERROR) {
    Status status = Status::RedisError(context->errstr);
    if (reply != nullptr) {
      freeReplyObject(reply);
    }
    return status;
  }
  freeReplyObject(reply);
  return Status::OK();
}
/// Authenticate an asynchronous connection (fire-and-forget, no callback).
///
/// \param context Connected async hiredis context.
/// \param password Server password; empty means auth is disabled.
/// \return Status::OK if the command was queued, RedisError otherwise.
///
/// Fixes: idiomatic `empty()` instead of `== ""`, `nullptr` instead of NULL.
Status AuthenticateRedis(redisAsyncContext *context, const std::string &password) {
  if (password.empty()) {
    return Status::OK();
  }
  const int status =
      redisAsyncCommand(context, nullptr, nullptr, "AUTH %s", password.c_str());
  if (status == REDIS_ERR) {
    return Status::RedisError(std::string(context->errstr));
  }
  return Status::OK();
}
// hiredis disconnect hook. `context->data` was set to the owning
// RedisAsyncContext wrapper in SetDisconnectCallback(); clear the wrapper's
// raw pointer so it does not double-free a context hiredis is releasing.
void RedisAsyncContextDisconnectCallback(const redisAsyncContext *context, int status) {
  RAY_LOG(DEBUG) << "Redis async context disconnected. Status: " << status;
  // Reset raw 'redisAsyncContext' to nullptr because hiredis will release this context.
  reinterpret_cast<RedisAsyncContext *>(context->data)->ResetRawRedisAsyncContext();
}
// Wire the disconnect hook up to a wrapper: stash the wrapper in hiredis'
// user-data slot so the C callback can recover it when the connection drops.
void SetDisconnectCallback(RedisAsyncContext *redis_async_context) {
  redisAsyncContext *raw = redis_async_context->GetRawRedisAsyncContext();
  raw->data = redis_async_context;
  redisAsyncSetDisconnectCallback(raw, RedisAsyncContextDisconnectCallback);
}
// Connect via `connect_function`, retrying up to
// `redis_db_connect_retries()` times with a fixed sleep between attempts.
// On exhaustion RAY_LOG(FATAL) aborts the process, so the trailing
// Status::OK() is only reached on success.
template <typename RedisContext, typename RedisConnectFunction>
Status ConnectWithRetries(const std::string &address, int port,
                          const RedisConnectFunction &connect_function,
                          RedisContext **context) {
  int attempts = 0;
  *context = connect_function(address.c_str(), port);
  while (*context == nullptr || (*context)->err) {
    if (attempts >= RayConfig::instance().redis_db_connect_retries()) {
      if (*context == nullptr) {
        RAY_LOG(FATAL) << "Could not allocate redis context.";
      }
      if ((*context)->err) {
        RAY_LOG(FATAL) << "Could not establish connection to redis " << address << ":"
                       << port << " (context.err = " << (*context)->err << ")";
      }
      break;
    }
    RAY_LOG(WARNING) << "Failed to connect to Redis, retrying.";
    // Sleep for a little.
    usleep(RayConfig::instance().redis_db_connect_wait_milliseconds() * 1000);
    *context = connect_function(address.c_str(), port);
    attempts += 1;
  }
  return Status::OK();
}
// Establish the three connections this context owns: one synchronous, one
// async for commands, and one async dedicated to pub-sub. Each RAY_CHECK_OK
// aborts on failure, so on return all three are connected and authenticated.
//
// NOTE(review): the `sharding` parameter is not used in this definition --
// confirm whether it is intentional. Also note the default argument
// `password = ""` appears on this definition while the declaration in
// redis_context.h has none; the default is only visible inside this TU.
Status RedisContext::Connect(const std::string &address, int port, bool sharding,
                             const std::string &password = "") {
  // Connect() must be called at most once.
  RAY_CHECK(!context_);
  RAY_CHECK(!redis_async_context_);
  RAY_CHECK(!async_redis_subscribe_context_);

  // Synchronous context.
  RAY_CHECK_OK(ConnectWithRetries(address, port, redisConnect, &context_));
  RAY_CHECK_OK(AuthenticateRedis(context_, password));

  // Enable keyspace notifications ("K" = keyspace events, "l" = list events).
  redisReply *reply = reinterpret_cast<redisReply *>(
      redisCommand(context_, "CONFIG SET notify-keyspace-events Kl"));
  REDIS_CHECK_ERROR(context_, reply);
  freeReplyObject(reply);

  // Connect to async context
  redisAsyncContext *async_context = nullptr;
  RAY_CHECK_OK(ConnectWithRetries(address, port, redisAsyncConnect, &async_context));
  RAY_CHECK_OK(AuthenticateRedis(async_context, password));
  redis_async_context_.reset(new RedisAsyncContext(async_context));
  SetDisconnectCallback(redis_async_context_.get());

  // Connect to subscribe context
  redisAsyncContext *subscribe_context = nullptr;
  RAY_CHECK_OK(ConnectWithRetries(address, port, redisAsyncConnect, &subscribe_context));
  RAY_CHECK_OK(AuthenticateRedis(subscribe_context, password));
  async_redis_subscribe_context_.reset(new RedisAsyncContext(subscribe_context));
  SetDisconnectCallback(async_redis_subscribe_context_.get());

  return Status::OK();
}
// Run an arbitrary Redis command asynchronously with no reply callback.
Status RedisContext::RunArgvAsync(const std::vector<std::string> &args) {
  RAY_CHECK(redis_async_context_);
  // Build the parallel argv / argvlen arrays hiredis expects.
  std::vector<const char *> arg_ptrs;
  std::vector<size_t> arg_lens;
  arg_ptrs.reserve(args.size());
  arg_lens.reserve(args.size());
  for (const auto &arg : args) {
    arg_ptrs.push_back(arg.data());
    arg_lens.push_back(arg.size());
  }
  // Fire-and-forget: nullptr callback and privdata.
  return redis_async_context_->RedisAsyncCommandArgv(nullptr, nullptr, args.size(),
                                                     arg_ptrs.data(), arg_lens.data());
}
// Subscribe on the dedicated pub-sub connection. The callback is registered
// as a long-lived subscription callback (never auto-removed by
// ProcessCallback); its index is returned through `out_callback_index` so the
// caller can cancel it later.
Status RedisContext::SubscribeAsync(const ClientID &client_id,
                                    const TablePubsub pubsub_channel,
                                    const RedisCallback &redisCallback,
                                    int64_t *out_callback_index) {
  RAY_CHECK(pubsub_channel != TablePubsub::NO_PUBLISH)
      << "Client requested subscribe on a table that does not support pubsub";
  RAY_CHECK(async_redis_subscribe_context_);

  int64_t callback_index =
      RedisCallbackManager::instance().add(redisCallback, true, io_service_);
  RAY_CHECK(out_callback_index != nullptr);
  *out_callback_index = callback_index;

  Status status = Status::OK();
  if (client_id.IsNil()) {
    // Subscribe to all messages.
    // Channel name is just the pubsub-channel enum value (%d).
    std::string redis_command = "SUBSCRIBE %d";
    status = async_redis_subscribe_context_->RedisAsyncCommand(
        reinterpret_cast<redisCallbackFn *>(&GlobalRedisCallback),
        reinterpret_cast<void *>(callback_index), redis_command.c_str(), pubsub_channel);
  } else {
    // Subscribe only to messages sent to this client.
    // Channel name is "<channel>:<client_id bytes>" (%d:%b).
    std::string redis_command = "SUBSCRIBE %d:%b";
    status = async_redis_subscribe_context_->RedisAsyncCommand(
        reinterpret_cast<redisCallbackFn *>(&GlobalRedisCallback),
        reinterpret_cast<void *>(callback_index), redis_command.c_str(), pubsub_channel,
        client_id.Data(), client_id.Size());
  }
  return status;
}
} // namespace gcs
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_context.h
|
C/C++ Header
|
#ifndef RAY_GCS_REDIS_CONTEXT_H
#define RAY_GCS_REDIS_CONTEXT_H
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_map>
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/util/logging.h"
#include "ray/gcs/redis_async_context.h"
#include "ray/protobuf/gcs.pb.h"
extern "C" {
#include "hiredis/async.h"
#include "hiredis/hiredis.h"
}
struct redisContext;
struct redisAsyncContext;
namespace ray {
namespace gcs {
using rpc::TablePrefix;
using rpc::TablePubsub;
/// A simple reply wrapper for redis reply.
/// Parses a raw hiredis `redisReply` once at construction; the typed
/// ReadAs* accessors assert that the stored type matches.
class CallbackReply {
 public:
  explicit CallbackReply(redisReply *redis_reply);

  /// Whether this reply is `nil` type reply.
  bool IsNil() const;

  /// Read this reply data as an integer.
  int64_t ReadAsInteger() const;

  /// Read this reply data as a status.
  Status ReadAsStatus() const;

  /// Read this reply data as a string.
  ///
  /// Note that this will return an empty string if
  /// the type of this reply is `nil` or `status`.
  std::string ReadAsString() const;

  /// Read this reply data as pub-sub data.
  std::string ReadAsPubsubData() const;

 private:
  /// Flag indicating the type of reply this represents.
  int reply_type_;

  /// Reply data if reply_type_ is REDIS_REPLY_INTEGER.
  int64_t int_reply_;

  /// Reply data if reply_type_ is REDIS_REPLY_STATUS.
  Status status_reply_;

  /// Reply data if reply_type_ is REDIS_REPLY_STRING or REDIS_REPLY_ARRAY.
  /// Note that REDIS_REPLY_ARRAY is only used for pub-sub data.
  std::string string_reply_;
};
/// Every callback should take in a vector of the results from the Redis
/// operation.
using RedisCallback = std::function<void(std::shared_ptr<CallbackReply>)>;
void GlobalRedisCallback(void *c, void *r, void *privdata);
/// Process-wide registry mapping integer indices to Redis callbacks, so an
/// index can be carried through hiredis' `void *privdata` slot and resolved
/// in GlobalRedisCallback.
class RedisCallbackManager {
 public:
  static RedisCallbackManager &instance() {
    static RedisCallbackManager instance;
    return instance;
  }

  struct CallbackItem : public std::enable_shared_from_this<CallbackItem> {
    CallbackItem() = default;

    CallbackItem(const RedisCallback &callback, bool is_subscription, int64_t start_time,
                 boost::asio::io_service &io_service)
        : callback_(callback),
          is_subscription_(is_subscription),
          start_time_(start_time),
          io_service_(&io_service) {}

    /// Post the user callback onto the io_service. Capturing `self` keeps
    /// this item alive until the posted handler has run.
    void Dispatch(std::shared_ptr<CallbackReply> &reply) {
      std::shared_ptr<CallbackItem> self = shared_from_this();
      if (callback_ != nullptr) {
        io_service_->post([self, reply]() { self->callback_(std::move(reply)); });
      }
    }

    // User callback; may be empty (Dispatch then does nothing).
    RedisCallback callback_;
    // Subscription callbacks are long-lived and not auto-removed after firing.
    bool is_subscription_;
    // Registration time in microseconds (used for latency stats).
    int64_t start_time_;
    // Event loop the callback is dispatched on; non-owning.
    boost::asio::io_service *io_service_;
  };

  /// Register a callback and return its index.
  int64_t add(const RedisCallback &function, bool is_subscription,
              boost::asio::io_service &io_service);

  /// Look up a callback by index; the index must exist.
  std::shared_ptr<CallbackItem> get(int64_t callback_index);

  /// Remove a callback.
  void remove(int64_t callback_index);

 private:
  RedisCallbackManager() : num_callbacks_(0){};

  ~RedisCallbackManager() {}

  // Protects num_callbacks_ and callback_items_.
  std::mutex mutex_;

  int64_t num_callbacks_ = 0;
  std::unordered_map<int64_t, std::shared_ptr<CallbackItem>> callback_items_;
};
/// RedisContext bundles one synchronous hiredis connection and two
/// asynchronous ones (one for commands, one dedicated to pub-sub), all to
/// the same Redis server (see Connect()).
class RedisContext {
 public:
  /// \param io_service Event loop on which async reply callbacks are posted.
  RedisContext(boost::asio::io_service &io_service)
      : io_service_(io_service), context_(nullptr) {}

  ~RedisContext();

  /// Connect and authenticate all three underlying contexts. Must be called
  /// before any other operation.
  /// NOTE(review): `sharding` is unused by the current definition in the
  /// .cc -- confirm intended behavior.
  Status Connect(const std::string &address, int port, bool sharding,
                 const std::string &password);

  /// Run an operation on some table key synchronously.
  ///
  /// \param command The command to run. This must match a registered Ray Redis
  /// command. These are strings of the format "RAY.TABLE_*".
  /// \param id The table key to run the operation at.
  /// \param data The data to add to the table key, if any.
  /// \param length The length of the data to be added, if data is provided.
  /// \param prefix The prefix of table key.
  /// \param pubsub_channel The channel that update operations to the table
  /// should be published on.
  /// \param log_length The RAY.TABLE_APPEND command takes in an optional index
  /// at which the data must be appended. For all other commands, set to
  /// -1 for unused. If set, then data must be provided.
  /// \return The reply from redis.
  template <typename ID>
  std::shared_ptr<CallbackReply> RunSync(const std::string &command, const ID &id,
                                         const void *data, size_t length,
                                         const TablePrefix prefix,
                                         const TablePubsub pubsub_channel,
                                         int log_length = -1);

  /// Run an operation on some table key.
  ///
  /// \param command The command to run. This must match a registered Ray Redis
  /// command. These are strings of the format "RAY.TABLE_*".
  /// \param id The table key to run the operation at.
  /// \param data The data to add to the table key, if any.
  /// \param length The length of the data to be added, if data is provided.
  /// \param prefix The prefix of table key.
  /// \param pubsub_channel The channel that update operations to the table
  /// should be published on.
  /// \param redisCallback The Redis callback function.
  /// \param log_length The RAY.TABLE_APPEND command takes in an optional index
  /// at which the data must be appended. For all other commands, set to
  /// -1 for unused. If set, then data must be provided.
  /// \return Status.
  template <typename ID>
  Status RunAsync(const std::string &command, const ID &id, const void *data,
                  size_t length, const TablePrefix prefix,
                  const TablePubsub pubsub_channel, RedisCallback redisCallback,
                  int log_length = -1);

  /// Run an arbitrary Redis command without a callback.
  ///
  /// \param args The vector of command args to pass to Redis.
  /// \return Status.
  Status RunArgvAsync(const std::vector<std::string> &args);

  /// Subscribe to a specific Pub-Sub channel.
  ///
  /// \param client_id The client ID that subscribe this message.
  /// \param pubsub_channel The Pub-Sub channel to subscribe to.
  /// \param redisCallback The callback function that the notification calls.
  /// \param out_callback_index The output pointer to callback index.
  /// \return Status.
  Status SubscribeAsync(const ClientID &client_id, const TablePubsub pubsub_channel,
                        const RedisCallback &redisCallback, int64_t *out_callback_index);

  /// The synchronous connection; asserts that Connect() was called.
  redisContext *sync_context() {
    RAY_CHECK(context_);
    return context_;
  }

  /// The async command connection; asserts that Connect() was called.
  RedisAsyncContext &async_context() {
    RAY_CHECK(redis_async_context_);
    return *redis_async_context_;
  }

  /// The async pub-sub connection; asserts that Connect() was called.
  RedisAsyncContext &subscribe_context() {
    RAY_CHECK(async_redis_subscribe_context_);
    return *async_redis_subscribe_context_;
  }

 private:
  boost::asio::io_service &io_service_;
  // Synchronous connection; freed in the destructor.
  redisContext *context_;
  // Async connection for commands.
  std::unique_ptr<RedisAsyncContext> redis_async_context_;
  // Async connection dedicated to SUBSCRIBE traffic.
  std::unique_ptr<RedisAsyncContext> async_redis_subscribe_context_;
};
// Dispatch an async table command. The registered callback's integer index
// travels through hiredis' privdata pointer and is resolved again in
// GlobalRedisCallback. The three branches only differ in which trailing
// arguments the format string carries.
template <typename ID>
Status RedisContext::RunAsync(const std::string &command, const ID &id, const void *data,
                              size_t length, const TablePrefix prefix,
                              const TablePubsub pubsub_channel,
                              RedisCallback redisCallback, int log_length) {
  RAY_CHECK(redis_async_context_);
  int64_t callback_index =
      RedisCallbackManager::instance().add(redisCallback, false, io_service_);
  Status status = Status::OK();
  if (length > 0) {
    if (log_length >= 0) {
      // prefix, channel, id, data, append index.
      std::string redis_command = command + " %d %d %b %b %d";
      status = redis_async_context_->RedisAsyncCommand(
          reinterpret_cast<redisCallbackFn *>(&GlobalRedisCallback),
          reinterpret_cast<void *>(callback_index), redis_command.c_str(), prefix,
          pubsub_channel, id.Data(), id.Size(), data, length, log_length);
    } else {
      // prefix, channel, id, data.
      std::string redis_command = command + " %d %d %b %b";
      status = redis_async_context_->RedisAsyncCommand(
          reinterpret_cast<redisCallbackFn *>(&GlobalRedisCallback),
          reinterpret_cast<void *>(callback_index), redis_command.c_str(), prefix,
          pubsub_channel, id.Data(), id.Size(), data, length);
    }
  } else {
    // No payload; a log index without data would be a caller error.
    RAY_CHECK(log_length == -1);
    std::string redis_command = command + " %d %d %b";
    status = redis_async_context_->RedisAsyncCommand(
        reinterpret_cast<redisCallbackFn *>(&GlobalRedisCallback),
        reinterpret_cast<void *>(callback_index), redis_command.c_str(), prefix,
        pubsub_channel, id.Data(), id.Size());
  }
  return status;
}
template <typename ID>
std::shared_ptr<CallbackReply> RedisContext::RunSync(
const std::string &command, const ID &id, const void *data, size_t length,
const TablePrefix prefix, const TablePubsub pubsub_channel, int log_length) {
RAY_CHECK(context_);
void *redis_reply = nullptr;
if (length > 0) {
if (log_length >= 0) {
std::string redis_command = command + " %d %d %b %b %d";
redis_reply = redisCommand(context_, redis_command.c_str(), prefix, pubsub_channel,
id.Data(), id.Size(), data, length, log_length);
} else {
std::string redis_command = command + " %d %d %b %b";
redis_reply = redisCommand(context_, redis_command.c_str(), prefix, pubsub_channel,
id.Data(), id.Size(), data, length);
}
} else {
RAY_CHECK(log_length == -1);
std::string redis_command = command + " %d %d %b";
redis_reply = redisCommand(context_, redis_command.c_str(), prefix, pubsub_channel,
id.Data(), id.Size());
}
if (redis_reply == nullptr) {
RAY_LOG(INFO) << "Run redis command failed , err is " << context_->err;
return nullptr;
} else {
std::shared_ptr<CallbackReply> callback_reply =
std::make_shared<CallbackReply>(reinterpret_cast<redisReply *>(redis_reply));
freeReplyObject(redis_reply);
return callback_reply;
}
}
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_REDIS_CONTEXT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_gcs_client.cc
|
C++
|
#include "ray/gcs/redis_gcs_client.h"
#include <unistd.h>
#include "ray/common/ray_config.h"
#include "ray/gcs/redis_accessor.h"
#include "ray/gcs/redis_context.h"
/// Query the primary Redis shard for the number and addresses of all data
/// shards, retrying until the information is published or the configured
/// retry budget is exhausted (in which case RAY_CHECK aborts).
///
/// \param context Connected synchronous hiredis context for the primary shard.
/// \param addresses Output: one "ip" string per shard.
/// \param ports Output: one port per shard, parallel to `addresses`.
static void GetRedisShards(redisContext *context, std::vector<std::string> &addresses,
                           std::vector<int> &ports) {
  // Get the total number of Redis shards in the system.
  int num_attempts = 0;
  redisReply *reply = nullptr;
  while (num_attempts < RayConfig::instance().redis_db_connect_retries()) {
    // Try to read the number of Redis shards from the primary shard. If the
    // entry is present, exit.
    reply = reinterpret_cast<redisReply *>(redisCommand(context, "GET NumRedisShards"));
    // Guard against a failed command (null reply) before dereferencing.
    if (reply != nullptr && reply->type != REDIS_REPLY_NIL) {
      break;
    }
    // Sleep for a little, and try again if the entry isn't there yet.
    if (reply != nullptr) {
      freeReplyObject(reply);
      reply = nullptr;
    }
    usleep(RayConfig::instance().redis_db_connect_wait_milliseconds() * 1000);
    num_attempts++;
  }
  RAY_CHECK(num_attempts < RayConfig::instance().redis_db_connect_retries())
      << "No entry found for NumRedisShards";
  RAY_CHECK(reply->type == REDIS_REPLY_STRING)
      << "Expected string, found Redis type " << reply->type << " for NumRedisShards";
  int num_redis_shards = atoi(reply->str);
  RAY_CHECK(num_redis_shards >= 1) << "Expected at least one Redis shard, "
                                   << "found " << num_redis_shards;
  freeReplyObject(reply);
  reply = nullptr;

  // Get the addresses of all of the Redis shards.
  num_attempts = 0;
  // Track the shard count seen on the last attempt outside the loop so the
  // failure message below never reads a freed reply (the previous code
  // streamed reply->elements after freeReplyObject on exhaustion).
  size_t num_shards_found = 0;
  while (num_attempts < RayConfig::instance().redis_db_connect_retries()) {
    // Try to read the Redis shard locations from the primary shard. If we find
    // that all of them are present, exit.
    reply =
        reinterpret_cast<redisReply *>(redisCommand(context, "LRANGE RedisShards 0 -1"));
    num_shards_found = (reply != nullptr) ? reply->elements : 0;
    if (static_cast<int>(num_shards_found) == num_redis_shards) {
      break;
    }
    // Sleep for a little, and try again if not all Redis shard addresses have
    // been added yet.
    if (reply != nullptr) {
      freeReplyObject(reply);
      reply = nullptr;
    }
    usleep(RayConfig::instance().redis_db_connect_wait_milliseconds() * 1000);
    num_attempts++;
  }
  RAY_CHECK(num_attempts < RayConfig::instance().redis_db_connect_retries())
      << "Expected " << num_redis_shards << " Redis shard addresses, found "
      << num_shards_found;

  // Parse the Redis shard addresses, each formatted as "ip:port".
  for (size_t i = 0; i < reply->elements; ++i) {
    RAY_CHECK(reply->element[i]->type == REDIS_REPLY_STRING);
    std::string addr;
    std::stringstream ss(reply->element[i]->str);
    getline(ss, addr, ':');
    addresses.push_back(addr);
    int port;
    ss >> port;
    ports.push_back(port);
  }
  freeReplyObject(reply);
}
namespace ray {
namespace gcs {
/// Default construction: delegate to the two-argument constructor so the
/// base-class initialization is written exactly once.
RedisGcsClient::RedisGcsClient(const GcsClientOptions &options)
    : RedisGcsClient(options, CommandType::kRegular) {}

/// Test-only constructor allowing the command type to be overridden.
RedisGcsClient::RedisGcsClient(const GcsClientOptions &options, CommandType command_type)
    : GcsClient(options), command_type_(command_type) {}
/// Connect to the primary Redis shard, discover (or fall back to) the data
/// shards, attach everything to the event loop, then build all tables and
/// accessors. Not thread safe; must be called before any other method.
Status RedisGcsClient::Connect(boost::asio::io_service &io_service) {
  RAY_CHECK(!is_connected_);
  if (options_.server_ip_.empty()) {
    RAY_LOG(ERROR) << "Failed to connect, gcs service address is empty.";
    return Status::Invalid("gcs service address is invalid!");
  }
  // Connect the primary context first; shard discovery goes through it.
  primary_context_ = std::make_shared<RedisContext>(io_service);
  RAY_CHECK_OK(primary_context_->Connect(options_.server_ip_, options_.server_port_,
                                         /*sharding=*/true,
                                         /*password=*/options_.password_));
  if (!options_.is_test_client_) {
    // Moving sharding into constructor defaultly means that sharding = true.
    // This design decision may worth a look.
    std::vector<std::string> addresses;
    std::vector<int> ports;
    GetRedisShards(primary_context_->sync_context(), addresses, ports);
    // No shards registered: fall back to the primary acting as the only shard.
    if (addresses.empty()) {
      RAY_CHECK(ports.empty());
      addresses.push_back(options_.server_ip_);
      ports.push_back(options_.server_port_);
    }
    for (size_t i = 0; i < addresses.size(); ++i) {
      // Populate shard_contexts.
      shard_contexts_.push_back(std::make_shared<RedisContext>(io_service));
      RAY_CHECK_OK(shard_contexts_[i]->Connect(addresses[i], ports[i], /*sharding=*/true,
                                               /*password=*/options_.password_));
    }
  } else {
    // Test client: single shard pointing at the primary server.
    shard_contexts_.push_back(std::make_shared<RedisContext>(io_service));
    RAY_CHECK_OK(shard_contexts_[0]->Connect(options_.server_ip_, options_.server_port_,
                                             /*sharding=*/true,
                                             /*password=*/options_.password_));
  }
  Attach(io_service);
  // Tables below live on the primary shard only.
  actor_table_.reset(new ActorTable({primary_context_}, this));
  // TODO(micafan) Modify ClientTable' Constructor(remove ClientID) in future.
  // We will use NodeID instead of ClientID.
  // For worker/driver, it might not have this field(NodeID).
  // For raylet, NodeID should be initialized in raylet layer(not here).
  client_table_.reset(new ClientTable({primary_context_}, this));
  error_table_.reset(new ErrorTable({primary_context_}, this));
  job_table_.reset(new JobTable({primary_context_}, this));
  heartbeat_batch_table_.reset(new HeartbeatBatchTable({primary_context_}, this));
  // Tables below would be sharded.
  object_table_.reset(new ObjectTable(shard_contexts_, this));
  raylet_task_table_.reset(new raylet::TaskTable(shard_contexts_, this, command_type_));
  task_reconstruction_log_.reset(new TaskReconstructionLog(shard_contexts_, this));
  task_lease_table_.reset(new TaskLeaseTable(shard_contexts_, this));
  heartbeat_table_.reset(new HeartbeatTable(shard_contexts_, this));
  profile_table_.reset(new ProfileTable(shard_contexts_, this));
  actor_checkpoint_table_.reset(new ActorCheckpointTable(shard_contexts_, this));
  actor_checkpoint_id_table_.reset(new ActorCheckpointIdTable(shard_contexts_, this));
  resource_table_.reset(new DynamicResourceTable({primary_context_}, this));
  worker_failure_table_.reset(new WorkerFailureTable(shard_contexts_, this));
  // Accessors wrap the tables above; they must be created after them.
  actor_accessor_.reset(new RedisActorInfoAccessor(this));
  job_accessor_.reset(new RedisJobInfoAccessor(this));
  object_accessor_.reset(new RedisObjectInfoAccessor(this));
  node_accessor_.reset(new RedisNodeInfoAccessor(this));
  task_accessor_.reset(new RedisTaskInfoAccessor(this));
  error_accessor_.reset(new RedisErrorInfoAccessor(this));
  stats_accessor_.reset(new RedisStatsInfoAccessor(this));
  worker_accessor_.reset(new RedisWorkerInfoAccessor(this));
  is_connected_ = true;
  RAY_LOG(INFO) << "RedisGcsClient Connected.";
  return Status::OK();
}
/// Mark the client as disconnected. Note: this only flips the connection
/// flag; it does not tear down the underlying Redis contexts.
void RedisGcsClient::Disconnect() {
  RAY_CHECK(is_connected_);
  is_connected_ = false;
  RAY_LOG(INFO) << "RedisGcsClient Disconnected.";
  // TODO(micafan): Synchronously unregister node if this client is Raylet.
}
/// Attach every Redis context (shards + primary) to the given event loop by
/// wrapping each in a RedisAsioClient. Must be called exactly once.
void RedisGcsClient::Attach(boost::asio::io_service &io_service) {
  // Take care of sharding contexts.
  RAY_CHECK(shard_asio_async_clients_.empty()) << "Attach shall be called only once";
  // Iterate by const reference: copying the shared_ptr each iteration costs
  // an atomic refcount increment/decrement for no benefit.
  for (const auto &context : shard_contexts_) {
    shard_asio_async_clients_.emplace_back(
        new RedisAsioClient(io_service, context->async_context()));
    shard_asio_subscribe_clients_.emplace_back(
        new RedisAsioClient(io_service, context->subscribe_context()));
  }
  // The primary context gets its own pair of asio adapters.
  asio_async_auxiliary_client_.reset(
      new RedisAsioClient(io_service, primary_context_->async_context()));
  asio_subscribe_auxiliary_client_.reset(
      new RedisAsioClient(io_service, primary_context_->subscribe_context()));
}
/// Build a multi-line summary by concatenating each table's own debug string.
std::string RedisGcsClient::DebugString() const {
  std::ostringstream stream;
  stream << "RedisGcsClient:"
         << "\n- TaskTable: " << raylet_task_table_->DebugString()
         << "\n- ActorTable: " << actor_table_->DebugString()
         << "\n- TaskReconstructionLog: " << task_reconstruction_log_->DebugString()
         << "\n- TaskLeaseTable: " << task_lease_table_->DebugString()
         << "\n- HeartbeatTable: " << heartbeat_table_->DebugString()
         << "\n- ErrorTable: " << error_table_->DebugString()
         << "\n- ProfileTable: " << profile_table_->DebugString()
         << "\n- ClientTable: " << client_table_->DebugString()
         << "\n- JobTable: " << job_table_->DebugString();
  return stream.str();
}
// Table accessors. Each returns a reference to the table constructed in
// Connect(); calling any of them before a successful Connect() dereferences
// an empty unique_ptr.
ObjectTable &RedisGcsClient::object_table() { return *object_table_; }
raylet::TaskTable &RedisGcsClient::raylet_task_table() { return *raylet_task_table_; }
ActorTable &RedisGcsClient::actor_table() { return *actor_table_; }
WorkerFailureTable &RedisGcsClient::worker_failure_table() {
  return *worker_failure_table_;
}
TaskReconstructionLog &RedisGcsClient::task_reconstruction_log() {
  return *task_reconstruction_log_;
}
TaskLeaseTable &RedisGcsClient::task_lease_table() { return *task_lease_table_; }
ClientTable &RedisGcsClient::client_table() { return *client_table_; }
HeartbeatTable &RedisGcsClient::heartbeat_table() { return *heartbeat_table_; }
HeartbeatBatchTable &RedisGcsClient::heartbeat_batch_table() {
  return *heartbeat_batch_table_;
}
ErrorTable &RedisGcsClient::error_table() { return *error_table_; }
JobTable &RedisGcsClient::job_table() { return *job_table_; }
ProfileTable &RedisGcsClient::profile_table() { return *profile_table_; }
ActorCheckpointTable &RedisGcsClient::actor_checkpoint_table() {
  return *actor_checkpoint_table_;
}
ActorCheckpointIdTable &RedisGcsClient::actor_checkpoint_id_table() {
  return *actor_checkpoint_id_table_;
}
DynamicResourceTable &RedisGcsClient::resource_table() { return *resource_table_; }
} // namespace gcs
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_gcs_client.h
|
C/C++ Header
|
#ifndef RAY_GCS_REDIS_GCS_CLIENT_H
#define RAY_GCS_REDIS_GCS_CLIENT_H
#include <map>
#include <string>
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/gcs/asio.h"
#include "ray/gcs/gcs_client.h"
#include "ray/gcs/tables.h"
#include "ray/util/logging.h"
namespace ray {
namespace gcs {
class RedisContext;
/// A GcsClient backed directly by Redis: a primary shard for metadata plus a
/// set of data shards for the high-volume tables.
class RAY_EXPORT RedisGcsClient : public GcsClient {
 public:
  /// Constructor of RedisGcsClient.
  /// Connect() must be called(and return ok) before you call any other methods.
  /// TODO(micafan) To read and write from the GCS tables requires a further
  /// call to Connect() to the client table. Will fix this in next pr.
  ///
  /// \param options Options of this client, e.g. server address, password and so on.
  RedisGcsClient(const GcsClientOptions &options);

  /// This constructor is only used for testing.
  /// Connect() must be called(and return ok) before you call any other methods.
  ///
  /// \param options Options of this client, e.g. server address, password and so on.
  /// \param command_type The commands issued type.
  RedisGcsClient(const GcsClientOptions &options, CommandType command_type);

  /// Connect to GCS Service. Non-thread safe.
  /// Call this function before calling other functions.
  ///
  /// \param io_service The event loop for this client.
  /// Must be single-threaded io_service (get more information from RedisAsioClient).
  ///
  /// \return Status
  Status Connect(boost::asio::io_service &io_service) override;

  /// Disconnect with GCS Service. Non-thread safe.
  void Disconnect() override;

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const override;

  // We also need something to export generic code to run on workers from the
  // driver (to set the PYTHONPATH)
  using GetExportCallback = std::function<void(const std::string &data)>;
  Status AddExport(const std::string &job_id, std::string &export_data);
  Status GetExport(const std::string &job_id, int64_t export_index,
                   const GetExportCallback &done_callback);

  /// Contexts that talk to the sharded data servers (returned by copy).
  std::vector<std::shared_ptr<RedisContext>> shard_contexts() { return shard_contexts_; }

  /// Context that talks to the primary shard.
  std::shared_ptr<RedisContext> primary_context() { return primary_context_; }

  /// The following xxx_table methods implement the Accessor interfaces.
  /// Implements the Actors() interface.
  ActorTable &actor_table();
  ActorCheckpointTable &actor_checkpoint_table();
  ActorCheckpointIdTable &actor_checkpoint_id_table();
  /// Implements the Jobs() interface.
  JobTable &job_table();
  /// Implements the Objects() interface.
  ObjectTable &object_table();
  /// Implements the Nodes() interface.
  ClientTable &client_table();
  HeartbeatTable &heartbeat_table();
  HeartbeatBatchTable &heartbeat_batch_table();
  DynamicResourceTable &resource_table();
  /// Implements the Tasks() interface.
  raylet::TaskTable &raylet_task_table();
  TaskLeaseTable &task_lease_table();
  TaskReconstructionLog &task_reconstruction_log();
  /// Implements the Errors() interface.
  // TODO: Some API for getting the error on the driver
  ErrorTable &error_table();
  /// Implements the Stats() interface.
  ProfileTable &profile_table();
  /// Implements the Workers() interface.
  WorkerFailureTable &worker_failure_table();

 private:
  /// Attach this client to an asio event loop. Note that only
  /// one event loop should be attached at a time.
  void Attach(boost::asio::io_service &io_service);

  // GCS command type. If CommandType::kChain, chain-replicated versions of the tables
  // might be used, if available.
  CommandType command_type_{CommandType::kUnknown};

  // Tables constructed by Connect(); null before that.
  std::unique_ptr<ObjectTable> object_table_;
  std::unique_ptr<raylet::TaskTable> raylet_task_table_;
  std::unique_ptr<ActorTable> actor_table_;
  std::unique_ptr<TaskReconstructionLog> task_reconstruction_log_;
  std::unique_ptr<TaskLeaseTable> task_lease_table_;
  std::unique_ptr<HeartbeatTable> heartbeat_table_;
  std::unique_ptr<HeartbeatBatchTable> heartbeat_batch_table_;
  std::unique_ptr<ErrorTable> error_table_;
  std::unique_ptr<ProfileTable> profile_table_;
  std::unique_ptr<ClientTable> client_table_;
  std::unique_ptr<ActorCheckpointTable> actor_checkpoint_table_;
  std::unique_ptr<ActorCheckpointIdTable> actor_checkpoint_id_table_;
  std::unique_ptr<DynamicResourceTable> resource_table_;
  std::unique_ptr<WorkerFailureTable> worker_failure_table_;

  // The following contexts write to the data shard
  std::vector<std::shared_ptr<RedisContext>> shard_contexts_;
  std::vector<std::unique_ptr<RedisAsioClient>> shard_asio_async_clients_;
  std::vector<std::unique_ptr<RedisAsioClient>> shard_asio_subscribe_clients_;
  // The following context writes everything to the primary shard
  std::shared_ptr<RedisContext> primary_context_;
  std::unique_ptr<JobTable> job_table_;
  std::unique_ptr<RedisAsioClient> asio_async_auxiliary_client_;
  std::unique_ptr<RedisAsioClient> asio_subscribe_auxiliary_client_;
};
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_REDIS_GCS_CLIENT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_module/chain_module.h
|
C/C++ Header
|
#ifndef RAY_CHAIN_MODULE_H_
#define RAY_CHAIN_MODULE_H_
#include <functional>
#include "redismodule.h"
// NOTE(zongheng): this duplicated declaration serves as forward-declaration
// only. The implementation is supposed to be linked in from credis. In
// principle, we can expose a header from credis and simple include that header.
// This is left as future work.
//
// Concrete definitions from credis (from an example commit):
// https://github.com/ray-project/credis/blob/7eae7f2e58d16dfa1a95b5dfab02549f54b94e5d/src/member.cc#L41
// https://github.com/ray-project/credis/blob/7eae7f2e58d16dfa1a95b5dfab02549f54b94e5d/src/master.cc#L36
// Typical usage to make an existing redismodule command chain-compatible:
//
// extern RedisChainModule module;
// int MyCmd_RedisModuleCmd(...) {
// return module.Mutate(..., NodeFunc, TailFunc);
// }
//
// See, for instance, ChainTableAdd_RedisCommand in ray_redis_module.cc.
/// Forward-declared interface to credis chain replication; the implementation
/// is linked in from credis (see the file header comment above).
class RedisChainModule {
 public:
  // A function that runs on every node in the chain. Type:
  // (context, argv, argc, (can be nullptr) mutated_key_str) -> int
  //
  // (Advanced) The optional fourth arg can be used in the following way:
  //
  //   RedisModuleString* redis_key_str = nullptr;
  //   node_func(ctx, argv, argc, &redis_key_str);
  //   // "redis_key_str" now points to the RedisModuleString whose contents
  //   // is mutated by "node_func".
  //
  // If the fourth arg is passed, NodeFunc *must* fill in the key being mutated.
  // It is okay for this NodeFunc to call "RM_FreeString(mutated_key_str)" after
  // assigning the fourth arg, since that call presumably only decrements a ref
  // count.
  using NodeFunc = std::function<int(RedisModuleCtx *, RedisModuleString **, int,
                                     RedisModuleString **)>;

  // A function that (1) runs only after all NodeFunc's have run, and (2) runs
  // once on the tail. A typical usage is to publish a write.
  using TailFunc = std::function<int(RedisModuleCtx *, RedisModuleString **, int)>;

  // TODO(zongheng): document the RM_Reply semantics.
  // Runs "node_func" on every node in the chain; after the tail node has run it
  // too, finalizes the mutation by running "tail_func".
  //
  // If node_func() returns non-zero, it is treated as an error and the entire
  // update will terminate early, without running subsequent node_func() and the
  // final tail_func().
  //
  // Returns an int per the redis-module command convention — NOTE(review):
  // presumably REDISMODULE_OK/REDISMODULE_ERR; confirm against credis.
  //
  // TODO(zongheng): currently only supports 1-node chain.
  int ChainReplicate(RedisModuleCtx *ctx, RedisModuleString **argv, int argc,
                     NodeFunc node_func, TailFunc tail_func);
};
#endif // RAY_CHAIN_MODULE_H_
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_module/ray_redis_module.cc
|
C++
|
#include <stdio.h>
#include <string.h>

#include <sstream>

#include "ray/common/common_protocol.h"
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/protobuf/gcs.pb.h"
#include "ray/util/logging.h"
#include "redis_string.h"
#include "redismodule.h"
using ray::Status;
using ray::rpc::GcsChangeMode;
using ray::rpc::GcsEntry;
using ray::rpc::TablePrefix;
using ray::rpc::TablePubsub;
#if RAY_USE_NEW_GCS
// Under this flag, ray-project/credis will be loaded. Specifically, via
// "path/redis-server --loadmodule <credis module> --loadmodule <current
// libray_redis_module>" (dlopen() under the hood) will a definition of "module"
// be supplied.
//
// All commands in this file that depend on "module" must be wrapped by "#if
// RAY_USE_NEW_GCS", until we switch to this launch configuration as the
// default.
#include "chain_module.h"
extern RedisChainModule module;
#endif
// Reply with MESSAGE and bail out of the enclosing redis command when
// CONDITION is false. Only usable where `ctx` is in scope and the function
// returns a redis-module status int.
#define REPLY_AND_RETURN_IF_FALSE(CONDITION, MESSAGE) \
  if (!(CONDITION)) {                                 \
    RedisModule_ReplyWithError(ctx, (MESSAGE));       \
    return REDISMODULE_ERR;                           \
  }

// This macro can be used at the top level of redis module.
// Evaluates STATUS once; on failure, replies with its message and bails out.
#define REPLY_AND_RETURN_IF_NOT_OK(STATUS)                       \
  {                                                              \
    auto status = (STATUS);                                      \
    if (!status.ok()) {                                          \
      RedisModule_ReplyWithError(ctx, status.message().c_str()); \
      return REDISMODULE_ERR;                                    \
    }                                                            \
  }

// Wrap a Redis command with automatic memory management.
// Defines a global-namespace FUNC that enables AutoMemory and forwards to the
// identically named command in internal_redis_commands.
#define AUTO_MEMORY(FUNC)                                               \
  int FUNC(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {   \
    RedisModule_AutoMemory(ctx);                                        \
    return internal_redis_commands::FUNC(ctx, argv, argc);              \
  }
// Commands in this namespace should not be used directly. They should first be
// wrapped with AUTO_MEMORY in the global namespace to enable automatic memory
// management.
// TODO(swang): Ideally, we would make the commands that don't have auto memory
// management inaccessible instead of just using a separate namespace.
namespace internal_redis_commands {
/// Map from pub sub channel to clients that are waiting on that channel.
std::unordered_map<std::string, std::vector<std::string>> notification_map;
/// Parse a Redis string into a TablePubsub channel.
///
/// \param out Output: the parsed channel on success.
/// \param pubsub_channel_str Redis string expected to hold an integer in the
///        open interval (TABLE_PUBSUB_MIN, TABLE_PUBSUB_MAX).
/// \return OK on success, RedisError otherwise.
Status ParseTablePubsub(TablePubsub *out, const RedisModuleString *pubsub_channel_str) {
  long long channel_value;
  if (RedisModule_StringToLongLong(pubsub_channel_str, &channel_value) !=
      REDISMODULE_OK) {
    return Status::RedisError("Pubsub channel must be a valid integer.");
  }
  const bool in_range =
      channel_value > static_cast<long long>(TablePubsub::TABLE_PUBSUB_MIN) &&
      channel_value < static_cast<long long>(TablePubsub::TABLE_PUBSUB_MAX);
  if (!in_range) {
    return Status::RedisError("Pubsub channel must be in the TablePubsub range.");
  }
  *out = static_cast<TablePubsub>(channel_value);
  return Status::OK();
}
/// Format a pubsub channel for a specific key. pubsub_channel_str should
/// contain a valid TablePubsub.
///
/// \param out Output: a RedisModuleString of the form "<channel>:<id>".
/// \return OK on success, or the ParseTablePubsub error.
Status FormatPubsubChannel(RedisModuleString **out, RedisModuleCtx *ctx,
                           const RedisModuleString *pubsub_channel_str,
                           const RedisModuleString *id) {
  // Format the pubsub channel enum to a string. TablePubsub_MAX should be more
  // than enough digits, but add 1 just in case for the null terminator.
  char pubsub_channel[static_cast<int>(TablePubsub::TABLE_PUBSUB_MAX) + 1];
  TablePubsub table_pubsub;
  RAY_RETURN_NOT_OK(ParseTablePubsub(&table_pubsub, pubsub_channel_str));
  // snprintf, unlike the previous sprintf, can never overrun the buffer even
  // if the size assumption above is ever wrong.
  snprintf(pubsub_channel, sizeof(pubsub_channel), "%d",
           static_cast<int>(table_pubsub));
  *out = RedisString_Format(ctx, "%s:%S", pubsub_channel, id);
  return Status::OK();
}
/// Parse a Redis string into a TablePrefix channel.
///
/// \param table_prefix_str Redis string expected to hold an integer in the
///        open interval (TABLE_PREFIX_MIN, TABLE_PREFIX_MAX).
/// \param out Output: the parsed prefix on success.
/// \return OK on success, RedisError otherwise.
Status ParseTablePrefix(const RedisModuleString *table_prefix_str, TablePrefix *out) {
  long long prefix_value;
  if (RedisModule_StringToLongLong(table_prefix_str, &prefix_value) != REDISMODULE_OK) {
    return Status::RedisError("Prefix must be a valid TablePrefix integer");
  }
  const bool in_range =
      prefix_value > static_cast<long long>(TablePrefix::TABLE_PREFIX_MIN) &&
      prefix_value < static_cast<long long>(TablePrefix::TABLE_PREFIX_MAX);
  if (!in_range) {
    return Status::RedisError("Prefix must be in the TablePrefix range");
  }
  *out = static_cast<TablePrefix>(prefix_value);
  return Status::OK();
}
/// Format the string for a table key. `prefix_enum` must be a valid
/// TablePrefix as a RedisModuleString. `keyname` is usually a UniqueID as a
/// RedisModuleString.
///
/// \return The prefixed key, or nullptr if `prefix_enum` does not parse.
RedisModuleString *PrefixedKeyString(RedisModuleCtx *ctx, RedisModuleString *prefix_enum,
                                     RedisModuleString *keyname) {
  TablePrefix prefix;
  const Status parse_status = ParseTablePrefix(prefix_enum, &prefix);
  if (!parse_status.ok()) {
    return nullptr;
  }
  return RedisString_Format(ctx, "%s%S", TablePrefix_Name(prefix).c_str(), keyname);
}
// TODO(swang): This helper function should be deprecated by the version below,
// which uses enums for table prefixes.
/// Open the Redis key "<prefix><keyname>" in the given mode, optionally
/// passing the formatted key name back to the caller via `mutated_key_str`.
RedisModuleKey *OpenPrefixedKey(RedisModuleCtx *ctx, const char *prefix,
                                RedisModuleString *keyname, int mode,
                                RedisModuleString **mutated_key_str) {
  RedisModuleString *full_key = RedisString_Format(ctx, "%s%S", prefix, keyname);
  // Pass out the key being mutated, should the caller request so.
  if (mutated_key_str != nullptr) {
    *mutated_key_str = full_key;
  }
  return reinterpret_cast<RedisModuleKey *>(RedisModule_OpenKey(ctx, full_key, mode));
}
/// Enum-based variant: parse `prefix_enum` into a TablePrefix, then open the
/// key "<prefix name><keyname>".
///
/// \return OK on success, or the ParseTablePrefix error.
Status OpenPrefixedKey(RedisModuleKey **out, RedisModuleCtx *ctx,
                       RedisModuleString *prefix_enum, RedisModuleString *keyname,
                       int mode, RedisModuleString **mutated_key_str) {
  TablePrefix prefix;
  RAY_RETURN_NOT_OK(ParseTablePrefix(prefix_enum, &prefix));
  const std::string prefix_name = TablePrefix_Name(prefix);
  *out = OpenPrefixedKey(ctx, prefix_name.c_str(), keyname, mode, mutated_key_str);
  return Status::OK();
}
/// Convenience overload that does not pass out the mutated key name.
RedisModuleKey *OpenPrefixedKey(RedisModuleCtx *ctx, const char *prefix,
                                RedisModuleString *keyname, int mode) {
  return OpenPrefixedKey(ctx, prefix, keyname, mode,
                         /*mutated_key_str=*/nullptr);
}

/// Convenience overload of the enum-based variant that does not pass out the
/// mutated key name.
Status OpenPrefixedKey(RedisModuleKey **out, RedisModuleCtx *ctx,
                       RedisModuleString *prefix_enum, RedisModuleString *keyname,
                       int mode) {
  return OpenPrefixedKey(out, ctx, prefix_enum, keyname, mode,
                         /*mutated_key_str=*/nullptr);
}
/// Open the key used to store the channels that should be published to when an
/// update happens at the given keyname.
///
/// \param out Output: the broadcast key, "BCAST:<channel>:<keyname>".
/// \return OK on success, or the FormatPubsubChannel error.
Status GetBroadcastKey(RedisModuleCtx *ctx, RedisModuleString *pubsub_channel_str,
                       RedisModuleString *keyname, std::string *out) {
  RedisModuleString *channel = nullptr;
  RAY_RETURN_NOT_OK(FormatPubsubChannel(&channel, ctx, pubsub_channel_str, keyname));
  RedisModuleString *broadcast_key = RedisString_Format(ctx, "BCAST:%S", channel);
  *out = RedisString_ToString(broadcast_key);
  return Status::OK();
}
/// A helper function that creates `GcsEntry` protobuf object.
///
/// \param[in] id Id of the entry.
/// \param[in] change_mode Change mode of the entry.
/// \param[in] entries Vector of entries.
/// \param[out] result The created `GcsEntry` object.
inline void CreateGcsEntry(RedisModuleString *id, GcsChangeMode change_mode,
                           const std::vector<RedisModuleString *> &entries,
                           GcsEntry *result) {
  size_t id_size = 0;
  const char *id_data = RedisModule_StringPtrLen(id, &id_size);
  result->set_id(id_data, id_size);
  result->set_change_mode(change_mode);
  for (RedisModuleString *entry : entries) {
    size_t entry_size = 0;
    const char *entry_data = RedisModule_StringPtrLen(entry, &entry_size);
    result->add_entries(entry_data, entry_size);
  }
}
/// Helper method to publish formatted data to target channel.
///
/// \param pubsub_channel_str The pubsub channel name that notifications for
/// this key should be published to. When publishing to a specific client, the
/// channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key that the notification is about.
/// \param data_buffer The data to publish, which is a GcsEntry buffer.
/// \return OK if there is no error during a publish.
int PublishDataHelper(RedisModuleCtx *ctx, RedisModuleString *pubsub_channel_str,
                      RedisModuleString *id, RedisModuleString *data_buffer) {
  // Write the data back to any subscribers that are listening to all table
  // notifications.
  RedisModuleCallReply *publish_reply =
      RedisModule_Call(ctx, "PUBLISH", "ss", pubsub_channel_str, data_buffer);
  if (publish_reply == nullptr) {
    return RedisModule_ReplyWithError(ctx, "error during PUBLISH");
  }
  std::string notification_key;
  REPLY_AND_RETURN_IF_NOT_OK(
      GetBroadcastKey(ctx, pubsub_channel_str, id, &notification_key));
  // Publish the data to any clients who requested notifications on this key.
  auto it = notification_map.find(notification_key);
  if (it != notification_map.end()) {
    for (const std::string &client_channel : it->second) {
      // RedisModule_Call seems to be broken and cannot accept "bb",
      // therefore we construct a temporary redis string here, which
      // will be garbage collected by redis.
      auto channel =
          RedisModule_CreateString(ctx, client_channel.data(), client_channel.size());
      // Named distinctly from publish_reply above: the previous code shadowed
      // the outer `reply`, which is error-prone.
      RedisModuleCallReply *client_reply =
          RedisModule_Call(ctx, "PUBLISH", "ss", channel, data_buffer);
      if (client_reply == nullptr) {
        return RedisModule_ReplyWithError(ctx, "error during PUBLISH");
      }
    }
  }
  return RedisModule_ReplyWithSimpleString(ctx, "OK");
}
/// Publish a notification for an entry update at a key. This publishes a
/// notification to all subscribers of the table, as well as every client that
/// has requested notifications for this key.
///
/// \param pubsub_channel_str The pubsub channel name that notifications for
/// this key should be published to. When publishing to a specific client, the
/// channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key that the notification is about.
/// \param change_mode the update mode, such as append or remove.
/// \param data The appended/removed data.
/// \return OK if there is no error during a publish.
int PublishTableUpdate(RedisModuleCtx *ctx, RedisModuleString *pubsub_channel_str,
                       RedisModuleString *id, GcsChangeMode change_mode,
                       RedisModuleString *data) {
  // Build the GcsEntry notification payload and serialize it.
  GcsEntry entry;
  CreateGcsEntry(id, change_mode, {data}, &entry);
  const std::string serialized = entry.SerializeAsString();
  RedisModuleString *payload =
      RedisModule_CreateString(ctx, serialized.data(), serialized.size());
  return PublishDataHelper(ctx, pubsub_channel_str, id, payload);
}
// RAY.TABLE_ADD:
// TableAdd_RedisCommand: the actual command handler.
// (helper) TableAdd_DoWrite: performs the write to redis state.
// (helper) TableAdd_DoPublish: performs a publish after the write.
// ChainTableAdd_RedisCommand: the same command, chain-enabled.

/// Write step of RAY.TABLE_ADD: overwrite the value at "<prefix><id>" with
/// `data`. argv layout: [cmd, table_prefix, pubsub_channel, id, data].
/// On success returns REDISMODULE_OK; on failure a reply has already been
/// sent and REDISMODULE_ERR is returned.
int TableAdd_DoWrite(RedisModuleCtx *ctx, RedisModuleString **argv, int argc,
                     RedisModuleString **mutated_key_str) {
  if (argc != 5) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *prefix_str = argv[1];
  RedisModuleString *id = argv[3];
  RedisModuleString *data = argv[4];
  RedisModuleKey *key;
  REPLY_AND_RETURN_IF_NOT_OK(OpenPrefixedKey(
      &key, ctx, prefix_str, id, REDISMODULE_READ | REDISMODULE_WRITE, mutated_key_str));
  // NOTE(review): the return value of RedisModule_StringSet is ignored here.
  RedisModule_StringSet(key, data);
  return REDISMODULE_OK;
}
/// Publish step of RAY.TABLE_ADD: notify subscribers of the new value unless
/// the channel is NO_PUBLISH, in which case only "OK" is replied.
int TableAdd_DoPublish(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  if (argc != 5) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *pubsub_channel_str = argv[2];
  RedisModuleString *id = argv[3];
  RedisModuleString *data = argv[4];

  TablePubsub pubsub_channel;
  REPLY_AND_RETURN_IF_NOT_OK(ParseTablePubsub(&pubsub_channel, pubsub_channel_str));
  if (pubsub_channel == TablePubsub::NO_PUBLISH) {
    // Nothing to publish; just acknowledge the write.
    return RedisModule_ReplyWithSimpleString(ctx, "OK");
  }
  // All other pubsub channels write the data back directly onto the channel.
  return PublishTableUpdate(ctx, pubsub_channel_str, id, GcsChangeMode::APPEND_OR_ADD,
                            data);
}
/// Add an entry at a key. This overwrites any existing data at the key.
/// Publishes a notification about the update to all subscribers, if a pubsub
/// channel is provided.
///
/// This is called from a client with the command:
///
///    RAY.TABLE_ADD <table_prefix> <pubsub_channel> <id> <data>
///
/// \param table_prefix The prefix string for keys in this table.
/// \param pubsub_channel The pubsub channel name that notifications for
/// this key should be published to. When publishing to a specific client, the
/// channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key to set.
/// \param data The data to insert at the key.
/// \return The current value at the key, or OK if there is no value.
int TableAdd_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  // Propagate a failed write instead of ignoring it: the previous code
  // discarded this return value, so a failed DoWrite (which has already
  // replied with an error) would still publish and reply a second time.
  // This also matches the chain path, where a non-zero node_func return
  // terminates the update before the tail publish runs.
  if (TableAdd_DoWrite(ctx, argv, argc, /*mutated_key_str=*/nullptr) !=
      REDISMODULE_OK) {
    return REDISMODULE_ERR;
  }
  return TableAdd_DoPublish(ctx, argv, argc);
}
#if RAY_USE_NEW_GCS
/// Chain-enabled version of RAY.TABLE_ADD: replicates the write down the
/// chain via the credis-provided `module`, then publishes once at the tail.
int ChainTableAdd_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  return module.ChainReplicate(ctx, argv, argc, /*node_func=*/TableAdd_DoWrite,
                               /*tail_func=*/TableAdd_DoPublish);
}
#endif
/// Write step of RAY.TABLE_APPEND: append `data` to the list at
/// "<prefix><id>". argv layout:
/// [cmd, table_prefix, pubsub_channel, id, data, index (optional)].
/// The optional index acts as a compare-and-swap: the append only happens if
/// it equals the current log length; otherwise "ERR entry exists" is replied.
int TableAppend_DoWrite(RedisModuleCtx *ctx, RedisModuleString **argv, int argc,
                        RedisModuleString **mutated_key_str) {
  if (argc < 5 || argc > 6) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *prefix_str = argv[1];
  RedisModuleString *id = argv[3];
  RedisModuleString *data = argv[4];
  RedisModuleString *index_str = nullptr;
  if (argc == 6) {
    index_str = argv[5];
  }
  // Set the keys in the table.
  RedisModuleKey *key;
  REPLY_AND_RETURN_IF_NOT_OK(OpenPrefixedKey(
      &key, ctx, prefix_str, id, REDISMODULE_READ | REDISMODULE_WRITE, mutated_key_str));
  int type = RedisModule_KeyType(key);
  REPLY_AND_RETURN_IF_FALSE(
      type == REDISMODULE_KEYTYPE_LIST || type == REDISMODULE_KEYTYPE_EMPTY,
      "TABLE_APPEND entries must be a list or an empty list");
  // Determine the index at which the data should be appended. If no index is
  // requested, then is the current length of the log.
  size_t index = RedisModule_ValueLength(key);
  if (index_str != nullptr) {
    // Parse the requested index.
    long long requested_index;
    REPLY_AND_RETURN_IF_FALSE(
        RedisModule_StringToLongLong(index_str, &requested_index) == REDISMODULE_OK,
        "Index is not a number.");
    REPLY_AND_RETURN_IF_FALSE(requested_index >= 0, "Index is less than 0.");
    index = static_cast<size_t>(requested_index);
  }
  // Only perform the append if the requested index matches the current length
  // of the log, or if no index was requested.
  if (index == RedisModule_ValueLength(key)) {
    // The requested index matches the current length of the log or no index
    // was requested. Perform the append.
    if (RedisModule_ListPush(key, REDISMODULE_LIST_TAIL, data) == REDISMODULE_OK) {
      return REDISMODULE_OK;
    } else {
      static const char *reply = "Unexpected error during TABLE_APPEND";
      RedisModule_ReplyWithError(ctx, reply);
      return REDISMODULE_ERR;
    }
  } else {
    // The requested index did not match the current length of the log. Return
    // an error message as a string.
    // NOTE: this error is deliberately a simple string, not a Redis error
    // reply, so callers can distinguish the CAS failure case.
    static const char *reply = "ERR entry exists";
    RedisModule_ReplyWithSimpleString(ctx, reply);
    return REDISMODULE_ERR;
  }
}
int TableAppend_DoPublish(RedisModuleCtx *ctx, RedisModuleString **argv, int /*argc*/) {
  // Publish an APPEND_OR_ADD notification for the appended entry, unless the
  // caller asked for no publishing at all.
  RedisModuleString *pubsub_channel_str = argv[2];
  RedisModuleString *id = argv[3];
  RedisModuleString *data = argv[4];
  TablePubsub pubsub_channel;
  REPLY_AND_RETURN_IF_NOT_OK(ParseTablePubsub(&pubsub_channel, pubsub_channel_str));
  if (pubsub_channel == TablePubsub::NO_PUBLISH) {
    // Nothing to broadcast; just acknowledge the append.
    return RedisModule_ReplyWithSimpleString(ctx, "OK");
  }
  // Write the appended data back directly onto the requested channel.
  return PublishTableUpdate(ctx, pubsub_channel_str, id, GcsChangeMode::APPEND_OR_ADD,
                            data);
}
/// Append an entry to the log stored at a key. Publishes a notification about
/// the update to all subscribers, if a pubsub channel is provided.
///
/// This is called from a client with the command:
//
/// RAY.TABLE_APPEND <table_prefix> <pubsub_channel> <id> <data>
/// <index (optional)>
///
/// \param table_prefix The prefix string for keys in this table.
/// \param pubsub_channel The pubsub channel name that notifications for this
/// key should be published to. When publishing to a specific client, the
/// channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key to append to.
/// \param data The data to append to the key.
/// \param index If this is set, then the data must be appended at this index.
/// If the current log is shorter or longer than the requested index, then the
/// append will fail and an error message will be returned as a string.
/// \return OK if the append succeeds, or an error message string if the append
/// fails.
int TableAppend_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  // Apply the append first; publish only when the write succeeded. The write
  // path has already sent the error reply on failure.
  const int write_status =
      TableAppend_DoWrite(ctx, argv, argc, /*mutated_key_str=*/nullptr);
  if (write_status != REDISMODULE_OK) {
    return REDISMODULE_ERR;
  }
  return TableAppend_DoPublish(ctx, argv, argc);
}
#if RAY_USE_NEW_GCS
/// Chain-replicated variant of RAY.TABLE_APPEND: every node in the chain
/// applies TableAppend_DoWrite, while only the tail runs
/// TableAppend_DoPublish.
int ChainTableAppend_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv,
                                  int argc) {
  return module.ChainReplicate(ctx, argv, argc,
                               /*node_func=*/TableAppend_DoWrite,
                               /*tail_func=*/TableAppend_DoPublish);
}
#endif
int Set_DoPublish(RedisModuleCtx *ctx, RedisModuleString **argv, bool is_add) {
  // Publish a set-membership change (add or remove) for the given entry,
  // unless the caller requested no publishing.
  RedisModuleString *pubsub_channel_str = argv[2];
  RedisModuleString *id = argv[3];
  RedisModuleString *data = argv[4];
  TablePubsub pubsub_channel;
  REPLY_AND_RETURN_IF_NOT_OK(ParseTablePubsub(&pubsub_channel, pubsub_channel_str));
  if (pubsub_channel == TablePubsub::NO_PUBLISH) {
    // Nothing to broadcast; just acknowledge the operation.
    return RedisModule_ReplyWithSimpleString(ctx, "OK");
  }
  // Write the data back directly onto the channel, tagged with the change
  // direction.
  const GcsChangeMode mode =
      is_add ? GcsChangeMode::APPEND_OR_ADD : GcsChangeMode::REMOVE;
  return PublishTableUpdate(ctx, pubsub_channel_str, id, mode, data);
}
/// Perform the underlying SADD/SREM for a set key. On success, *changed
/// reports whether the set actually gained/lost a member, and a set left
/// empty by a removal is deleted. On failure the Redis error has already
/// been replied and REDISMODULE_ERR is returned.
int Set_DoWrite(RedisModuleCtx *ctx, RedisModuleString **argv, int argc, bool is_add,
                bool *changed) {
  if (argc != 5) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *prefix_str = argv[1];
  RedisModuleString *id = argv[3];
  RedisModuleString *data = argv[4];
  RedisModuleString *key_string = PrefixedKeyString(ctx, prefix_str, id);
  // TODO(kfstorm): According to https://redis.io/topics/modules-intro,
  // set type API is not available yet. We can change RedisModule_Call to
  // set type API later.
  RedisModuleCallReply *reply =
      RedisModule_Call(ctx, is_add ? "SADD" : "SREM", "ss", key_string, data);
  if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_ERROR) {
    // SADD/SREM reply with the number of members actually added/removed.
    *changed = RedisModule_CallReplyInteger(reply) > 0;
    if (!is_add && *changed) {
      // try to delete the empty set.
      RedisModuleKey *key;
      REPLY_AND_RETURN_IF_NOT_OK(
          OpenPrefixedKey(&key, ctx, prefix_str, id, REDISMODULE_WRITE));
      auto size = RedisModule_ValueLength(key);
      if (size == 0) {
        REPLY_AND_RETURN_IF_FALSE(RedisModule_DeleteKey(key) == REDISMODULE_OK,
                                  "ERR Failed to delete empty set.");
      }
    }
    return REDISMODULE_OK;
  } else {
    // the SADD/SREM command failed
    RedisModule_ReplyWithCallReply(ctx, reply);
    return REDISMODULE_ERR;
  }
}
/// Add an entry to the set stored at a key. Publishes a notification about
/// the update to all subscribers, if a pubsub channel is provided.
///
/// This is called from a client with the command:
//
/// RAY.SET_ADD <table_prefix> <pubsub_channel> <id> <data>
///
/// \param table_prefix The prefix string for keys in this set.
/// \param pubsub_channel The pubsub channel name that notifications for this
/// key should be published to. When publishing to a specific client, the
/// channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key to add to.
/// \param data The data to add to the key.
/// \return OK if the add succeeds, or an error message string if the add fails.
int SetAdd_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  // Perform the SADD; on failure Set_DoWrite has already sent the reply.
  bool changed = false;
  const int write_status = Set_DoWrite(ctx, argv, argc, /*is_add=*/true, &changed);
  if (write_status != REDISMODULE_OK) {
    return REDISMODULE_ERR;
  }
  // Notify subscribers only when the set actually gained a new member.
  if (changed) {
    return Set_DoPublish(ctx, argv, /*is_add=*/true);
  }
  return RedisModule_ReplyWithSimpleString(ctx, "OK");
}
/// Remove an entry from the set stored at a key. Publishes a notification about
/// the update to all subscribers, if a pubsub channel is provided.
///
/// This is called from a client with the command:
//
/// RAY.SET_REMOVE <table_prefix> <pubsub_channel> <id> <data>
///
/// \param table_prefix The prefix string for keys in this table.
/// \param pubsub_channel The pubsub channel name that notifications for this
/// key should be published to. When publishing to a specific client, the
/// channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key to remove from.
/// \param data The data to remove from the key.
/// \return OK if the remove succeeds, or an error message string if the remove
/// fails.
int SetRemove_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  // Perform the SREM; on failure Set_DoWrite has already sent the reply.
  bool changed = false;
  const int write_status = Set_DoWrite(ctx, argv, argc, /*is_add=*/false, &changed);
  if (write_status != REDISMODULE_OK) {
    return REDISMODULE_ERR;
  }
  if (changed) {
    // A member was actually removed; let subscribers know.
    return Set_DoPublish(ctx, argv, /*is_add=*/false);
  }
  // Best-effort semantics: removing a missing entry is logged, not an error.
  RAY_LOG(ERROR) << "The entry to remove doesn't exist.";
  return RedisModule_ReplyWithSimpleString(ctx, "OK");
}
int Hash_DoPublish(RedisModuleCtx *ctx, RedisModuleString **argv) {
  // Publish the hash-change payload in argv[4] to the requested channel,
  // unless the caller asked for no publishing.
  RedisModuleString *pubsub_channel_str = argv[2];
  RedisModuleString *id = argv[3];
  RedisModuleString *data = argv[4];
  TablePubsub pubsub_channel;
  REPLY_AND_RETURN_IF_NOT_OK(ParseTablePubsub(&pubsub_channel, pubsub_channel_str));
  if (pubsub_channel == TablePubsub::NO_PUBLISH) {
    // Nothing to broadcast; just acknowledge the update.
    return RedisModule_ReplyWithSimpleString(ctx, "OK");
  }
  // Write the data back directly onto the channel.
  return PublishDataHelper(ctx, pubsub_channel_str, id, data);
}
/// Do the hash table write operation. This is called from by HashUpdate_RedisCommand.
///
/// \param change_mode Output the mode of the operation: APPEND_OR_ADD or REMOVE.
/// \param deleted_data Output data if the deleted data is not the same as required.
int HashUpdate_DoWrite(RedisModuleCtx *ctx, RedisModuleString **argv, int argc,
                       GcsChangeMode *change_mode, RedisModuleString **changed_data) {
  if (argc != 5) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *prefix_str = argv[1];
  RedisModuleString *id = argv[3];
  RedisModuleString *update_data = argv[4];
  RedisModuleKey *key;
  REPLY_AND_RETURN_IF_NOT_OK(OpenPrefixedKey(
      &key, ctx, prefix_str, id, REDISMODULE_READ | REDISMODULE_WRITE, nullptr));
  int type = RedisModule_KeyType(key);
  REPLY_AND_RETURN_IF_FALSE(
      type == REDISMODULE_KEYTYPE_HASH || type == REDISMODULE_KEYTYPE_EMPTY,
      "HashUpdate_DoWrite: entries must be a hash or an empty hash");
  // The payload is a serialized GcsEntry; its change_mode selects between
  // updating fields (APPEND_OR_ADD) and removing them.
  size_t update_data_len = 0;
  const char *update_data_buf = RedisModule_StringPtrLen(update_data, &update_data_len);
  GcsEntry gcs_entry;
  gcs_entry.ParseFromArray(update_data_buf, update_data_len);
  *change_mode = gcs_entry.change_mode();
  if (*change_mode == GcsChangeMode::APPEND_OR_ADD) {
    // This code path means they are updating command.
    // Entries are a flattened list of alternating keys and values, so the
    // count must be even.
    size_t total_size = gcs_entry.entries_size();
    REPLY_AND_RETURN_IF_FALSE(total_size % 2 == 0, "Invalid Hash Update data vector.");
    for (size_t i = 0; i < total_size; i += 2) {
      // Reconstruct a key-value pair from a flattened list.
      RedisModuleString *entry_key = RedisModule_CreateString(
          ctx, gcs_entry.entries(i).data(), gcs_entry.entries(i).size());
      RedisModuleString *entry_value = RedisModule_CreateString(
          ctx, gcs_entry.entries(i + 1).data(), gcs_entry.entries(i + 1).size());
      // Returning 0 if key exists(still updated), 1 if the key is created.
      RAY_IGNORE_EXPR(
          RedisModule_HashSet(key, REDISMODULE_HASH_NONE, entry_key, entry_value, NULL));
    }
    // Every pair was applied, so the original request doubles as the
    // notification payload.
    *changed_data = update_data;
  } else {
    // This code path means the command wants to remove the entries.
    // Build a new GcsEntry containing only the keys that actually existed,
    // so subscribers are not notified about no-op removals.
    GcsEntry updated;
    updated.set_id(gcs_entry.id());
    updated.set_change_mode(gcs_entry.change_mode());
    size_t total_size = gcs_entry.entries_size();
    for (size_t i = 0; i < total_size; i++) {
      RedisModuleString *entry_key = RedisModule_CreateString(
          ctx, gcs_entry.entries(i).data(), gcs_entry.entries(i).size());
      int deleted_num = RedisModule_HashSet(key, REDISMODULE_HASH_NONE, entry_key,
                                            REDISMODULE_HASH_DELETE, NULL);
      if (deleted_num != 0) {
        // The corresponding key is removed.
        updated.add_entries(gcs_entry.entries(i));
      }
    }
    // Serialize updated data.
    std::string str = updated.SerializeAsString();
    *changed_data = RedisModule_CreateString(ctx, str.data(), str.size());
    // Garbage-collect the hash key once it has no fields left.
    auto size = RedisModule_ValueLength(key);
    if (size == 0) {
      REPLY_AND_RETURN_IF_FALSE(RedisModule_DeleteKey(key) == REDISMODULE_OK,
                                "ERR Failed to delete empty hash.");
    }
  }
  return REDISMODULE_OK;
}
/// Update entries for a hash table.
///
/// This is called from a client with the command:
//
/// RAY.HASH_UPDATE <table_prefix> <pubsub_channel> <id> <data>
///
/// \param table_prefix The prefix string for keys in this table.
/// \param pubsub_channel The pubsub channel name that notifications for this
/// key should be published to. When publishing to a specific client, the
/// channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key to remove from.
/// \param data The GcsEntry protobuf data used to update this hash table.
/// 1). For deletion, this is a list of keys.
/// 2). For updating, this is a list of pairs with each key followed by the value.
/// \return OK if the remove succeeds, or an error message string if the remove
/// fails.
int HashUpdate_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  // Apply the hash mutation; on failure the write path has already replied.
  GcsChangeMode mode;
  RedisModuleString *changed_data = nullptr;
  const int write_status = HashUpdate_DoWrite(ctx, argv, argc, &mode, &changed_data);
  if (write_status != REDISMODULE_OK) {
    return REDISMODULE_ERR;
  }
  // Publish the entries that actually changed rather than the raw request.
  std::vector<RedisModuleString *> publish_argv(argv, argv + argc);
  publish_argv[4] = changed_data;
  return Hash_DoPublish(ctx, publish_argv.data());
}
/// A helper function to create a GcsEntry protobuf, based on the
/// current value or values at the given key.
///
/// \param ctx The Redis module context.
/// \param table_key The Redis key whose entry should be read out. The key must
/// be open when this function is called and may be closed in this function.
/// The key's name format is <prefix_str><entry_id>.
/// \param prefix_str The string prefix associated with the open Redis key.
/// When parsed, this is expected to be a TablePrefix.
/// \param entry_id The UniqueID associated with the open Redis key.
/// \param[out] gcs_entry The created GcsEntry.
Status TableEntryToProtobuf(RedisModuleCtx *ctx, RedisModuleKey *table_key,
                            RedisModuleString *prefix_str, RedisModuleString *entry_id,
                            GcsEntry *gcs_entry) {
  auto key_type = RedisModule_KeyType(table_key);
  switch (key_type) {
  case REDISMODULE_KEYTYPE_STRING: {
    // Build the GcsEntry from the string data.
    CreateGcsEntry(entry_id, GcsChangeMode::APPEND_OR_ADD, {}, gcs_entry);
    size_t data_len = 0;
    char *data_buf = RedisModule_StringDMA(table_key, &data_len, REDISMODULE_READ);
    gcs_entry->add_entries(data_buf, data_len);
  } break;
  case REDISMODULE_KEYTYPE_LIST:
  case REDISMODULE_KEYTYPE_HASH:
  case REDISMODULE_KEYTYPE_SET: {
    RedisModule_CloseKey(table_key);
    // Close the key before executing the command. NOTE(swang): According to
    // https://github.com/RedisLabs/RedisModulesSDK/blob/master/API.md, "While
    // a key is open, it should only be accessed via the low level key API."
    RedisModuleString *table_key_str = PrefixedKeyString(ctx, prefix_str, entry_id);
    // TODO(swang): This could potentially be replaced with the native redis
    // server list iterator, once it is implemented for redis modules.
    // Dispatch to the type-appropriate bulk-read command; each replies with a
    // flat array of elements (HGETALL alternates field, value).
    RedisModuleCallReply *reply = nullptr;
    switch (key_type) {
    case REDISMODULE_KEYTYPE_LIST:
      reply = RedisModule_Call(ctx, "LRANGE", "sll", table_key_str, 0, -1);
      break;
    case REDISMODULE_KEYTYPE_SET:
      reply = RedisModule_Call(ctx, "SMEMBERS", "s", table_key_str);
      break;
    case REDISMODULE_KEYTYPE_HASH:
      reply = RedisModule_Call(ctx, "HGETALL", "s", table_key_str);
      break;
    }
    // Build the GcsEntry from the set of log entries.
    if (reply == nullptr || RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_ARRAY) {
      return Status::RedisError("Empty list/set/hash or wrong type");
    }
    CreateGcsEntry(entry_id, GcsChangeMode::APPEND_OR_ADD, {}, gcs_entry);
    for (size_t i = 0; i < RedisModule_CallReplyLength(reply); i++) {
      RedisModuleCallReply *element = RedisModule_CallReplyArrayElement(reply, i);
      size_t len;
      const char *element_str = RedisModule_CallReplyStringPtr(element, &len);
      gcs_entry->add_entries(element_str, len);
    }
  } break;
  case REDISMODULE_KEYTYPE_EMPTY: {
    // No value at the key: an empty GcsEntry signals "no data" to the client.
    CreateGcsEntry(entry_id, GcsChangeMode::APPEND_OR_ADD, {}, gcs_entry);
  } break;
  default:
    return Status::RedisError("Invalid Redis type during lookup.");
  }
  return Status::OK();
}
/// Lookup the current value or values at a key. Returns the current value or
/// values at the key.
///
/// This is called from a client with the command:
//
/// RAY.TABLE_LOOKUP <table_prefix> <pubsub_channel> <id>
///
/// \param table_prefix The prefix string for keys in this table.
/// \param pubsub_channel The pubsub channel name that notifications for
/// this key should be published to. This field is unused for lookups.
/// \param id The ID of the key to lookup.
/// \return nil if the key is empty, the current value if the key type is a
/// string, or an array of the current values if the key type is a set.
int TableLookup_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  if (argc < 4) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *prefix_str = argv[1];
  RedisModuleString *id = argv[3];
  // Open the key read-only; a missing key is reported to the client as nil.
  RedisModuleKey *table_key;
  REPLY_AND_RETURN_IF_NOT_OK(
      OpenPrefixedKey(&table_key, ctx, prefix_str, id, REDISMODULE_READ));
  if (table_key == nullptr) {
    RedisModule_ReplyWithNull(ctx);
    return REDISMODULE_OK;
  }
  // Marshal the current value(s) into a GcsEntry and reply with its
  // serialized bytes.
  GcsEntry gcs_entry;
  REPLY_AND_RETURN_IF_NOT_OK(
      TableEntryToProtobuf(ctx, table_key, prefix_str, id, &gcs_entry));
  const std::string serialized = gcs_entry.SerializeAsString();
  RedisModule_ReplyWithStringBuffer(ctx, serialized.data(), serialized.size());
  return REDISMODULE_OK;
}
// The deleting helper function.
static Status DeleteKeyHelper(RedisModuleCtx *ctx, RedisModuleString *prefix_str,
RedisModuleString *id_data) {
RedisModuleKey *delete_key = nullptr;
RAY_RETURN_NOT_OK(
OpenPrefixedKey(&delete_key, ctx, prefix_str, id_data, REDISMODULE_READ));
if (delete_key == nullptr) {
return Status::RedisError("Key does not exist.");
}
auto key_type = RedisModule_KeyType(delete_key);
// Set/Hash will delete itself when the length is 0.
if (key_type == REDISMODULE_KEYTYPE_STRING || key_type == REDISMODULE_KEYTYPE_LIST) {
// Current Table or Log only has this two types of entries.
RAY_RETURN_NOT_OK(
OpenPrefixedKey(&delete_key, ctx, prefix_str, id_data, REDISMODULE_WRITE));
RedisModule_DeleteKey(delete_key);
} else {
std::ostringstream ostream;
size_t redis_string_size;
const char *redis_string_str = RedisModule_StringPtrLen(id_data, &redis_string_size);
auto id_binary = std::string(redis_string_str, redis_string_size);
ostream << "Undesired type for RAY.TableDelete: " << key_type
<< " id:" << ray::UniqueID::FromBinary(id_binary);
RAY_LOG(ERROR) << ostream.str();
return Status::RedisError(ostream.str());
}
return Status::OK();
}
/// Delete a list of redis keys in batch mode.
///
/// This is called from a client with the command:
//
/// RAY.TABLE_DELETE <table_prefix> <pubsub_channel> <id> <data>
///
/// \param table_prefix The prefix string for keys in this table.
/// \param pubsub_channel Unused but follow the interface.
/// \param id This id will be ignored but follow the interface.
/// \param data The list of Unique Ids, kUniqueIDSize bytes for each.
/// \return Always return OK unless the arguments are invalid.
int TableDelete_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  if (argc != 5) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *prefix_str = argv[1];
  RedisModuleString *data = argv[4];
  size_t len = 0;
  const char *data_ptr = RedisModule_StringPtrLen(data, &len);
  // The payload must at least contain the uint16_t ID-count header.
  REPLY_AND_RETURN_IF_FALSE(len >= sizeof(uint16_t),
                            "The deletion data is too short to hold an ID count");
  // The first uint16_t are used to encode the number of ids to delete.
  size_t ids_to_delete = *reinterpret_cast<const uint16_t *>(data_ptr);
  // Guard against a zero count before dividing by it below (previously this
  // divided first, so a zero count was undefined behavior).
  REPLY_AND_RETURN_IF_FALSE(ids_to_delete > 0,
                            "The deletion data must contain at least one ID");
  REPLY_AND_RETURN_IF_FALSE((len - sizeof(uint16_t)) % ids_to_delete == 0,
                            "The deletion data length must be multiple of the ID size");
  size_t id_length = (len - sizeof(uint16_t)) / ids_to_delete;
  data_ptr += sizeof(uint16_t);
  for (size_t i = 0; i < ids_to_delete; ++i) {
    RedisModuleString *id_data =
        RedisModule_CreateString(ctx, data_ptr + i * id_length, id_length);
    // Deletion is best-effort: a missing or wrong-typed key is logged inside
    // the helper and skipped.
    RAY_IGNORE_EXPR(DeleteKeyHelper(ctx, prefix_str, id_data));
  }
  return RedisModule_ReplyWithSimpleString(ctx, "OK");
}
/// Request notifications for changes to a key. Returns the current value or
/// values at the key. Notifications will be sent to the requesting client for
/// every subsequent TABLE_ADD to the key.
///
/// This is called from a client with the command:
//
/// RAY.TABLE_REQUEST_NOTIFICATIONS <table_prefix> <pubsub_channel> <id>
/// <client_id>
///
/// \param table_prefix The prefix string for keys in this table.
/// \param pubsub_channel The pubsub channel name that notifications for
/// this key should be published to. When publishing to a specific
/// client, the channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key to publish notifications for.
/// \param client_id The ID of the client that is being notified.
/// \return nil if the key is empty, the current value if the key type is a
/// string, or an array of the current values if the key type is a set.
int TableRequestNotifications_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv,
                                           int argc) {
  if (argc != 5) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *prefix_str = argv[1];
  RedisModuleString *pubsub_channel_str = argv[2];
  RedisModuleString *id = argv[3];
  RedisModuleString *client_id = argv[4];
  // Build the client-specific channel (<pubsub_channel>:<client_id>) that
  // this client's notifications will be published on.
  RedisModuleString *client_channel;
  REPLY_AND_RETURN_IF_NOT_OK(
      FormatPubsubChannel(&client_channel, ctx, pubsub_channel_str, client_id));
  // Add this client to the set of clients that should be notified when there
  // are changes to the key.
  std::string notification_key;
  REPLY_AND_RETURN_IF_NOT_OK(
      GetBroadcastKey(ctx, pubsub_channel_str, id, &notification_key));
  // NOTE(review): this appends unconditionally, so a client that requests
  // notifications twice appears twice in the list — confirm callers dedupe.
  notification_map[notification_key].push_back(RedisString_ToString(client_channel));
  // Lookup the current value at the key.
  RedisModuleKey *table_key;
  REPLY_AND_RETURN_IF_NOT_OK(
      OpenPrefixedKey(&table_key, ctx, prefix_str, id, REDISMODULE_READ));
  // Publish the current value at the key to the client that is requesting
  // notifications. An empty notification will be published if the key is
  // empty.
  GcsEntry gcs_entry;
  REPLY_AND_RETURN_IF_NOT_OK(
      TableEntryToProtobuf(ctx, table_key, prefix_str, id, &gcs_entry));
  std::string str = gcs_entry.SerializeAsString();
  RedisModule_Call(ctx, "PUBLISH", "sb", client_channel, str.data(), str.size());
  return RedisModule_ReplyWithNull(ctx);
}
/// Cancel notifications for changes to a key. The client will no longer
/// receive notifications for this key. This does not check if the client
/// first requested notifications before canceling them.
///
/// This is called from a client with the command:
//
/// RAY.TABLE_CANCEL_NOTIFICATIONS <table_prefix> <pubsub_channel> <id>
/// <client_id>
///
/// \param table_prefix The prefix string for keys in this table.
/// \param pubsub_channel The pubsub channel name that notifications for
/// this key should be published to. If publishing to a specific client,
/// then the channel name should be <pubsub_channel>:<client_id>.
/// \param id The ID of the key to publish notifications for.
/// \param client_id The ID of the client to cancel notifications for.
/// \return OK.
int TableCancelNotifications_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv,
                                          int argc) {
  if (argc < 5) {
    return RedisModule_WrongArity(ctx);
  }
  RedisModuleString *pubsub_channel_str = argv[2];
  RedisModuleString *id = argv[3];
  RedisModuleString *client_id = argv[4];
  // Reconstruct the per-client channel name that was registered when
  // notifications were requested, so the matching entry can be located.
  RedisModuleString *client_channel;
  REPLY_AND_RETURN_IF_NOT_OK(
      FormatPubsubChannel(&client_channel, ctx, pubsub_channel_str, client_id));
  std::string notification_key;
  REPLY_AND_RETURN_IF_NOT_OK(
      GetBroadcastKey(ctx, pubsub_channel_str, id, &notification_key));
  auto entry = notification_map.find(notification_key);
  if (entry != notification_map.end()) {
    // Drop every registration of this client channel for the key.
    auto &channels = entry->second;
    channels.erase(std::remove(channels.begin(), channels.end(),
                               RedisString_ToString(client_channel)),
                   channels.end());
    // Garbage-collect the key once no subscribers remain.
    if (channels.empty()) {
      notification_map.erase(entry);
    }
  }
  RedisModule_ReplyWithSimpleString(ctx, "OK");
  return REDISMODULE_OK;
}
/// Check whether a binary ID consists entirely of 0xFF bytes, i.e. whether
/// it is the nil ID.
///
/// \param out Set to true if the ID is nil, false otherwise.
/// \param data The binary ID; must be exactly kUniqueIDSize bytes long.
/// \return An error status if the data has the wrong size, OK otherwise.
Status IsNil(bool *out, const std::string &data) {
  if (data.size() != kUniqueIDSize) {
    return Status::RedisError("Size of data doesn't match size of UniqueID");
  }
  const uint8_t *d = reinterpret_cast<const uint8_t *>(data.data());
  for (size_t i = 0; i < kUniqueIDSize; ++i) {
    if (d[i] != 255) {
      // Bug fix: previously *out was unconditionally overwritten with true
      // after the loop, so any non-0xFF byte was ignored and IsNil always
      // reported true. Return as soon as one byte proves the ID is not nil.
      *out = false;
      return Status::OK();
    }
  }
  *out = true;
  return Status::OK();
}
// Produce a human-readable summary of this module's in-memory state.
std::string DebugString() {
  std::stringstream out;
  out << "RedisModule:"
      << "\n- NotificationMap.size = " << notification_map.size() << std::endl;
  return out.str();
}
int DebugString_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  REDISMODULE_NOT_USED(argv);
  if (argc != 1) {
    return RedisModule_WrongArity(ctx);
  }
  // Reply with the module's debug summary as a raw string buffer.
  const std::string summary = DebugString();
  return RedisModule_ReplyWithStringBuffer(ctx, summary.data(), summary.size());
}
}; // namespace internal_redis_commands
// Wrap all Redis commands with Redis' auto memory management.
// NOTE(review): AUTO_MEMORY is defined elsewhere in this file; presumably it
// arranges for RedisModule allocations made during the command to be freed
// automatically when the command returns — confirm against the macro.
AUTO_MEMORY(TableAdd_RedisCommand);
AUTO_MEMORY(HashUpdate_RedisCommand);
AUTO_MEMORY(TableAppend_RedisCommand);
AUTO_MEMORY(SetAdd_RedisCommand);
AUTO_MEMORY(SetRemove_RedisCommand);
AUTO_MEMORY(TableLookup_RedisCommand);
AUTO_MEMORY(TableRequestNotifications_RedisCommand);
AUTO_MEMORY(TableDelete_RedisCommand);
AUTO_MEMORY(TableCancelNotifications_RedisCommand);
AUTO_MEMORY(DebugString_RedisCommand);
#if RAY_USE_NEW_GCS
AUTO_MEMORY(ChainTableAdd_RedisCommand);
AUTO_MEMORY(ChainTableAppend_RedisCommand);
#endif
extern "C" {

/// This function must be present on each Redis module. It is used in order to
/// register the commands into the Redis server.
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
  REDISMODULE_NOT_USED(argv);
  REDISMODULE_NOT_USED(argc);

  if (RedisModule_Init(ctx, "ray", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) {
    return REDISMODULE_ERR;
  }

  // Each command is described by its Redis-visible name, its handler, and its
  // command flags; they are all registered in one pass below, in the same
  // order as before.
  struct CommandSpec {
    const char *name;
    RedisModuleCmdFunc handler;
    const char *flags;
  };
  const CommandSpec commands[] = {
      {"ray.table_add", TableAdd_RedisCommand, "write pubsub"},
      {"ray.table_append", TableAppend_RedisCommand, "write pubsub"},
      {"ray.set_add", SetAdd_RedisCommand, "write pubsub"},
      {"ray.set_remove", SetRemove_RedisCommand, "write pubsub"},
      {"ray.table_lookup", TableLookup_RedisCommand, "readonly"},
      {"ray.table_delete", TableDelete_RedisCommand, "write"},
      {"ray.hash_update", HashUpdate_RedisCommand, "write pubsub"},
      {"ray.table_request_notifications", TableRequestNotifications_RedisCommand,
       "write pubsub"},
      {"ray.table_cancel_notifications", TableCancelNotifications_RedisCommand,
       "write pubsub"},
      {"ray.debug_string", DebugString_RedisCommand, "readonly"},
#if RAY_USE_NEW_GCS
      // Chain-enabled commands that depend on ray-project/credis.
      {"ray.chain.table_add", ChainTableAdd_RedisCommand, "write pubsub"},
      {"ray.chain.table_append", ChainTableAppend_RedisCommand, "write pubsub"},
#endif
  };
  for (const auto &command : commands) {
    if (RedisModule_CreateCommand(ctx, command.name, command.handler, command.flags, 0,
                                  0, 0) == REDISMODULE_ERR) {
      return REDISMODULE_ERR;
    }
  }
  return REDISMODULE_OK;
}

} /// extern "C"
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_module/redis_string.h
|
C/C++ Header
|
#ifndef RAY_REDIS_STRING_H_
#define RAY_REDIS_STRING_H_
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "redismodule.h"
/* Format a RedisModuleString.
*
* @param ctx The redis module context.
* @param fmt The format string. This currently supports %S for
* RedisModuleString and %s for null terminated C strings.
* @params ... The parameters to be formated.
* @return The formatted RedisModuleString, needs to be freed by the caller.
*/
/* Format a RedisModuleString.
 *
 * @param ctx The redis module context.
 * @param fmt The format string. This currently supports %S for
 *        RedisModuleString, %s for null terminated C strings and %b for a
 *        (buffer, length) argument pair; %% emits a literal '%'.
 * @params ... The parameters to be formated.
 * @return The formatted RedisModuleString, needs to be freed by the caller.
 */
RedisModuleString *RedisString_Format(RedisModuleCtx *ctx, const char *fmt, ...) {
  RedisModuleString *result = RedisModule_CreateString(ctx, "", 0);
  size_t l;
  RedisModuleString *redisstr;
  const char *s;
  const char *f = fmt; /* Next format specifier byte to process. */
  va_list ap;
  va_start(ap, fmt);
  /* Note: removed the unused write-only locals `initlen` and `i` that the
   * previous version maintained but never read. */
  while (*f) {
    char next;
    switch (*f) {
    case '%':
      next = *(f + 1);
      if (next == '\0') {
        /* Bug fix: a trailing '%' previously appended the NUL terminator and
         * then advanced `f` past the end of the string, reading out of
         * bounds. Emit the '%' literally and let the loop terminate. */
        RedisModule_StringAppendBuffer(ctx, result, f, 1);
        f += 1;
        continue;
      }
      f++;
      switch (next) {
      case 'S':
        redisstr = va_arg(ap, RedisModuleString *);
        s = RedisModule_StringPtrLen(redisstr, &l);
        RedisModule_StringAppendBuffer(ctx, result, s, l);
        break;
      case 's':
        s = va_arg(ap, const char *);
        RedisModule_StringAppendBuffer(ctx, result, s, strlen(s));
        break;
      case 'b':
        s = va_arg(ap, const char *);
        l = va_arg(ap, size_t);
        RedisModule_StringAppendBuffer(ctx, result, s, l);
        break;
      default: /* Handle %% and generally %<unknown>. */
        RedisModule_StringAppendBuffer(ctx, result, &next, 1);
        break;
      }
      break;
    default:
      RedisModule_StringAppendBuffer(ctx, result, f, 1);
      break;
    }
    f += 1;
  }
  va_end(ap);
  return result;
}
/* Copy the bytes of a RedisModuleString into an owning std::string. */
std::string RedisString_ToString(RedisModuleString *string) {
  size_t length = 0;
  const char *bytes = RedisModule_StringPtrLen(string, &length);
  return {bytes, length};
}
#endif // RAY_REDIS_STRING_H_
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/redis_module/redismodule.h
|
C/C++ Header
|
#ifndef REDISMODULE_H
#define REDISMODULE_H
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
/* ---------------- Defines common between core and modules --------------- */
/* Error status return values. */
#define REDISMODULE_OK 0
#define REDISMODULE_ERR 1
/* API versions. */
#define REDISMODULE_APIVER_1 1
/* API flags and constants */
#define REDISMODULE_READ (1 << 0)
#define REDISMODULE_WRITE (1 << 1)
#define REDISMODULE_LIST_HEAD 0
#define REDISMODULE_LIST_TAIL 1
/* Key types. */
#define REDISMODULE_KEYTYPE_EMPTY 0
#define REDISMODULE_KEYTYPE_STRING 1
#define REDISMODULE_KEYTYPE_LIST 2
#define REDISMODULE_KEYTYPE_HASH 3
#define REDISMODULE_KEYTYPE_SET 4
#define REDISMODULE_KEYTYPE_ZSET 5
#define REDISMODULE_KEYTYPE_MODULE 6
/* Reply types. */
#define REDISMODULE_REPLY_UNKNOWN -1
#define REDISMODULE_REPLY_STRING 0
#define REDISMODULE_REPLY_ERROR 1
#define REDISMODULE_REPLY_INTEGER 2
#define REDISMODULE_REPLY_ARRAY 3
#define REDISMODULE_REPLY_NULL 4
/* Postponed array length. */
#define REDISMODULE_POSTPONED_ARRAY_LEN -1
/* Expire */
#define REDISMODULE_NO_EXPIRE -1
/* Sorted set API flags. */
#define REDISMODULE_ZADD_XX (1 << 0)
#define REDISMODULE_ZADD_NX (1 << 1)
#define REDISMODULE_ZADD_ADDED (1 << 2)
#define REDISMODULE_ZADD_UPDATED (1 << 3)
#define REDISMODULE_ZADD_NOP (1 << 4)
/* Hash API flags. */
#define REDISMODULE_HASH_NONE 0
#define REDISMODULE_HASH_NX (1 << 0)
#define REDISMODULE_HASH_XX (1 << 1)
#define REDISMODULE_HASH_CFIELDS (1 << 2)
#define REDISMODULE_HASH_EXISTS (1 << 3)
/* A special pointer that we can use between the core and the module to signal
* field deletion, and that is impossible to be a valid pointer. */
#define REDISMODULE_HASH_DELETE ((RedisModuleString *)(long)1)
/* Error messages. */
#define REDISMODULE_ERRORMSG_WRONGTYPE \
"WRONGTYPE Operation against a key holding the wrong kind of value"
#define REDISMODULE_POSITIVE_INFINITE (1.0 / 0.0)
#define REDISMODULE_NEGATIVE_INFINITE (-1.0 / 0.0)
#define REDISMODULE_NOT_USED(V) ((void)V)
/* ------------------------- End of common defines ------------------------ */
#ifndef REDISMODULE_CORE
typedef long long mstime_t;
/* Incomplete structures for compiler checks but opaque access. */
typedef struct RedisModuleCtx RedisModuleCtx;
typedef struct RedisModuleKey RedisModuleKey;
typedef struct RedisModuleString RedisModuleString;
typedef struct RedisModuleCallReply RedisModuleCallReply;
typedef struct RedisModuleIO RedisModuleIO;
typedef struct RedisModuleType RedisModuleType;
typedef struct RedisModuleDigest RedisModuleDigest;
typedef struct RedisModuleBlockedClient RedisModuleBlockedClient;
typedef int (*RedisModuleCmdFunc)(RedisModuleCtx *ctx, RedisModuleString **argv,
int argc);
typedef void *(*RedisModuleTypeLoadFunc)(RedisModuleIO *rdb, int encver);
typedef void (*RedisModuleTypeSaveFunc)(RedisModuleIO *rdb, void *value);
typedef void (*RedisModuleTypeRewriteFunc)(RedisModuleIO *aof, RedisModuleString *key,
void *value);
typedef size_t (*RedisModuleTypeMemUsageFunc)(void *value);
typedef void (*RedisModuleTypeDigestFunc)(RedisModuleDigest *digest, void *value);
typedef void (*RedisModuleTypeFreeFunc)(void *value);
#define REDISMODULE_TYPE_METHOD_VERSION 1
/* Callback table describing a custom module data type. The version field
 * should carry REDISMODULE_TYPE_METHOD_VERSION (defined above); the remaining
 * members are the type's lifecycle callbacks (typedefs above). */
typedef struct RedisModuleTypeMethods {
  uint64_t version;                       /* REDISMODULE_TYPE_METHOD_VERSION */
  RedisModuleTypeLoadFunc rdb_load;       /* Load a value from RDB. */
  RedisModuleTypeSaveFunc rdb_save;       /* Save a value to RDB. */
  RedisModuleTypeRewriteFunc aof_rewrite; /* Rewrite a value as AOF commands. */
  RedisModuleTypeMemUsageFunc mem_usage;  /* Report memory used by a value. */
  RedisModuleTypeDigestFunc digest;       /* Fold a value into a digest. */
  RedisModuleTypeFreeFunc free;           /* Free a value. */
} RedisModuleTypeMethods;
#define REDISMODULE_GET_API(name) \
RedisModule_GetApi("RedisModule_" #name, ((void **)&RedisModule_##name))
#define REDISMODULE_API_FUNC(x) (*x)
/* Function-pointer declarations for the core API. Each pointer is filled in
 * by RedisModule_Init() via REDISMODULE_GET_API. */
/* Memory management. */
void *REDISMODULE_API_FUNC(RedisModule_Alloc)(size_t bytes);
void *REDISMODULE_API_FUNC(RedisModule_Realloc)(void *ptr, size_t bytes);
void REDISMODULE_API_FUNC(RedisModule_Free)(void *ptr);
void *REDISMODULE_API_FUNC(RedisModule_Calloc)(size_t nmemb, size_t size);
char *REDISMODULE_API_FUNC(RedisModule_Strdup)(const char *str);
/* Module registration and command handling. */
int REDISMODULE_API_FUNC(RedisModule_GetApi)(const char *, void *);
int REDISMODULE_API_FUNC(RedisModule_CreateCommand)(RedisModuleCtx *ctx, const char *name,
                                                    RedisModuleCmdFunc cmdfunc,
                                                    const char *strflags, int firstkey,
                                                    int lastkey, int keystep);
int REDISMODULE_API_FUNC(RedisModule_SetModuleAttribs)(RedisModuleCtx *ctx,
                                                       const char *name, int ver,
                                                       int apiver);
int REDISMODULE_API_FUNC(RedisModule_WrongArity)(RedisModuleCtx *ctx);
int REDISMODULE_API_FUNC(RedisModule_ReplyWithLongLong)(RedisModuleCtx *ctx,
                                                        long long ll);
/* Database selection and key access. */
int REDISMODULE_API_FUNC(RedisModule_GetSelectedDb)(RedisModuleCtx *ctx);
int REDISMODULE_API_FUNC(RedisModule_SelectDb)(RedisModuleCtx *ctx, int newid);
void *REDISMODULE_API_FUNC(RedisModule_OpenKey)(RedisModuleCtx *ctx,
                                                RedisModuleString *keyname, int mode);
void REDISMODULE_API_FUNC(RedisModule_CloseKey)(RedisModuleKey *kp);
int REDISMODULE_API_FUNC(RedisModule_KeyType)(RedisModuleKey *kp);
size_t REDISMODULE_API_FUNC(RedisModule_ValueLength)(RedisModuleKey *kp);
int REDISMODULE_API_FUNC(RedisModule_ListPush)(RedisModuleKey *kp, int where,
                                               RedisModuleString *ele);
RedisModuleString *REDISMODULE_API_FUNC(RedisModule_ListPop)(RedisModuleKey *key,
                                                             int where);
/* Calling Redis commands from a module and inspecting replies. */
RedisModuleCallReply *REDISMODULE_API_FUNC(RedisModule_Call)(RedisModuleCtx *ctx,
                                                             const char *cmdname,
                                                             const char *fmt, ...);
const char *REDISMODULE_API_FUNC(RedisModule_CallReplyProto)(RedisModuleCallReply *reply,
                                                             size_t *len);
void REDISMODULE_API_FUNC(RedisModule_FreeCallReply)(RedisModuleCallReply *reply);
int REDISMODULE_API_FUNC(RedisModule_CallReplyType)(RedisModuleCallReply *reply);
long long REDISMODULE_API_FUNC(RedisModule_CallReplyInteger)(RedisModuleCallReply *reply);
size_t REDISMODULE_API_FUNC(RedisModule_CallReplyLength)(RedisModuleCallReply *reply);
RedisModuleCallReply *REDISMODULE_API_FUNC(RedisModule_CallReplyArrayElement)(
    RedisModuleCallReply *reply, size_t idx);
/* Module string creation and access. */
RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateString)(RedisModuleCtx *ctx,
                                                                  const char *ptr,
                                                                  size_t len);
RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromLongLong)(
    RedisModuleCtx *ctx, long long ll);
RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromString)(
    RedisModuleCtx *ctx, const RedisModuleString *str);
RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringPrintf)(
    RedisModuleCtx *ctx, const char *fmt, ...);
void REDISMODULE_API_FUNC(RedisModule_FreeString)(RedisModuleCtx *ctx,
                                                  RedisModuleString *str);
const char *REDISMODULE_API_FUNC(RedisModule_StringPtrLen)(const RedisModuleString *str,
                                                           size_t *len);
/* Replying to clients. */
int REDISMODULE_API_FUNC(RedisModule_ReplyWithError)(RedisModuleCtx *ctx,
                                                     const char *err);
int REDISMODULE_API_FUNC(RedisModule_ReplyWithSimpleString)(RedisModuleCtx *ctx,
                                                            const char *msg);
int REDISMODULE_API_FUNC(RedisModule_ReplyWithArray)(RedisModuleCtx *ctx, long len);
void REDISMODULE_API_FUNC(RedisModule_ReplySetArrayLength)(RedisModuleCtx *ctx, long len);
int REDISMODULE_API_FUNC(RedisModule_ReplyWithStringBuffer)(RedisModuleCtx *ctx,
                                                            const char *buf, size_t len);
int REDISMODULE_API_FUNC(RedisModule_ReplyWithString)(RedisModuleCtx *ctx,
                                                      RedisModuleString *str);
int REDISMODULE_API_FUNC(RedisModule_ReplyWithNull)(RedisModuleCtx *ctx);
int REDISMODULE_API_FUNC(RedisModule_ReplyWithDouble)(RedisModuleCtx *ctx, double d);
int REDISMODULE_API_FUNC(RedisModule_ReplyWithCallReply)(RedisModuleCtx *ctx,
                                                         RedisModuleCallReply *reply);
int REDISMODULE_API_FUNC(RedisModule_StringToLongLong)(const RedisModuleString *str,
                                                       long long *ll);
int REDISMODULE_API_FUNC(RedisModule_StringToDouble)(const RedisModuleString *str,
                                                     double *d);
void REDISMODULE_API_FUNC(RedisModule_AutoMemory)(RedisModuleCtx *ctx);
/* Replication. */
int REDISMODULE_API_FUNC(RedisModule_Replicate)(RedisModuleCtx *ctx, const char *cmdname,
                                                const char *fmt, ...);
int REDISMODULE_API_FUNC(RedisModule_ReplicateVerbatim)(RedisModuleCtx *ctx);
const char *REDISMODULE_API_FUNC(RedisModule_CallReplyStringPtr)(
    RedisModuleCallReply *reply, size_t *len);
RedisModuleString *REDISMODULE_API_FUNC(RedisModule_CreateStringFromCallReply)(
    RedisModuleCallReply *reply);
/* String-key and expire operations. */
int REDISMODULE_API_FUNC(RedisModule_DeleteKey)(RedisModuleKey *key);
int REDISMODULE_API_FUNC(RedisModule_StringSet)(RedisModuleKey *key,
                                                RedisModuleString *str);
char *REDISMODULE_API_FUNC(RedisModule_StringDMA)(RedisModuleKey *key, size_t *len,
                                                  int mode);
int REDISMODULE_API_FUNC(RedisModule_StringTruncate)(RedisModuleKey *key, size_t newlen);
mstime_t REDISMODULE_API_FUNC(RedisModule_GetExpire)(RedisModuleKey *key);
int REDISMODULE_API_FUNC(RedisModule_SetExpire)(RedisModuleKey *key, mstime_t expire);
/* Sorted-set operations and range iteration. */
int REDISMODULE_API_FUNC(RedisModule_ZsetAdd)(RedisModuleKey *key, double score,
                                              RedisModuleString *ele, int *flagsptr);
int REDISMODULE_API_FUNC(RedisModule_ZsetIncrby)(RedisModuleKey *key, double score,
                                                 RedisModuleString *ele, int *flagsptr,
                                                 double *newscore);
int REDISMODULE_API_FUNC(RedisModule_ZsetScore)(RedisModuleKey *key,
                                                RedisModuleString *ele, double *score);
int REDISMODULE_API_FUNC(RedisModule_ZsetRem)(RedisModuleKey *key, RedisModuleString *ele,
                                              int *deleted);
void REDISMODULE_API_FUNC(RedisModule_ZsetRangeStop)(RedisModuleKey *key);
int REDISMODULE_API_FUNC(RedisModule_ZsetFirstInScoreRange)(RedisModuleKey *key,
                                                            double min, double max,
                                                            int minex, int maxex);
int REDISMODULE_API_FUNC(RedisModule_ZsetLastInScoreRange)(RedisModuleKey *key,
                                                           double min, double max,
                                                           int minex, int maxex);
int REDISMODULE_API_FUNC(RedisModule_ZsetFirstInLexRange)(RedisModuleKey *key,
                                                          RedisModuleString *min,
                                                          RedisModuleString *max);
int REDISMODULE_API_FUNC(RedisModule_ZsetLastInLexRange)(RedisModuleKey *key,
                                                         RedisModuleString *min,
                                                         RedisModuleString *max);
RedisModuleString *REDISMODULE_API_FUNC(RedisModule_ZsetRangeCurrentElement)(
    RedisModuleKey *key, double *score);
int REDISMODULE_API_FUNC(RedisModule_ZsetRangeNext)(RedisModuleKey *key);
int REDISMODULE_API_FUNC(RedisModule_ZsetRangePrev)(RedisModuleKey *key);
int REDISMODULE_API_FUNC(RedisModule_ZsetRangeEndReached)(RedisModuleKey *key);
/* Hash operations (variadic field/value pairs). */
int REDISMODULE_API_FUNC(RedisModule_HashSet)(RedisModuleKey *key, int flags, ...);
int REDISMODULE_API_FUNC(RedisModule_HashGet)(RedisModuleKey *key, int flags, ...);
int REDISMODULE_API_FUNC(RedisModule_IsKeysPositionRequest)(RedisModuleCtx *ctx);
void REDISMODULE_API_FUNC(RedisModule_KeyAtPos)(RedisModuleCtx *ctx, int pos);
unsigned long long REDISMODULE_API_FUNC(RedisModule_GetClientId)(RedisModuleCtx *ctx);
void *REDISMODULE_API_FUNC(RedisModule_PoolAlloc)(RedisModuleCtx *ctx, size_t bytes);
/* Module-defined data types and RDB/AOF persistence callbacks. */
RedisModuleType *REDISMODULE_API_FUNC(RedisModule_CreateDataType)(
    RedisModuleCtx *ctx, const char *name, int encver,
    RedisModuleTypeMethods *typemethods);
int REDISMODULE_API_FUNC(RedisModule_ModuleTypeSetValue)(RedisModuleKey *key,
                                                         RedisModuleType *mt,
                                                         void *value);
RedisModuleType *REDISMODULE_API_FUNC(RedisModule_ModuleTypeGetType)(RedisModuleKey *key);
void *REDISMODULE_API_FUNC(RedisModule_ModuleTypeGetValue)(RedisModuleKey *key);
void REDISMODULE_API_FUNC(RedisModule_SaveUnsigned)(RedisModuleIO *io, uint64_t value);
uint64_t REDISMODULE_API_FUNC(RedisModule_LoadUnsigned)(RedisModuleIO *io);
void REDISMODULE_API_FUNC(RedisModule_SaveSigned)(RedisModuleIO *io, int64_t value);
int64_t REDISMODULE_API_FUNC(RedisModule_LoadSigned)(RedisModuleIO *io);
void REDISMODULE_API_FUNC(RedisModule_EmitAOF)(RedisModuleIO *io, const char *cmdname,
                                               const char *fmt, ...);
void REDISMODULE_API_FUNC(RedisModule_SaveString)(RedisModuleIO *io,
                                                  RedisModuleString *s);
void REDISMODULE_API_FUNC(RedisModule_SaveStringBuffer)(RedisModuleIO *io,
                                                        const char *str, size_t len);
RedisModuleString *REDISMODULE_API_FUNC(RedisModule_LoadString)(RedisModuleIO *io);
char *REDISMODULE_API_FUNC(RedisModule_LoadStringBuffer)(RedisModuleIO *io,
                                                         size_t *lenptr);
void REDISMODULE_API_FUNC(RedisModule_SaveDouble)(RedisModuleIO *io, double value);
double REDISMODULE_API_FUNC(RedisModule_LoadDouble)(RedisModuleIO *io);
void REDISMODULE_API_FUNC(RedisModule_SaveFloat)(RedisModuleIO *io, float value);
float REDISMODULE_API_FUNC(RedisModule_LoadFloat)(RedisModuleIO *io);
/* Logging. */
void REDISMODULE_API_FUNC(RedisModule_Log)(RedisModuleCtx *ctx, const char *level,
                                           const char *fmt, ...);
void REDISMODULE_API_FUNC(RedisModule_LogIOError)(RedisModuleIO *io, const char *levelstr,
                                                  const char *fmt, ...);
int REDISMODULE_API_FUNC(RedisModule_StringAppendBuffer)(RedisModuleCtx *ctx,
                                                         RedisModuleString *str,
                                                         const char *buf, size_t len);
void REDISMODULE_API_FUNC(RedisModule_RetainString)(RedisModuleCtx *ctx,
                                                    RedisModuleString *str);
int REDISMODULE_API_FUNC(RedisModule_StringCompare)(RedisModuleString *a,
                                                    RedisModuleString *b);
RedisModuleCtx *REDISMODULE_API_FUNC(RedisModule_GetContextFromIO)(RedisModuleIO *io);
/* Blocking-client support. */
RedisModuleBlockedClient *REDISMODULE_API_FUNC(RedisModule_BlockClient)(
    RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback,
    RedisModuleCmdFunc timeout_callback, void (*free_privdata)(void *),
    long long timeout_ms);
int REDISMODULE_API_FUNC(RedisModule_UnblockClient)(RedisModuleBlockedClient *bc,
                                                    void *privdata);
int REDISMODULE_API_FUNC(RedisModule_IsBlockedReplyRequest)(RedisModuleCtx *ctx);
int REDISMODULE_API_FUNC(RedisModule_IsBlockedTimeoutRequest)(RedisModuleCtx *ctx);
void *REDISMODULE_API_FUNC(RedisModule_GetBlockedClientPrivateData)(RedisModuleCtx *ctx);
int REDISMODULE_API_FUNC(RedisModule_AbortBlock)(RedisModuleBlockedClient *bc);
long long REDISMODULE_API_FUNC(RedisModule_Milliseconds)(void);
/* This is included inline inside each Redis module. */
static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver)
    __attribute__((unused));
/* Resolve every exported core API symbol into the module-local function
 * pointers declared above, then register the module's attributes with the
 * core.
 *
 * Fix: the original fetched ReplySetArrayLength twice (once after
 * ReplyWithArray and again after ReplyWithDouble); the redundant second
 * REDISMODULE_GET_API call is removed.
 *
 * ctx    - module context passed to the module's OnLoad entry point; its
 *          first pointer-sized slot holds the core's GetApi function.
 * name   - module name to register.
 * ver    - module version.
 * apiver - API version the module was compiled against.
 * Returns REDISMODULE_OK. */
static int RedisModule_Init(RedisModuleCtx *ctx, const char *name, int ver, int apiver) {
  /* The core stores the GetApi entry point in the first word of the ctx. */
  void *getapifuncptr = ((void **)ctx)[0];
  RedisModule_GetApi = (int (*)(const char *, void *))getapifuncptr;
  REDISMODULE_GET_API(Alloc);
  REDISMODULE_GET_API(Calloc);
  REDISMODULE_GET_API(Free);
  REDISMODULE_GET_API(Realloc);
  REDISMODULE_GET_API(Strdup);
  REDISMODULE_GET_API(CreateCommand);
  REDISMODULE_GET_API(SetModuleAttribs);
  REDISMODULE_GET_API(WrongArity);
  REDISMODULE_GET_API(ReplyWithLongLong);
  REDISMODULE_GET_API(ReplyWithError);
  REDISMODULE_GET_API(ReplyWithSimpleString);
  REDISMODULE_GET_API(ReplyWithArray);
  REDISMODULE_GET_API(ReplySetArrayLength);
  REDISMODULE_GET_API(ReplyWithStringBuffer);
  REDISMODULE_GET_API(ReplyWithString);
  REDISMODULE_GET_API(ReplyWithNull);
  REDISMODULE_GET_API(ReplyWithCallReply);
  REDISMODULE_GET_API(ReplyWithDouble);
  REDISMODULE_GET_API(GetSelectedDb);
  REDISMODULE_GET_API(SelectDb);
  REDISMODULE_GET_API(OpenKey);
  REDISMODULE_GET_API(CloseKey);
  REDISMODULE_GET_API(KeyType);
  REDISMODULE_GET_API(ValueLength);
  REDISMODULE_GET_API(ListPush);
  REDISMODULE_GET_API(ListPop);
  REDISMODULE_GET_API(StringToLongLong);
  REDISMODULE_GET_API(StringToDouble);
  REDISMODULE_GET_API(Call);
  REDISMODULE_GET_API(CallReplyProto);
  REDISMODULE_GET_API(FreeCallReply);
  REDISMODULE_GET_API(CallReplyInteger);
  REDISMODULE_GET_API(CallReplyType);
  REDISMODULE_GET_API(CallReplyLength);
  REDISMODULE_GET_API(CallReplyArrayElement);
  REDISMODULE_GET_API(CallReplyStringPtr);
  REDISMODULE_GET_API(CreateStringFromCallReply);
  REDISMODULE_GET_API(CreateString);
  REDISMODULE_GET_API(CreateStringFromLongLong);
  REDISMODULE_GET_API(CreateStringFromString);
  REDISMODULE_GET_API(CreateStringPrintf);
  REDISMODULE_GET_API(FreeString);
  REDISMODULE_GET_API(StringPtrLen);
  REDISMODULE_GET_API(AutoMemory);
  REDISMODULE_GET_API(Replicate);
  REDISMODULE_GET_API(ReplicateVerbatim);
  REDISMODULE_GET_API(DeleteKey);
  REDISMODULE_GET_API(StringSet);
  REDISMODULE_GET_API(StringDMA);
  REDISMODULE_GET_API(StringTruncate);
  REDISMODULE_GET_API(GetExpire);
  REDISMODULE_GET_API(SetExpire);
  REDISMODULE_GET_API(ZsetAdd);
  REDISMODULE_GET_API(ZsetIncrby);
  REDISMODULE_GET_API(ZsetScore);
  REDISMODULE_GET_API(ZsetRem);
  REDISMODULE_GET_API(ZsetRangeStop);
  REDISMODULE_GET_API(ZsetFirstInScoreRange);
  REDISMODULE_GET_API(ZsetLastInScoreRange);
  REDISMODULE_GET_API(ZsetFirstInLexRange);
  REDISMODULE_GET_API(ZsetLastInLexRange);
  REDISMODULE_GET_API(ZsetRangeCurrentElement);
  REDISMODULE_GET_API(ZsetRangeNext);
  REDISMODULE_GET_API(ZsetRangePrev);
  REDISMODULE_GET_API(ZsetRangeEndReached);
  REDISMODULE_GET_API(HashSet);
  REDISMODULE_GET_API(HashGet);
  REDISMODULE_GET_API(IsKeysPositionRequest);
  REDISMODULE_GET_API(KeyAtPos);
  REDISMODULE_GET_API(GetClientId);
  REDISMODULE_GET_API(PoolAlloc);
  REDISMODULE_GET_API(CreateDataType);
  REDISMODULE_GET_API(ModuleTypeSetValue);
  REDISMODULE_GET_API(ModuleTypeGetType);
  REDISMODULE_GET_API(ModuleTypeGetValue);
  REDISMODULE_GET_API(SaveUnsigned);
  REDISMODULE_GET_API(LoadUnsigned);
  REDISMODULE_GET_API(SaveSigned);
  REDISMODULE_GET_API(LoadSigned);
  REDISMODULE_GET_API(SaveString);
  REDISMODULE_GET_API(SaveStringBuffer);
  REDISMODULE_GET_API(LoadString);
  REDISMODULE_GET_API(LoadStringBuffer);
  REDISMODULE_GET_API(SaveDouble);
  REDISMODULE_GET_API(LoadDouble);
  REDISMODULE_GET_API(SaveFloat);
  REDISMODULE_GET_API(LoadFloat);
  REDISMODULE_GET_API(EmitAOF);
  REDISMODULE_GET_API(Log);
  REDISMODULE_GET_API(LogIOError);
  REDISMODULE_GET_API(StringAppendBuffer);
  REDISMODULE_GET_API(RetainString);
  REDISMODULE_GET_API(StringCompare);
  REDISMODULE_GET_API(GetContextFromIO);
  REDISMODULE_GET_API(BlockClient);
  REDISMODULE_GET_API(UnblockClient);
  REDISMODULE_GET_API(IsBlockedReplyRequest);
  REDISMODULE_GET_API(IsBlockedTimeoutRequest);
  REDISMODULE_GET_API(GetBlockedClientPrivateData);
  REDISMODULE_GET_API(AbortBlock);
  REDISMODULE_GET_API(Milliseconds);
  RedisModule_SetModuleAttribs(ctx, name, ver, apiver);
  return REDISMODULE_OK;
}
#else
/* Things only defined for the modules core, not exported to modules
* including this file. */
#define RedisModuleString robj
#endif /* REDISMODULE_CORE */
#endif /* REDISMOUDLE_H */
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/subscription_executor.cc
|
C++
|
#include "ray/gcs/subscription_executor.h"
namespace ray {
namespace gcs {
template <typename ID, typename Data, typename Table>
Status SubscriptionExecutor<ID, Data, Table>::AsyncSubscribeAll(
    const ClientID &client_id, const SubscribeCallback<ID, Data> &subscribe,
    const StatusCallback &done) {
  // Subscribes to updates of *all* elements in the table. Only one
  // all-elements subscription may be installed, and it may not coexist with
  // per-element subscriptions (enforced below). Also called internally by
  // AsyncSubscribe with `subscribe == nullptr` to trigger GCS registration.
  // TODO(micafan) Optimize the lock when necessary.
  // Consider avoiding locking in single-threaded processes.
  std::unique_lock<std::mutex> lock(mutex_);
  if (subscribe_all_callback_ != nullptr) {
    RAY_LOG(DEBUG) << "Duplicate subscription! Already subscribed to all elements.";
    return Status::Invalid("Duplicate subscription!");
  }
  // Registration was already started by a per-element subscription; an
  // all-elements callback can no longer be installed on top of it.
  if (registration_status_ != RegistrationStatus::kNotRegistered) {
    if (subscribe != nullptr) {
      RAY_LOG(DEBUG) << "Duplicate subscription! Already subscribed to specific elements"
                        ", can't subscribe to all elements.";
      return Status::Invalid("Duplicate subscription!");
    }
  }
  if (registration_status_ == RegistrationStatus::kRegistered) {
    // Already registered to GCS, just invoke the `done` callback.
    // Unlock first so `done` does not run while holding `mutex_`.
    lock.unlock();
    if (done != nullptr) {
      done(Status::OK());
    }
    return Status::OK();
  }
  // Registration to GCS is not finished yet, add the `done` callback to the pending list
  // to be invoked when registration is done.
  if (done != nullptr) {
    pending_subscriptions_.emplace_back(done);
  }
  // If there's another registration request that's already on-going, then wait for it
  // to finish.
  if (registration_status_ == RegistrationStatus::kRegistering) {
    return Status::OK();
  }
  // Called for every notification. Looks up the per-element callback (if any)
  // and snapshots the all-elements callback under the lock, then invokes the
  // chosen callback outside the lock with the most recent entry.
  auto on_subscribe = [this](RedisGcsClient *client, const ID &id,
                             const std::vector<Data> &result) {
    if (result.empty()) {
      return;
    }
    SubscribeCallback<ID, Data> sub_one_callback = nullptr;
    SubscribeCallback<ID, Data> sub_all_callback = nullptr;
    {
      std::unique_lock<std::mutex> lock(mutex_);
      const auto it = id_to_callback_map_.find(id);
      if (it != id_to_callback_map_.end()) {
        sub_one_callback = it->second;
      }
      sub_all_callback = subscribe_all_callback_;
    }
    if (sub_one_callback != nullptr) {
      sub_one_callback(id, result.back());
    }
    if (sub_all_callback != nullptr) {
      // Per-element and all-elements subscriptions are mutually exclusive.
      RAY_CHECK(sub_one_callback == nullptr);
      sub_all_callback(id, result.back());
    }
  };
  // Called once the GCS registration completes: flip state to kRegistered and
  // flush all pending `done` callbacks outside the lock.
  auto on_done = [this](RedisGcsClient *client) {
    std::list<StatusCallback> pending_callbacks;
    {
      std::unique_lock<std::mutex> lock(mutex_);
      registration_status_ = RegistrationStatus::kRegistered;
      pending_callbacks.swap(pending_subscriptions_);
      RAY_CHECK(pending_subscriptions_.empty());
    }
    for (const auto &callback : pending_callbacks) {
      callback(Status::OK());
    }
  };
  Status status = table_.Subscribe(JobID::Nil(), client_id, on_subscribe, on_done);
  if (status.ok()) {
    // Still holding `lock`, so these writes are synchronized.
    registration_status_ = RegistrationStatus::kRegistering;
    subscribe_all_callback_ = subscribe;
  }
  return status;
}
template <typename ID, typename Data, typename Table>
Status SubscriptionExecutor<ID, Data, Table>::AsyncSubscribe(
    const ClientID &client_id, const ID &id, const SubscribeCallback<ID, Data> &subscribe,
    const StatusCallback &done) {
  // Subscribes to updates of one element: registers `subscribe` in the
  // per-element callback map, ensures the table-level subscription exists
  // (via AsyncSubscribeAll with a null callback), then requests
  // notifications for `id`. On any failure the map entry is rolled back.
  RAY_CHECK(client_id != ClientID::Nil());
  // NOTE(zhijunfu): `Subscribe` and other operations use different redis contexts,
  // thus we need to call `RequestNotifications` in the Subscribe callback to ensure
  // it's processed after the `Subscribe` request. Otherwise if `RequestNotifications`
  // is processed first we will miss the initial notification.
  auto on_subscribe_done = [this, client_id, id, subscribe, done](Status status) {
    // Completion of the notification request itself: undo the map entry on
    // failure, then forward the status to the caller.
    auto on_request_notification_done = [this, done, id](Status status) {
      if (!status.ok()) {
        std::unique_lock<std::mutex> lock(mutex_);
        id_to_callback_map_.erase(id);
      }
      if (done != nullptr) {
        done(status);
      }
    };
    {
      std::unique_lock<std::mutex> lock(mutex_);
      status = table_.RequestNotifications(JobID::Nil(), id, client_id,
                                           on_request_notification_done);
      if (!status.ok()) {
        // The request could not even be issued; roll back immediately.
        id_to_callback_map_.erase(id);
      }
    }
  };
  {
    std::unique_lock<std::mutex> lock(mutex_);
    const auto it = id_to_callback_map_.find(id);
    if (it != id_to_callback_map_.end()) {
      RAY_LOG(DEBUG) << "Duplicate subscription to id " << id << " client_id "
                     << client_id;
      return Status::Invalid("Duplicate subscription to element!");
    }
    // Install the callback before registration so that notifications arriving
    // during registration are not dropped.
    id_to_callback_map_[id] = subscribe;
  }
  auto status = AsyncSubscribeAll(client_id, nullptr, on_subscribe_done);
  if (!status.ok()) {
    std::unique_lock<std::mutex> lock(mutex_);
    id_to_callback_map_.erase(id);
  }
  return status;
}
template <typename ID, typename Data, typename Table>
Status SubscriptionExecutor<ID, Data, Table>::AsyncUnsubscribe(
    const ClientID &client_id, const ID &id, const StatusCallback &done) {
  // Cancels a per-element subscription. The callback is removed from the map
  // eagerly; if the cancel request later fails, the callback is restored
  // (unless the client resubscribed in the meantime).
  SubscribeCallback<ID, Data> subscribe = nullptr;
  {
    std::unique_lock<std::mutex> lock(mutex_);
    const auto it = id_to_callback_map_.find(id);
    if (it == id_to_callback_map_.end()) {
      RAY_LOG(DEBUG) << "Invalid Unsubscribe! id " << id << " client_id " << client_id;
      return Status::Invalid("Invalid Unsubscribe, no existing subscription found.");
    }
    // Keep the callback so it can be restored if the cancel fails.
    subscribe = std::move(it->second);
    id_to_callback_map_.erase(it);
  }
  RAY_CHECK(subscribe != nullptr);
  auto on_done = [this, id, subscribe, done](Status status) {
    if (!status.ok()) {
      std::unique_lock<std::mutex> lock(mutex_);
      const auto it = id_to_callback_map_.find(id);
      if (it != id_to_callback_map_.end()) {
        // The initial AsyncUnsubscribe deleted the callback, but the client
        // has subscribed again in the meantime. This new callback will be
        // called if we receive more notifications.
        RAY_LOG(WARNING)
            << "Client called AsyncSubscribe on " << id
            << " while AsyncUnsubscribe was pending, but the unsubscribe failed.";
      } else {
        // The Unsubscribe failed, so restore the initial callback.
        id_to_callback_map_[id] = subscribe;
      }
    }
    if (done != nullptr) {
      done(status);
    }
  };
  return table_.CancelNotifications(JobID::Nil(), id, client_id, on_done);
}
// Explicit instantiations for every (ID, Data, Table) combination used by the
// GCS client; this keeps the template member definitions in this .cc file.
template class SubscriptionExecutor<ActorID, ActorTableData, ActorTable>;
template class SubscriptionExecutor<JobID, JobTableData, JobTable>;
template class SubscriptionExecutor<TaskID, TaskTableData, raylet::TaskTable>;
template class SubscriptionExecutor<ObjectID, ObjectChangeNotification, ObjectTable>;
template class SubscriptionExecutor<TaskID, boost::optional<TaskLeaseData>,
                                    TaskLeaseTable>;
template class SubscriptionExecutor<ClientID, ResourceChangeNotification,
                                    DynamicResourceTable>;
template class SubscriptionExecutor<ClientID, HeartbeatTableData, HeartbeatTable>;
template class SubscriptionExecutor<ClientID, HeartbeatBatchTableData,
                                    HeartbeatBatchTable>;
template class SubscriptionExecutor<WorkerID, WorkerFailureData, WorkerFailureTable>;
} // namespace gcs
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/subscription_executor.h
|
C/C++ Header
|
#ifndef RAY_GCS_SUBSCRIPTION_EXECUTOR_H
#define RAY_GCS_SUBSCRIPTION_EXECUTOR_H
#include <atomic>
#include <list>
#include <mutex>
#include "ray/gcs/callback.h"
#include "ray/gcs/tables.h"
namespace ray {
namespace gcs {
/// \class SubscriptionExecutor
/// SubscriptionExecutor class encapsulates the implementation details of
/// subscribe/unsubscribe to elements (e.g.: actors or tasks or objects or nodes).
/// Support subscribing to a specific element or subscribing to all elements.
template <typename ID, typename Data, typename Table>
class SubscriptionExecutor {
 public:
  /// \param table The GCS table this executor subscribes to. The reference
  /// must outlive this executor.
  explicit SubscriptionExecutor(Table &table) : table_(table) {}
  ~SubscriptionExecutor() {}
  /// Subscribe to operations of all elements.
  /// Repeated subscription will return a failure.
  ///
  /// \param client_id The id of the client subscribing. If nil, updates for
  /// every client are received; otherwise only updates targeted at
  /// `client_id` are received.
  /// \param subscribe Callback that will be called each time when an element
  /// is registered or updated.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  Status AsyncSubscribeAll(const ClientID &client_id,
                           const SubscribeCallback<ID, Data> &subscribe,
                           const StatusCallback &done);
  /// Subscribe to operations of an element.
  /// Repeated subscription to an element will return a failure.
  ///
  /// \param client_id The id of the client subscribing. Must not be nil; the
  /// notifications are requested on behalf of this client.
  /// \param id The id of the element to be subscribe to.
  /// \param subscribe Callback that will be called each time when the element
  /// is registered or updated.
  /// \param done Callback that will be called when subscription is complete.
  /// \return Status
  Status AsyncSubscribe(const ClientID &client_id, const ID &id,
                        const SubscribeCallback<ID, Data> &subscribe,
                        const StatusCallback &done);
  /// Cancel subscription to an element.
  /// Unsubscribing can only be called after the subscription request is completed.
  ///
  /// \param client_id The id of the client that previously subscribed via
  /// AsyncSubscribe.
  /// \param id The id of the element to be unsubscribed to.
  /// \param done Callback that will be called when cancel subscription is complete.
  /// \return Status
  Status AsyncUnsubscribe(const ClientID &client_id, const ID &id,
                          const StatusCallback &done);
 private:
  /// The GCS table that subscriptions are registered against.
  Table &table_;
  /// Guards all mutable state below.
  std::mutex mutex_;
  /// State machine for the one-time table-level registration to GCS.
  enum class RegistrationStatus : uint8_t {
    kNotRegistered,
    kRegistering,
    kRegistered,
  };
  /// Whether successfully registered subscription to GCS.
  RegistrationStatus registration_status_{RegistrationStatus::kNotRegistered};
  /// List of subscriptions before registration to GCS is done, these callbacks
  /// will be called when the registration to GCS finishes.
  std::list<StatusCallback> pending_subscriptions_;
  /// Subscribe Callback of all elements.
  SubscribeCallback<ID, Data> subscribe_all_callback_{nullptr};
  /// A mapping from element ID to subscription callback.
  typedef std::unordered_map<ID, SubscribeCallback<ID, Data>> IDToCallbackMap;
  IDToCallbackMap id_to_callback_map_;
};
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_SUBSCRIPTION_EXECUTOR_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/tables.cc
|
C++
|
#include "ray/gcs/tables.h"
#include "absl/time/clock.h"
#include "ray/common/common_protocol.h"
#include "ray/common/grpc_util.h"
#include "ray/common/ray_config.h"
#include "ray/gcs/redis_gcs_client.h"
namespace {

// Redis command names for appending to a GCS log; the chain variants route
// through the chain-replication module.
static const std::string kTableAppendCommand = "RAY.TABLE_APPEND";
static const std::string kChainTableAppendCommand = "RAY.CHAIN.TABLE_APPEND";
// Redis command names for adding to a GCS table.
static const std::string kTableAddCommand = "RAY.TABLE_ADD";
static const std::string kChainTableAddCommand = "RAY.CHAIN.TABLE_ADD";

/// Return the Redis command used for log appends under `command_type`.
std::string GetLogAppendCommand(const ray::gcs::CommandType command_type) {
  if (command_type != ray::gcs::CommandType::kRegular) {
    RAY_CHECK(command_type == ray::gcs::CommandType::kChain);
    return kChainTableAppendCommand;
  }
  return kTableAppendCommand;
}

/// Return the Redis command used for table adds under `command_type`.
std::string GetTableAddCommand(const ray::gcs::CommandType command_type) {
  if (command_type != ray::gcs::CommandType::kRegular) {
    RAY_CHECK(command_type == ray::gcs::CommandType::kChain);
    return kChainTableAddCommand;
  }
  return kTableAddCommand;
}

}  // namespace
namespace ray {
namespace gcs {
template <typename ID, typename Data>
Status Log<ID, Data>::Append(const JobID &job_id, const ID &id,
                             const std::shared_ptr<Data> &data,
                             const WriteCallback &done) {
  num_appends_++;
  // Serialize the protobuf payload up front.
  const std::string serialized = data->SerializeAsString();
  // Reply handler: a non-OK append reply is fatal; otherwise notify the
  // caller that the entry was written.
  auto on_reply = [this, id, data, done](std::shared_ptr<CallbackReply> reply) {
    const Status append_status = reply->ReadAsStatus();
    RAY_CHECK(append_status.ok())
        << "Failed to execute command TABLE_APPEND:" << append_status.ToString();
    if (done != nullptr) {
      done(client_, id, *data);
    }
  };
  return GetRedisContext(id)->RunAsync(GetLogAppendCommand(command_type_), id,
                                       serialized.data(), serialized.length(), prefix_,
                                       pubsub_channel_, std::move(on_reply));
}
template <typename ID, typename Data>
Status Log<ID, Data>::SyncAppend(const JobID &job_id, const ID &id,
                                 const std::shared_ptr<Data> &data) {
  num_appends_++;
  const std::string serialized = data->SerializeAsString();
  auto reply = GetRedisContext(id)->RunSync(GetLogAppendCommand(command_type_), id,
                                            serialized.data(), serialized.length(),
                                            prefix_, pubsub_channel_);
  if (!reply) {
    // A null reply means the synchronous call produced no response.
    return Status::RedisError("Redis error");
  }
  return reply->ReadAsStatus();
}
template <typename ID, typename Data>
Status Log<ID, Data>::AppendAt(const JobID &job_id, const ID &id,
                               const std::shared_ptr<Data> &data,
                               const WriteCallback &done, const WriteCallback &failure,
                               int log_length) {
  num_appends_++;
  // Reply handler: route to `done` on success, `failure` otherwise. Either
  // callback may be null, in which case that outcome is ignored.
  auto on_reply = [this, id, data, done,
                   failure](std::shared_ptr<CallbackReply> reply) {
    const WriteCallback &callback = reply->ReadAsStatus().ok() ? done : failure;
    if (callback != nullptr) {
      callback(client_, id, *data);
    }
  };
  const std::string serialized = data->SerializeAsString();
  return GetRedisContext(id)->RunAsync(GetLogAppendCommand(command_type_), id,
                                       serialized.data(), serialized.length(), prefix_,
                                       pubsub_channel_, std::move(on_reply), log_length);
}
template <typename ID, typename Data>
Status Log<ID, Data>::Lookup(const JobID &job_id, const ID &id, const Callback &lookup) {
  // Asynchronously read all entries stored under `id` and deliver them to
  // `lookup`. An absent key yields an empty result vector.
  num_lookups_++;
  // Reply handler: decode the GcsEntry protobuf held at the key (if any)
  // into a vector of Data and invoke `lookup`.
  auto callback = [this, id, lookup](std::shared_ptr<CallbackReply> reply) {
    if (lookup != nullptr) {
      std::vector<Data> results;
      if (!reply->IsNil()) {
        GcsEntry gcs_entry;
        gcs_entry.ParseFromString(reply->ReadAsString());
        // The entry must belong to the key that was queried.
        RAY_CHECK(ID::FromBinary(gcs_entry.id()) == id);
        for (int64_t i = 0; i < gcs_entry.entries_size(); i++) {
          Data data;
          data.ParseFromString(gcs_entry.entries(i));
          results.emplace_back(std::move(data));
        }
      }
      lookup(client_, id, results);
    }
  };
  // TABLE_LOOKUP carries no payload; pass an empty buffer.
  std::vector<uint8_t> nil;
  return GetRedisContext(id)->RunAsync("RAY.TABLE_LOOKUP", id, nil.data(), nil.size(),
                                       prefix_, pubsub_channel_, std::move(callback));
}
template <typename ID, typename Data>
Status Log<ID, Data>::Subscribe(const JobID &job_id, const ClientID &client_id,
                                const Callback &subscribe,
                                const SubscriptionCallback &done) {
  // Adapt the plain callback to the notification-callback interface,
  // asserting that logs never deliver REMOVE notifications.
  auto forward = [subscribe](RedisGcsClient *client, const ID &id,
                             const GcsChangeMode change_mode,
                             const std::vector<Data> &data) {
    RAY_CHECK(change_mode != GcsChangeMode::REMOVE);
    subscribe(client, id, data);
  };
  return Subscribe(job_id, client_id, forward, done);
}
template <typename ID, typename Data>
Status Log<ID, Data>::Subscribe(const JobID &job_id, const ClientID &client_id,
                                const NotificationCallback &subscribe,
                                const SubscriptionCallback &done) {
  // Subscribe to the pub-sub channel on every shard. A table may only be
  // subscribed once per client; subscribe_callback_index_ tracks this.
  RAY_CHECK(subscribe_callback_index_ == -1)
      << "Client called Subscribe twice on the same table";
  auto callback = [this, subscribe, done](std::shared_ptr<CallbackReply> reply) {
    const auto data = reply->ReadAsPubsubData();
    if (data.empty()) {
      // No notification data is provided. This is the callback for the
      // initial subscription request.
      if (done != nullptr) {
        done(client_);
      }
    } else {
      // Data is provided. This is the callback for a message.
      if (subscribe != nullptr) {
        // Parse the notification.
        GcsEntry gcs_entry;
        gcs_entry.ParseFromString(data);
        ID id = ID::FromBinary(gcs_entry.id());
        std::vector<Data> results;
        for (int64_t i = 0; i < gcs_entry.entries_size(); i++) {
          Data result;
          result.ParseFromString(gcs_entry.entries(i));
          results.emplace_back(std::move(result));
        }
        subscribe(client_, id, gcs_entry.change_mode(), results);
      }
    }
  };
  // Mark the subscription as in progress before issuing the per-shard
  // requests; SubscribeAsync may update the index through the pointer.
  subscribe_callback_index_ = 1;
  for (auto &context : shard_contexts_) {
    RAY_RETURN_NOT_OK(context->SubscribeAsync(client_id, pubsub_channel_, callback,
                                              &subscribe_callback_index_));
  }
  return Status::OK();
}
template <typename ID, typename Data>
Status Log<ID, Data>::RequestNotifications(const JobID &job_id, const ID &id,
                                           const ClientID &client_id,
                                           const StatusCallback &done) {
  RAY_CHECK(subscribe_callback_index_ >= 0)
      << "Client requested notifications on a key before Subscribe completed";
  RedisCallback callback = nullptr;
  if (done != nullptr) {
    // A nil reply signals success; any other reply is reported as an error.
    callback = [done](std::shared_ptr<CallbackReply> reply) {
      if (reply->IsNil()) {
        done(Status::OK());
      } else {
        done(Status::RedisError("request notifications failed."));
      }
    };
  }
  return GetRedisContext(id)->RunAsync("RAY.TABLE_REQUEST_NOTIFICATIONS", id,
                                       client_id.Data(), client_id.Size(), prefix_,
                                       pubsub_channel_, callback);
}
template <typename ID, typename Data>
Status Log<ID, Data>::CancelNotifications(const JobID &job_id, const ID &id,
                                          const ClientID &client_id,
                                          const StatusCallback &done) {
  RAY_CHECK(subscribe_callback_index_ >= 0)
      << "Client canceled notifications on a key before Subscribe completed";
  RedisCallback callback = nullptr;
  if (done != nullptr) {
    // Forward the Redis status verbatim to the caller.
    callback = [done](std::shared_ptr<CallbackReply> reply) {
      done(reply->ReadAsStatus());
    };
  }
  return GetRedisContext(id)->RunAsync("RAY.TABLE_CANCEL_NOTIFICATIONS", id,
                                       client_id.Data(), client_id.Size(), prefix_,
                                       pubsub_channel_, callback);
}
template <typename ID, typename Data>
void Log<ID, Data>::Delete(const JobID &job_id, const std::vector<ID> &ids) {
  // Fire-and-forget deletion of multiple keys. IDs are grouped per Redis
  // shard and sent in size-limited batches.
  if (ids.empty()) {
    return;
  }
  // Group the raw binary ids by the shard that owns them.
  std::unordered_map<RedisContext *, std::ostringstream> sharded_data;
  for (const auto &id : ids) {
    sharded_data[GetRedisContext(id).get()] << id.Binary();
  }
  // Breaking really large deletion commands into batches of smaller size.
  const size_t batch_size =
      RayConfig::instance().maximum_gcs_deletion_batch_size() * ID::Size();
  for (const auto &pair : sharded_data) {
    std::string current_data = pair.second.str();
    for (size_t cur = 0; cur < pair.second.str().size(); cur += batch_size) {
      // Wire format: a uint16_t id count followed by `id_count` raw ids.
      size_t data_field_size = std::min(batch_size, current_data.size() - cur);
      uint16_t id_count = data_field_size / ID::Size();
      // Send data contains id count and all the id data.
      std::string send_data(data_field_size + sizeof(id_count), 0);
      uint8_t *buffer = reinterpret_cast<uint8_t *>(&send_data[0]);
      *reinterpret_cast<uint16_t *>(buffer) = id_count;
      RAY_IGNORE_EXPR(
          std::copy_n(reinterpret_cast<const uint8_t *>(current_data.c_str() + cur),
                      data_field_size, buffer + sizeof(uint16_t)));
      // Best-effort: the result of the deletion command is ignored.
      RAY_IGNORE_EXPR(
          pair.first->RunAsync("RAY.TABLE_DELETE", UniqueID::Nil(),
                               reinterpret_cast<const uint8_t *>(send_data.c_str()),
                               send_data.size(), prefix_, pubsub_channel_,
                               /*redisCallback=*/nullptr));
    }
  }
}
template <typename ID, typename Data>
void Log<ID, Data>::Delete(const JobID &job_id, const ID &id) {
  // Convenience overload: delegate to the batch version with one element.
  std::vector<ID> single;
  single.push_back(id);
  Delete(job_id, single);
}
template <typename ID, typename Data>
std::string Log<ID, Data>::DebugString() const {
  // Summarize the operation counters for debug/metrics output.
  std::ostringstream out;
  out << "num lookups: " << num_lookups_ << ", num appends: " << num_appends_;
  return out.str();
}
template <typename ID, typename Data>
Status Table<ID, Data>::Add(const JobID &job_id, const ID &id,
                            const std::shared_ptr<Data> &data,
                            const WriteCallback &done) {
  num_adds_++;
  // Serialize up front; the redis reply itself carries no payload we need.
  std::string serialized = data->SerializeAsString();
  auto on_reply = [this, id, data, done](std::shared_ptr<CallbackReply> reply) {
    if (done != nullptr) {
      done(client_, id, *data);
    }
  };
  return GetRedisContext(id)->RunAsync(GetTableAddCommand(command_type_), id,
                                       serialized.data(), serialized.length(), prefix_,
                                       pubsub_channel_, std::move(on_reply));
}
template <typename ID, typename Data>
Status Table<ID, Data>::Lookup(const JobID &job_id, const ID &id, const Callback &lookup,
                               const FailureCallback &failure) {
  num_lookups_++;
  // Adapt the log-based lookup (a vector of entries) to the table contract,
  // where a key holds at most one entry.
  auto adapter = [lookup, failure](RedisGcsClient *client, const ID &key,
                                   const std::vector<Data> &entries) {
    if (!entries.empty()) {
      RAY_CHECK(entries.size() == 1);
      if (lookup != nullptr) {
        lookup(client, key, entries[0]);
      }
    } else if (failure != nullptr) {
      failure(client, key);
    }
  };
  return Log<ID, Data>::Lookup(job_id, id, adapter);
}
template <typename ID, typename Data>
Status Table<ID, Data>::Subscribe(const JobID &job_id, const ClientID &client_id,
                                  const Callback &subscribe,
                                  const FailureCallback &failure,
                                  const SubscriptionCallback &done) {
  // Every notification for a table key carries either zero entries (the key
  // was empty) or exactly one entry.
  auto on_message = [subscribe, failure](RedisGcsClient *client, const ID &key,
                                         const std::vector<Data> &entries) {
    RAY_CHECK(entries.empty() || entries.size() == 1);
    if (entries.empty()) {
      if (failure != nullptr) {
        failure(client, key);
      }
    } else {
      subscribe(client, key, entries[0]);
    }
  };
  return Log<ID, Data>::Subscribe(job_id, client_id, on_message, done);
}
template <typename ID, typename Data>
Status Table<ID, Data>::Subscribe(const JobID &job_id, const ClientID &client_id,
                                  const Callback &subscribe,
                                  const SubscriptionCallback &done) {
  // No failure callback was supplied; forward a null one.
  const FailureCallback no_failure = nullptr;
  return Subscribe(job_id, client_id, subscribe, no_failure, done);
}
template <typename ID, typename Data>
std::string Table<ID, Data>::DebugString() const {
  // Summarize the operation counters for debug/metrics output.
  std::ostringstream out;
  out << "num lookups: " << num_lookups_ << ", num adds: " << num_adds_;
  return out.str();
}
template <typename ID, typename Data>
Status Set<ID, Data>::Add(const JobID &job_id, const ID &id,
                          const std::shared_ptr<Data> &data, const WriteCallback &done) {
  num_adds_++;
  std::string serialized = data->SerializeAsString();
  // Invoke the caller's callback once redis acknowledges the addition.
  auto on_reply = [this, id, data, done](std::shared_ptr<CallbackReply> reply) {
    if (done != nullptr) {
      done(client_, id, *data);
    }
  };
  return GetRedisContext(id)->RunAsync("RAY.SET_ADD", id, serialized.data(),
                                       serialized.length(), prefix_, pubsub_channel_,
                                       std::move(on_reply));
}
template <typename ID, typename Data>
Status Set<ID, Data>::Remove(const JobID &job_id, const ID &id,
                             const std::shared_ptr<Data> &data,
                             const WriteCallback &done) {
  num_removes_++;
  std::string serialized = data->SerializeAsString();
  // Invoke the caller's callback once redis acknowledges the removal.
  auto on_reply = [this, id, data, done](std::shared_ptr<CallbackReply> reply) {
    if (done != nullptr) {
      done(client_, id, *data);
    }
  };
  return GetRedisContext(id)->RunAsync("RAY.SET_REMOVE", id, serialized.data(),
                                       serialized.length(), prefix_, pubsub_channel_,
                                       std::move(on_reply));
}
template <typename ID, typename Data>
Status Set<ID, Data>::Subscribe(const JobID &job_id, const ClientID &client_id,
                                const NotificationCallback &subscribe,
                                const SubscriptionCallback &done) {
  // Wrap each raw (change_mode, data) pair in a single-element notification
  // vector, which is the shape the Set subscriber expects.
  auto on_change = [subscribe](RedisGcsClient *client, const ID &id,
                               const GcsChangeMode change_mode,
                               const std::vector<Data> &data) {
    std::vector<ArrayNotification<Data>> notifications;
    notifications.emplace_back(ArrayNotification<Data>(change_mode, data));
    subscribe(client, id, notifications);
  };
  return Log<ID, Data>::Subscribe(job_id, client_id, on_change, done);
}
template <typename ID, typename Data>
std::string Set<ID, Data>::DebugString() const {
  // Summarize the operation counters for debug/metrics output.
  std::ostringstream out;
  out << "num lookups: " << num_lookups_ << ", num adds: " << num_adds_
      << ", num removes: " << num_removes_;
  return out.str();
}
template <typename ID, typename Data>
Status Hash<ID, Data>::Update(const JobID &job_id, const ID &id, const DataMap &data_map,
                              const HashCallback &done) {
  num_adds_++;
  // Encode the key/value pairs as a flat entry list:
  // [key0, value0, key1, value1, ...].
  GcsEntry gcs_entry;
  gcs_entry.set_id(id.Binary());
  gcs_entry.set_change_mode(GcsChangeMode::APPEND_OR_ADD);
  for (const auto &kv : data_map) {
    gcs_entry.add_entries(kv.first);
    gcs_entry.add_entries(kv.second->SerializeAsString());
  }
  std::string payload = gcs_entry.SerializeAsString();
  auto on_reply = [this, id, data_map, done](std::shared_ptr<CallbackReply> reply) {
    if (done != nullptr) {
      done(client_, id, data_map);
    }
  };
  return GetRedisContext(id)->RunAsync("RAY.HASH_UPDATE", id, payload.data(),
                                       payload.size(), prefix_, pubsub_channel_,
                                       std::move(on_reply));
}
template <typename ID, typename Data>
Status Hash<ID, Data>::RemoveEntries(const JobID &job_id, const ID &id,
                                     const std::vector<std::string> &keys,
                                     const HashRemoveCallback &remove_callback) {
  num_removes_++;
  // A REMOVE entry carries only the keys to delete (no values).
  GcsEntry gcs_entry;
  gcs_entry.set_id(id.Binary());
  gcs_entry.set_change_mode(GcsChangeMode::REMOVE);
  for (const auto &key : keys) {
    gcs_entry.add_entries(key);
  }
  std::string payload = gcs_entry.SerializeAsString();
  auto on_reply = [this, id, keys,
                   remove_callback](std::shared_ptr<CallbackReply> reply) {
    if (remove_callback != nullptr) {
      remove_callback(client_, id, keys);
    }
  };
  return GetRedisContext(id)->RunAsync("RAY.HASH_UPDATE", id, payload.data(),
                                       payload.size(), prefix_, pubsub_channel_,
                                       std::move(on_reply));
}
template <typename ID, typename Data>
std::string Hash<ID, Data>::DebugString() const {
  // Summarize the operation counters for debug/metrics output.
  std::ostringstream out;
  out << "num lookups: " << num_lookups_ << ", num adds: " << num_adds_
      << ", num removes: " << num_removes_;
  return out.str();
}
template <typename ID, typename Data>
Status Hash<ID, Data>::Lookup(const JobID &job_id, const ID &id,
                              const HashCallback &lookup) {
  num_lookups_++;
  auto callback = [this, id, lookup](std::shared_ptr<CallbackReply> reply) {
    if (lookup != nullptr) {
      DataMap results;
      if (!reply->IsNil()) {
        // Read the serialized payload exactly once and parse it. The previous
        // code called reply->ReadAsString() twice, discarding the first result
        // and reading the reply again for the parse.
        const auto data = reply->ReadAsString();
        GcsEntry gcs_entry;
        gcs_entry.ParseFromString(data);
        RAY_CHECK(ID::FromBinary(gcs_entry.id()) == id);
        // Entries are stored as a flat [key, value, key, value, ...] list.
        RAY_CHECK(gcs_entry.entries_size() % 2 == 0);
        for (int i = 0; i < gcs_entry.entries_size(); i += 2) {
          const auto &key = gcs_entry.entries(i);
          const auto value = std::make_shared<Data>();
          value->ParseFromString(gcs_entry.entries(i + 1));
          results.emplace(key, std::move(value));
        }
      }
      lookup(client_, id, results);
    }
  };
  std::vector<uint8_t> nil;
  return GetRedisContext(id)->RunAsync("RAY.TABLE_LOOKUP", id, nil.data(), nil.size(),
                                       prefix_, pubsub_channel_, std::move(callback));
}
template <typename ID, typename Data>
Status Hash<ID, Data>::Subscribe(const JobID &job_id, const ClientID &client_id,
                                 const HashNotificationCallback &subscribe,
                                 const SubscriptionCallback &done) {
  RAY_CHECK(subscribe_callback_index_ == -1)
      << "Client called Subscribe twice on the same table";
  auto callback = [this, subscribe, done](std::shared_ptr<CallbackReply> reply) {
    const auto data = reply->ReadAsPubsubData();
    if (data.empty()) {
      // No notification data is provided. This is the callback for the
      // initial subscription request.
      if (done != nullptr) {
        done(client_);
      }
    } else {
      // Data is provided. This is the callback for a message.
      if (subscribe != nullptr) {
        // Parse the notification.
        GcsEntry gcs_entry;
        gcs_entry.ParseFromString(data);
        ID id = ID::FromBinary(gcs_entry.id());
        DataMap data_map;
        if (gcs_entry.change_mode() == GcsChangeMode::REMOVE) {
          // Removal notifications carry only keys; map each removed key to a
          // null value so subscribers can tell which entries disappeared.
          for (const auto &key : gcs_entry.entries()) {
            data_map.emplace(key, std::shared_ptr<Data>());
          }
        } else {
          // Additions/updates are encoded as a flat [key, value, ...] list.
          RAY_CHECK(gcs_entry.entries_size() % 2 == 0);
          for (int i = 0; i < gcs_entry.entries_size(); i += 2) {
            const auto &key = gcs_entry.entries(i);
            const auto value = std::make_shared<Data>();
            value->ParseFromString(gcs_entry.entries(i + 1));
            data_map.emplace(key, std::move(value));
          }
        }
        MapNotification<std::string, Data> notification(gcs_entry.change_mode(),
                                                        data_map);
        std::vector<MapNotification<std::string, Data>> notification_vec;
        notification_vec.emplace_back(std::move(notification));
        subscribe(client_, id, notification_vec);
      }
    }
  };
  // Mark the table as subscribed before issuing the SUBSCRIBE calls.
  // NOTE(review): the index is set to a constant here rather than the value
  // produced by SubscribeAsync below — presumably only the >= 0 "subscribed"
  // state matters; confirm against RedisContext::SubscribeAsync.
  subscribe_callback_index_ = 1;
  for (auto &context : shard_contexts_) {
    RAY_RETURN_NOT_OK(context->SubscribeAsync(client_id, pubsub_channel_, callback,
                                              &subscribe_callback_index_));
  }
  return Status::OK();
}
// Forward to the base Log's counter summary for the error table.
std::string ErrorTable::DebugString() const {
  return Log<JobID, ErrorTableData>::DebugString();
}
// Forward to the base Log's counter summary for the profile table.
std::string ProfileTable::DebugString() const {
  return Log<UniqueID, ProfileTableData>::DebugString();
}
void ClientTable::RegisterNodeChangeCallback(const NodeChangeCallback &callback) {
  RAY_CHECK(node_change_callback_ == nullptr);
  node_change_callback_ = callback;
  // Replay cached entries so the new callback also sees nodes whose
  // notifications arrived before registration.
  for (const auto &pair : node_cache_) {
    const auto &cached_id = pair.first;
    const auto &cached_info = pair.second;
    if (cached_id.IsNil()) {
      continue;
    }
    RAY_CHECK(cached_info.state() == GcsNodeInfo::ALIVE ||
              cached_info.state() == GcsNodeInfo::DEAD);
    node_change_callback_(cached_id, cached_info);
  }
}
// Apply one client-table notification: update the node cache, track removed
// nodes, and fire the registered node-change callback for new transitions.
void ClientTable::HandleNotification(RedisGcsClient *client,
                                     const GcsNodeInfo &node_info) {
  ClientID node_id = ClientID::FromBinary(node_info.node_id());
  bool is_alive = (node_info.state() == GcsNodeInfo::ALIVE);
  // It's possible to get duplicate notifications from the client table, so
  // check whether this notification is new.
  auto entry = node_cache_.find(node_id);
  bool is_notif_new;
  if (entry == node_cache_.end()) {
    // If the entry is not in the cache, then the notification is new.
    is_notif_new = true;
  } else {
    // If the entry is in the cache, the notification is new only on an
    // ALIVE -> DEAD transition. (The code does not treat resource updates as
    // new notifications.)
    bool was_alive = (entry->second.state() == GcsNodeInfo::ALIVE);
    is_notif_new = was_alive && !is_alive;
    // Once a client with a given ID has been removed, it should never be added
    // again. If the entry was in the cache and the client was deleted, check
    // that this new notification is not an insertion.
    if (!was_alive) {
      RAY_CHECK(!is_alive)
          << "Notification for addition of a client that was already removed:" << node_id;
    }
  }
  // Add the notification to our cache. Notifications are idempotent.
  RAY_LOG(DEBUG) << "[ClientTableNotification] ClientTable Insertion/Deletion "
                    "notification for client id "
                 << node_id << ". IsAlive: " << is_alive
                 << ". Setting the client cache to data.";
  node_cache_[node_id] = node_info;
  // If the notification is new, call any registered callbacks.
  GcsNodeInfo &cache_data = node_cache_[node_id];
  if (is_notif_new) {
    if (is_alive) {
      RAY_CHECK(removed_nodes_.find(node_id) == removed_nodes_.end());
    } else {
      // NOTE(swang): The node should be added to this data structure before
      // the callback gets called, in case the callback depends on the data
      // structure getting updated.
      removed_nodes_.insert(node_id);
    }
    if (node_change_callback_ != nullptr) {
      node_change_callback_(node_id, cache_data);
    }
  }
}
// Return this node's ID. Only valid after Connect() has succeeded (the ID is
// nil before that, which the check enforces).
const ClientID &ClientTable::GetLocalClientId() const {
  RAY_CHECK(!local_node_id_.IsNil());
  return local_node_id_;
}
// Return the locally cached node info for this node (set by Connect()).
const GcsNodeInfo &ClientTable::GetLocalClient() const { return local_node_info_; }
bool ClientTable::IsRemoved(const ClientID &node_id) const {
  // A node counts as removed once it appears in the removed-nodes set.
  return removed_nodes_.find(node_id) != removed_nodes_.end();
}
Status ClientTable::Connect(const GcsNodeInfo &local_node_info) {
  RAY_CHECK(!disconnected_) << "Tried to reconnect a disconnected node.";
  RAY_CHECK(local_node_id_.IsNil()) << "This node is already connected.";
  RAY_CHECK(local_node_info.state() == GcsNodeInfo::ALIVE);
  // Synchronously append our ALIVE entry to the client log; only record the
  // local identity once the append has succeeded.
  auto entry = std::make_shared<GcsNodeInfo>(local_node_info);
  const Status status = SyncAppend(JobID::Nil(), client_log_key_, entry);
  if (!status.ok()) {
    return status;
  }
  local_node_id_ = ClientID::FromBinary(local_node_info.node_id());
  local_node_info_ = local_node_info;
  return status;
}
Status ClientTable::Disconnect() {
  // Publish a DEAD entry for this node to the client log.
  local_node_info_.set_state(GcsNodeInfo::DEAD);
  auto entry = std::make_shared<GcsNodeInfo>(local_node_info_);
  const Status status = SyncAppend(JobID::Nil(), client_log_key_, entry);
  if (status.ok()) {
    // We successfully added the deletion entry. Mark ourselves as disconnected.
    disconnected_ = true;
  }
  return status;
}
ray::Status ClientTable::MarkConnected(const GcsNodeInfo &node_info,
                                       const WriteCallback &done) {
  // Only ALIVE entries may be appended through this path.
  RAY_CHECK(node_info.state() == GcsNodeInfo::ALIVE);
  auto entry = std::make_shared<GcsNodeInfo>(node_info);
  return Append(JobID::Nil(), client_log_key_, entry, done);
}
ray::Status ClientTable::MarkDisconnected(const ClientID &dead_node_id,
                                          const WriteCallback &done) {
  // Build a minimal DEAD entry for the given node and append it to the log.
  auto dead_entry = std::make_shared<GcsNodeInfo>();
  dead_entry->set_node_id(dead_node_id.Binary());
  dead_entry->set_state(GcsNodeInfo::DEAD);
  return Append(JobID::Nil(), client_log_key_, dead_entry, done);
}
// Subscribe to node additions/removals in the client table. The registered
// `subscribe` callback fires for each new node-state transition; `done` fires
// once notifications have been requested successfully.
ray::Status ClientTable::SubscribeToNodeChange(
    const SubscribeCallback<ClientID, GcsNodeInfo> &subscribe,
    const StatusCallback &done) {
  // Callback for a notification from the client table.
  auto on_subscribe = [this](RedisGcsClient *client, const UniqueID &log_key,
                             const std::vector<GcsNodeInfo> &notifications) {
    RAY_CHECK(log_key == client_log_key_);
    // Coalesce the batch: a node that is later marked dead within the same
    // batch is reported only as disconnected.
    std::unordered_map<std::string, GcsNodeInfo> connected_nodes;
    std::unordered_map<std::string, GcsNodeInfo> disconnected_nodes;
    for (auto &notification : notifications) {
      // This is temporary fix for Issue 4140 to avoid connect to dead nodes.
      // TODO(yuhguo): remove this temporary fix after GCS entry is removable.
      if (notification.state() == GcsNodeInfo::ALIVE) {
        connected_nodes.emplace(notification.node_id(), notification);
      } else {
        auto iter = connected_nodes.find(notification.node_id());
        if (iter != connected_nodes.end()) {
          connected_nodes.erase(iter);
        }
        disconnected_nodes.emplace(notification.node_id(), notification);
      }
    }
    // Deliver alive nodes first, then dead ones.
    for (const auto &pair : connected_nodes) {
      HandleNotification(client, pair.second);
    }
    for (const auto &pair : disconnected_nodes) {
      HandleNotification(client, pair.second);
    }
  };
  // Callback to request notifications from the client table once we've
  // successfully subscribed.
  auto on_done = [this, subscribe, done](RedisGcsClient *client) {
    auto on_request_notification_done = [this, subscribe, done](Status status) {
      RAY_CHECK_OK(status);
      if (done != nullptr) {
        done(status);
      }
      // Register node change callbacks after RequestNotification finishes.
      RegisterNodeChangeCallback(subscribe);
    };
    RAY_CHECK_OK(RequestNotifications(JobID::Nil(), client_log_key_, subscribe_id_,
                                      on_request_notification_done));
  };
  // Subscribe to the client table.
  return Subscribe(JobID::Nil(), subscribe_id_, on_subscribe, on_done);
}
bool ClientTable::GetClient(const ClientID &node_id, GcsNodeInfo *node_info) const {
  RAY_CHECK(!node_id.IsNil());
  // Copy the cached entry out, if one exists for this node.
  const auto entry = node_cache_.find(node_id);
  if (entry == node_cache_.end()) {
    return false;
  }
  *node_info = entry->second;
  return true;
}
// Return the full cached map of node ID -> node info (includes DEAD entries).
const std::unordered_map<ClientID, GcsNodeInfo> &ClientTable::GetAllClients() const {
  return node_cache_;
}
Status ClientTable::Lookup(const Callback &lookup) {
RAY_CHECK(lookup != nullptr);
return Log::Lookup(JobID::Nil(), client_log_key_, lookup);
}
std::string ClientTable::DebugString() const {
std::stringstream result;
result << Log<ClientID, GcsNodeInfo>::DebugString();
result << ", cache size: " << node_cache_.size()
<< ", num removed: " << removed_nodes_.size();
return result.str();
}
Status TaskLeaseTable::Subscribe(const JobID &job_id, const ClientID &client_id,
                                 const Callback &subscribe,
                                 const SubscriptionCallback &done) {
  // Wrap each lease entry in boost::optional; an empty notification becomes a
  // single empty optional so subscribers can observe lease expiration.
  auto on_subscribe = [subscribe](RedisGcsClient *client, const TaskID &task_id,
                                  const std::vector<TaskLeaseData> &data) {
    std::vector<boost::optional<TaskLeaseData>> result;
    for (const auto &item : data) {
      result.emplace_back(boost::optional<TaskLeaseData>(item));
    }
    if (result.empty()) {
      result.emplace_back(boost::optional<TaskLeaseData>());
    }
    subscribe(client, task_id, result);
  };
  return Table<TaskID, TaskLeaseData>::Subscribe(job_id, client_id, on_subscribe, done);
}
// Record a new checkpoint ID for an actor. Reads the current checkpoint-ID
// list, appends the new ID with a millisecond timestamp, and trims the list
// to num_actor_checkpoints_to_keep, asynchronously deleting evicted
// checkpoints from the checkpoint table. If no entry exists yet, a fresh one
// is created via the failure callback path.
Status ActorCheckpointIdTable::AddCheckpointId(const JobID &job_id,
                                               const ActorID &actor_id,
                                               const ActorCheckpointID &checkpoint_id,
                                               const WriteCallback &done) {
  auto lookup_callback = [this, checkpoint_id, job_id, actor_id, done](
                             ray::gcs::RedisGcsClient *client, const ActorID &id,
                             const ActorCheckpointIdData &data) {
    // Existing entry: append the new checkpoint, then evict the oldest ones.
    std::shared_ptr<ActorCheckpointIdData> copy =
        std::make_shared<ActorCheckpointIdData>(data);
    copy->add_timestamps(absl::GetCurrentTimeNanos() / 1000000);
    copy->add_checkpoint_ids(checkpoint_id.Binary());
    auto num_to_keep = RayConfig::instance().num_actor_checkpoints_to_keep();
    // NOTE(review): size() is an int compared against the config value —
    // presumably num_to_keep is small and non-negative; confirm the config
    // type to rule out a signed/unsigned comparison surprise.
    while (copy->timestamps().size() > num_to_keep) {
      // Delete the checkpoint from actor checkpoint table.
      const auto &to_delete = ActorCheckpointID::FromBinary(copy->checkpoint_ids(0));
      copy->mutable_checkpoint_ids()->erase(copy->mutable_checkpoint_ids()->begin());
      copy->mutable_timestamps()->erase(copy->mutable_timestamps()->begin());
      client_->actor_checkpoint_table().Delete(job_id, to_delete);
    }
    RAY_CHECK_OK(Add(job_id, actor_id, copy, done));
  };
  auto failure_callback = [this, checkpoint_id, job_id, actor_id, done](
                              ray::gcs::RedisGcsClient *client, const ActorID &id) {
    // No entry yet: create one containing only this checkpoint.
    std::shared_ptr<ActorCheckpointIdData> data =
        std::make_shared<ActorCheckpointIdData>();
    data->set_actor_id(id.Binary());
    data->add_timestamps(absl::GetCurrentTimeNanos() / 1000000);
    *data->add_checkpoint_ids() = checkpoint_id.Binary();
    RAY_CHECK_OK(Add(job_id, actor_id, data, done));
  };
  return Lookup(job_id, actor_id, lookup_callback, failure_callback);
}
// Explicit instantiations of the GCS table templates for every (ID, Data)
// combination used by the client, so the template definitions above can live
// in this translation unit.
template class Log<ObjectID, ObjectTableData>;
template class Set<ObjectID, ObjectTableData>;
template class Log<TaskID, TaskTableData>;
template class Table<TaskID, TaskTableData>;
template class Log<ActorID, ActorTableData>;
template class Log<TaskID, TaskReconstructionData>;
template class Table<TaskID, TaskLeaseData>;
template class Table<ClientID, HeartbeatTableData>;
template class Table<ClientID, HeartbeatBatchTableData>;
template class Log<JobID, ErrorTableData>;
template class Log<ClientID, GcsNodeInfo>;
template class Log<JobID, JobTableData>;
template class Log<UniqueID, ProfileTableData>;
template class Table<ActorCheckpointID, ActorCheckpointData>;
template class Table<ActorID, ActorCheckpointIdData>;
template class Table<WorkerID, WorkerFailureData>;
template class Log<ClientID, ResourceTableData>;
template class Hash<ClientID, ResourceTableData>;
} // namespace gcs
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/tables.h
|
C/C++ Header
|
#ifndef RAY_GCS_TABLES_H
#define RAY_GCS_TABLES_H
#include <map>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "ray/common/constants.h"
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/util/logging.h"
#include "ray/gcs/callback.h"
#include "ray/gcs/entry_change_notification.h"
#include "ray/gcs/redis_context.h"
#include "ray/protobuf/gcs.pb.h"
struct redisAsyncContext;
namespace ray {
namespace gcs {
using rpc::ActorCheckpointData;
using rpc::ActorCheckpointIdData;
using rpc::ActorTableData;
using rpc::ErrorTableData;
using rpc::GcsChangeMode;
using rpc::GcsEntry;
using rpc::GcsNodeInfo;
using rpc::HeartbeatBatchTableData;
using rpc::HeartbeatTableData;
using rpc::JobTableData;
using rpc::ObjectTableData;
using rpc::ProfileTableData;
using rpc::ResourceTableData;
using rpc::TablePrefix;
using rpc::TablePubsub;
using rpc::TaskLeaseData;
using rpc::TaskReconstructionData;
using rpc::TaskTableData;
using rpc::WorkerFailureData;
class RedisContext;
class RedisGcsClient;
/// Specifies whether commands issued to a table should be regular or chain-replicated
/// (when available). kUnknown acts as the "not yet decided" sentinel.
enum class CommandType { kRegular, kChain, kUnknown };
/// \class PubsubInterface
///
/// The interface for a pubsub storage system. The client of a storage system
/// that implements this interface can request and cancel notifications for
/// specific keys.
template <typename ID>
class PubsubInterface {
 public:
  /// Request notifications for updates to key `id` on behalf of `client_id`.
  /// `done` is invoked once the request has completed.
  virtual Status RequestNotifications(const JobID &job_id, const ID &id,
                                      const ClientID &client_id,
                                      const StatusCallback &done) = 0;
  /// Cancel a prior notification request for key `id` made by `client_id`.
  virtual Status CancelNotifications(const JobID &job_id, const ID &id,
                                     const ClientID &client_id,
                                     const StatusCallback &done) = 0;
  virtual ~PubsubInterface(){};
};
/// \class LogInterface
///
/// Pure interface for append-only log tables, templated on the key type and
/// the (protobuf) value type.
template <typename ID, typename Data>
class LogInterface {
 public:
  /// Callback invoked after a successful write, with the data that was written.
  using WriteCallback =
      std::function<void(RedisGcsClient *client, const ID &id, const Data &data)>;
  virtual Status Append(const JobID &job_id, const ID &id,
                        const std::shared_ptr<Data> &data, const WriteCallback &done) = 0;
  /// Conditional append: succeeds only if the log currently has exactly
  /// `log_length` entries; otherwise `failure` is invoked.
  virtual Status AppendAt(const JobID &job_id, const ID &id,
                          const std::shared_ptr<Data> &data, const WriteCallback &done,
                          const WriteCallback &failure, int log_length) = 0;
  virtual ~LogInterface(){};
};
/// \class Log
///
/// A GCS table where every entry is an append-only log. This class is not
/// meant to be used directly. All log classes should derive from this class
/// and override the prefix_ member with a unique prefix for that log, and the
/// pubsub_channel_ member if pubsub is required.
///
/// Example tables backed by Log:
///   ClientTable: Stores a log of which GCS clients have been added or deleted
///   from the system.
template <typename ID, typename Data>
class Log : public LogInterface<ID, Data>, virtual public PubsubInterface<ID> {
 public:
  /// Callback invoked with all entries currently stored at a key.
  using Callback = std::function<void(RedisGcsClient *client, const ID &id,
                                      const std::vector<Data> &data)>;
  /// Like Callback, but also reports whether the change was an add or remove.
  using NotificationCallback =
      std::function<void(RedisGcsClient *client, const ID &id,
                         const GcsChangeMode change_mode, const std::vector<Data> &data)>;
  /// The callback to call when a write to a key succeeds.
  using WriteCallback = typename LogInterface<ID, Data>::WriteCallback;
  /// The callback to call when a SUBSCRIBE call completes and we are ready to
  /// request and receive notifications.
  using SubscriptionCallback = std::function<void(RedisGcsClient *client)>;
  /// Bundle of state passed through the redis callback machinery for one
  /// outstanding operation.
  struct CallbackData {
    ID id;
    std::shared_ptr<Data> data;
    Callback callback;
    // An optional callback to call for subscription operations, where the
    // first message is a notification of subscription success.
    SubscriptionCallback subscription_callback;
    Log<ID, Data> *log;
    RedisGcsClient *client;
  };
  /// Construct a Log over the given redis shard connections. Subclasses are
  /// expected to override pubsub_channel_ and prefix_.
  Log(const std::vector<std::shared_ptr<RedisContext>> &contexts, RedisGcsClient *client)
      : shard_contexts_(contexts),
        client_(client),
        pubsub_channel_(TablePubsub::NO_PUBLISH),
        prefix_(TablePrefix::UNUSED),
        subscribe_callback_index_(-1){};
  /// Append a log entry to a key.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is added to the GCS.
  /// \param data Data to append to the log. TODO(rkn): This can be made const,
  /// right?
  /// \param done Callback that is called once the data has been written to the
  /// GCS.
  /// \return Status
  Status Append(const JobID &job_id, const ID &id, const std::shared_ptr<Data> &data,
                const WriteCallback &done);
  /// Append a log entry to a key synchronously.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is added to the GCS.
  /// \param data Data to append to the log.
  /// \return Status
  Status SyncAppend(const JobID &job_id, const ID &id, const std::shared_ptr<Data> &data);
  /// Append a log entry to a key if and only if the log has the given number
  /// of entries.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is added to the GCS.
  /// \param data Data to append to the log.
  /// \param done Callback that is called if the data was appended to the log.
  /// \param failure Callback that is called if the data was not appended to
  /// the log because the log length did not match the given `log_length`.
  /// \param log_length The number of entries that the log must have for the
  /// append to succeed.
  /// \return Status
  Status AppendAt(const JobID &job_id, const ID &id, const std::shared_ptr<Data> &data,
                  const WriteCallback &done, const WriteCallback &failure,
                  int log_length);
  /// Lookup the log values at a key asynchronously.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is looked up in the GCS.
  /// \param lookup Callback that is called after lookup. If the callback is
  /// called with an empty vector, then there was no data at the key.
  /// \return Status
  Status Lookup(const JobID &job_id, const ID &id, const Callback &lookup);
  /// Subscribe to any Append operations to this table. The caller may choose
  /// to subscribe to all Appends, or to subscribe only to keys that it
  /// requests notifications for. This may only be called once per Log
  /// instance.
  ///
  /// \param job_id The ID of the job.
  /// \param client_id The type of update to listen to. If this is nil, then a
  /// message for each Add to the table will be received. Else, only
  /// messages for the given client will be received. In the latter
  /// case, the client may request notifications on specific keys in the
  /// table via `RequestNotifications`.
  /// \param subscribe Callback that is called on each received message. If the
  /// callback is called with an empty vector, then there was no data at the key.
  /// \param done Callback that is called when subscription is complete and we
  /// are ready to receive messages.
  /// \return Status
  Status Subscribe(const JobID &job_id, const ClientID &client_id,
                   const Callback &subscribe, const SubscriptionCallback &done);
  /// Request notifications about a key in this table.
  ///
  /// The notifications will be returned via the subscribe callback that was
  /// registered by `Subscribe`. An initial notification will be returned for
  /// the current values at the key, if any, and a subsequent notification will
  /// be published for every following `Append` to the key. Before
  /// notifications can be requested, the caller must first call `Subscribe`,
  /// with the same `client_id`.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the key to request notifications for.
  /// \param client_id The client who is requesting notifications. Before
  /// notifications can be requested, a call to `Subscribe` to this
  /// table with the same `client_id` must complete successfully.
  /// \param done Callback that is called when request notifications is complete.
  /// \return Status
  Status RequestNotifications(const JobID &job_id, const ID &id,
                              const ClientID &client_id, const StatusCallback &done);
  /// Cancel notifications about a key in this table.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the key to request notifications for.
  /// \param client_id The client who originally requested notifications.
  /// \param done Callback that is called when cancel notifications is complete.
  /// \return Status
  Status CancelNotifications(const JobID &job_id, const ID &id, const ClientID &client_id,
                             const StatusCallback &done);
  /// Subscribe to any modifications to the key. The caller may choose
  /// to subscribe to all modifications, or to subscribe only to keys that it
  /// requests notifications for. This may only be called once per Log
  /// instance. This function is different from public version due to
  /// an additional parameter change_mode in NotificationCallback. Therefore this
  /// function supports notifications of remove operations.
  ///
  /// \param job_id The ID of the job.
  /// \param client_id The type of update to listen to. If this is nil, then a
  /// message for each Add to the table will be received. Else, only
  /// messages for the given client will be received. In the latter
  /// case, the client may request notifications on specific keys in the
  /// table via `RequestNotifications`.
  /// \param subscribe Callback that is called on each received message. If the
  /// callback is called with an empty vector, then there was no data at the key.
  /// \param done Callback that is called when subscription is complete and we
  /// are ready to receive messages.
  /// \return Status
  Status Subscribe(const JobID &job_id, const ClientID &client_id,
                   const NotificationCallback &subscribe,
                   const SubscriptionCallback &done);
  /// Delete an entire key from redis.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data to delete from the GCS.
  /// \return Void.
  void Delete(const JobID &job_id, const ID &id);
  /// Delete several keys from redis.
  ///
  /// \param job_id The ID of the job.
  /// \param ids The vector of IDs to delete from the GCS.
  /// \return Void.
  void Delete(const JobID &job_id, const std::vector<ID> &ids);
  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;

 protected:
  /// Map a key deterministically to one of the shard connections.
  std::shared_ptr<RedisContext> GetRedisContext(const ID &id) {
    static std::hash<ID> index;
    return shard_contexts_[index(id) % shard_contexts_.size()];
  }
  /// The connection to the GCS.
  std::vector<std::shared_ptr<RedisContext>> shard_contexts_;
  /// The GCS client.
  RedisGcsClient *client_;
  /// The pubsub channel to subscribe to for notifications about keys in this
  /// table. If no notifications are required, this should be set to
  /// TablePubsub_NO_PUBLISH. If notifications are required, then this must be
  /// unique across all instances of Log.
  TablePubsub pubsub_channel_;
  /// The prefix to use for keys in this table. This must be unique across all
  /// instances of Log.
  TablePrefix prefix_;
  /// The index in the RedisCallbackManager for the callback that is called
  /// when we receive notifications. This is >= 0 iff we have subscribed to the
  /// table, otherwise -1.
  int64_t subscribe_callback_index_;
  /// Commands to a GCS table can either be regular (default) or chain-replicated.
  CommandType command_type_ = CommandType::kRegular;
  int64_t num_appends_ = 0;
  int64_t num_lookups_ = 0;
};
/// \class TableInterface
///
/// Pure interface for single-entry tables keyed by ID.
template <typename ID, typename Data>
class TableInterface {
 public:
  using WriteCallback = typename Log<ID, Data>::WriteCallback;
  /// Add (overwrite) the entry at `task_id`; `done` runs after the write.
  virtual Status Add(const JobID &job_id, const ID &task_id,
                     const std::shared_ptr<Data> &data, const WriteCallback &done) = 0;
  virtual ~TableInterface(){};
};
/// \class Table
///
/// A GCS table where every entry is a single data item. This class is not
/// meant to be used directly. All table classes should derive from this class
/// and override the prefix_ member with a unique prefix for that table, and
/// the pubsub_channel_ member if pubsub is required.
///
/// Example tables backed by Log:
///   TaskTable: Stores Task metadata needed for executing the task.
template <typename ID, typename Data>
class Table : private Log<ID, Data>,
              public TableInterface<ID, Data>,
              virtual public PubsubInterface<ID> {
 public:
  /// Callback invoked with the single data item found at a key.
  using Callback =
      std::function<void(RedisGcsClient *client, const ID &id, const Data &data)>;
  using WriteCallback = typename Log<ID, Data>::WriteCallback;
  /// The callback to call when a Lookup call returns an empty entry.
  using FailureCallback = std::function<void(RedisGcsClient *client, const ID &id)>;
  /// The callback to call when a Subscribe call completes and we are ready to
  /// request and receive notifications.
  using SubscriptionCallback = typename Log<ID, Data>::SubscriptionCallback;

  Table(const std::vector<std::shared_ptr<RedisContext>> &contexts,
        RedisGcsClient *client)
      : Log<ID, Data>(contexts, client) {}

  // Notification management is inherited unchanged from Log.
  using Log<ID, Data>::RequestNotifications;
  using Log<ID, Data>::CancelNotifications;

  /// Expose this interface for use by subscription tools class SubscriptionExecutor.
  /// In this way TaskTable() can also reuse class SubscriptionExecutor.
  using Log<ID, Data>::Subscribe;

  /// Add an entry to the table. This overwrites any existing data at the key.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is added to the GCS.
  /// \param data Data that is added to the GCS.
  /// \param done Callback that is called once the data has been written to the
  /// GCS.
  /// \return Status
  Status Add(const JobID &job_id, const ID &id, const std::shared_ptr<Data> &data,
             const WriteCallback &done);

  /// Lookup an entry asynchronously.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is looked up in the GCS.
  /// \param lookup Callback that is called after lookup if there was data the
  /// key.
  /// \param failure Callback that is called after lookup if there was no data
  /// at the key.
  /// \return Status
  Status Lookup(const JobID &job_id, const ID &id, const Callback &lookup,
                const FailureCallback &failure);

  /// Subscribe to any Add operations to this table. The caller may choose to
  /// subscribe to all Adds, or to subscribe only to keys that it requests
  /// notifications for. This may only be called once per Table instance.
  ///
  /// \param job_id The ID of the job.
  /// \param client_id The type of update to listen to. If this is nil, then a
  /// message for each Add to the table will be received. Else, only
  /// messages for the given client will be received. In the latter
  /// case, the client may request notifications on specific keys in the
  /// table via `RequestNotifications`.
  /// \param subscribe Callback that is called on each received message. If the
  /// callback is called with an empty vector, then there was no data at the key.
  /// \param failure Callback that is called if the key is empty at the time
  /// that notifications are requested.
  /// \param done Callback that is called when subscription is complete and we
  /// are ready to receive messages.
  /// \return Status
  Status Subscribe(const JobID &job_id, const ClientID &client_id,
                   const Callback &subscribe, const FailureCallback &failure,
                   const SubscriptionCallback &done);

  /// Subscribe to any Add operations to this table. Same as the overload
  /// above, but without a FailureCallback for empty keys.
  ///
  /// \param job_id The ID of the job.
  /// \param client_id The type of update to listen to. If this is nil, then a
  /// message for each Add to the table will be received. Else, only
  /// messages for the given client will be received. In the latter
  /// case, the client may request notifications on specific keys in the
  /// table via `RequestNotifications`.
  /// \param subscribe Callback that is called on each received message. If the
  /// callback is called with an empty vector, then there was no data at the key.
  /// \param done Callback that is called when subscription is complete and we
  /// are ready to receive messages.
  /// \return Status
  Status Subscribe(const JobID &job_id, const ClientID &client_id,
                   const Callback &subscribe, const SubscriptionCallback &done);

  /// Delete the entry at the given key.
  void Delete(const JobID &job_id, const ID &id) { Log<ID, Data>::Delete(job_id, id); }

  /// Delete the entries at all of the given keys.
  void Delete(const JobID &job_id, const std::vector<ID> &ids) {
    Log<ID, Data>::Delete(job_id, ids);
  }

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;

 protected:
  using Log<ID, Data>::shard_contexts_;
  using Log<ID, Data>::client_;
  using Log<ID, Data>::pubsub_channel_;
  using Log<ID, Data>::prefix_;
  using Log<ID, Data>::command_type_;
  using Log<ID, Data>::GetRedisContext;

  // Request counters — presumably reported by DebugString(); confirm in the
  // .cc implementation.
  int64_t num_adds_ = 0;
  int64_t num_lookups_ = 0;
};
/// \class SetInterface
///
/// Pure-virtual interface for a GCS table whose entries are addable and
/// removable sets. Implemented by Set<ID, Data>.
template <typename ID, typename Data>
class SetInterface {
 public:
  using WriteCallback = typename Log<ID, Data>::WriteCallback;

  /// Add an entry to the set stored at the key.
  ///
  /// \param job_id The ID of the job.
  /// \param id The key of the set.
  /// \param data Data to add to the set.
  /// \param done Callback that is called once the data has been written to
  /// the GCS.
  /// \return Status
  virtual Status Add(const JobID &job_id, const ID &id, const std::shared_ptr<Data> &data,
                     const WriteCallback &done) = 0;

  /// Remove an entry from the set stored at the key.
  ///
  /// \param job_id The ID of the job.
  /// \param id The key of the set.
  /// \param data Data to remove from the set.
  /// \param done Callback that is called once the removal has been written to
  /// the GCS.
  /// \return Status
  virtual Status Remove(const JobID &job_id, const ID &id,
                        const std::shared_ptr<Data> &data, const WriteCallback &done) = 0;

  // Defaulted virtual destructor (replaces the empty-body `{};` form, which
  // also carried a stray trailing semicolon).
  virtual ~SetInterface() = default;
};
/// \class Set
///
/// A GCS table where every entry is an addable & removable set. This class is not
/// meant to be used directly. All set classes should derive from this class
/// and override the prefix_ member with a unique prefix for that set, and the
/// pubsub_channel_ member if pubsub is required.
///
/// Example tables backed by Set:
///   ObjectTable: Stores a set of which clients have added an object.
template <typename ID, typename Data>
class Set : private Log<ID, Data>,
            public SetInterface<ID, Data>,
            virtual public PubsubInterface<ID> {
 public:
  using Callback = typename Log<ID, Data>::Callback;
  using WriteCallback = typename Log<ID, Data>::WriteCallback;
  using SubscriptionCallback = typename Log<ID, Data>::SubscriptionCallback;

  Set(const std::vector<std::shared_ptr<RedisContext>> &contexts, RedisGcsClient *client)
      : Log<ID, Data>(contexts, client) {}

  // Notification management, lookup and delete are inherited unchanged from
  // Log.
  using Log<ID, Data>::RequestNotifications;
  using Log<ID, Data>::CancelNotifications;
  using Log<ID, Data>::Lookup;
  using Log<ID, Data>::Delete;

  /// Add an entry to the set.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is added to the GCS.
  /// \param data Data to add to the set.
  /// \param done Callback that is called once the data has been written to the
  /// GCS.
  /// \return Status
  Status Add(const JobID &job_id, const ID &id, const std::shared_ptr<Data> &data,
             const WriteCallback &done);

  /// Remove an entry from the set.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is removed from the GCS.
  /// \param data Data to remove from the set.
  /// \param done Callback that is called once the data has been written to the
  /// GCS.
  /// \return Status
  Status Remove(const JobID &job_id, const ID &id, const std::shared_ptr<Data> &data,
                const WriteCallback &done);

  /// Callback invoked with the batch of add/remove notifications received for
  /// a key.
  using NotificationCallback =
      std::function<void(RedisGcsClient *client, const ID &id,
                         const std::vector<ArrayNotification<Data>> &data)>;

  /// Subscribe to any add or remove operations to this table.
  ///
  /// \param job_id The ID of the job.
  /// \param client_id The type of update to listen to. If this is nil, then a
  /// message for each add or remove to the table will be received. Else, only
  /// messages for the given client will be received. In the latter
  /// case, the client may request notifications on specific keys in the
  /// table via `RequestNotifications`.
  /// \param subscribe Callback that is called on each received message.
  /// \param done Callback that is called when subscription is complete and we
  /// are ready to receive messages.
  /// \return Status
  Status Subscribe(const JobID &job_id, const ClientID &client_id,
                   const NotificationCallback &subscribe,
                   const SubscriptionCallback &done);

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;

 protected:
  using Log<ID, Data>::shard_contexts_;
  using Log<ID, Data>::client_;
  using Log<ID, Data>::pubsub_channel_;
  using Log<ID, Data>::prefix_;
  using Log<ID, Data>::GetRedisContext;

  // Request counters — presumably reported by DebugString(); confirm in the
  // .cc implementation. Lookup counting is shared with the Log base.
  int64_t num_adds_ = 0;
  int64_t num_removes_ = 0;
  using Log<ID, Data>::num_lookups_;
};
/// \class HashInterface
///
/// Pure-virtual interface for a GCS table whose entries are string-keyed hash
/// tables. Implemented by Hash<ID, Data>.
template <typename ID, typename Data>
class HashInterface {
 public:
  using DataMap = std::unordered_map<std::string, std::shared_ptr<Data>>;
  // Reuse Log's SubscriptionCallback when Subscribe is successfully called.
  using SubscriptionCallback = typename Log<ID, Data>::SubscriptionCallback;

  /// The callback function used by function Update & Lookup.
  ///
  /// \param client The client on which Update or Lookup is called.
  /// \param id The ID of the Hash Table whose entries are updated or looked up.
  /// \param pairs Map data containing the entries of the Hash Table.
  /// \return Void
  using HashCallback =
      std::function<void(RedisGcsClient *client, const ID &id, const DataMap &pairs)>;

  /// The callback function used by function RemoveEntries.
  ///
  /// \param client The client on which the RemoveEntries is called.
  /// \param id The ID of the Hash Table whose entries are removed.
  /// \param keys The keys that are removed from this Hash Table.
  /// \return Void
  using HashRemoveCallback = std::function<void(RedisGcsClient *client, const ID &id,
                                                const std::vector<std::string> &keys)>;

  /// The notification function used by function Subscribe.
  ///
  /// \param client The client on which the Subscribe is called.
  /// \param id The ID of the Hash Table that changed.
  /// \param data Map notifications containing the change (update or removal)
  /// to the Hash Table.
  /// \return Void
  using HashNotificationCallback =
      std::function<void(RedisGcsClient *client, const ID &id,
                         const std::vector<MapNotification<std::string, Data>> &data)>;

  /// Add entries of a hash table.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is added to the GCS.
  /// \param pairs Map data to add to the hash table.
  /// \param done HashCallback that is called once the request data has been written to
  /// the GCS.
  /// \return Status
  virtual Status Update(const JobID &job_id, const ID &id, const DataMap &pairs,
                        const HashCallback &done) = 0;

  /// Remove entries from the hash table.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is removed from the GCS.
  /// \param keys The entry keys of the hash table.
  /// \param remove_callback HashRemoveCallback that is called once the data has been
  /// written to the GCS no matter whether the key exists in the hash table.
  /// \return Status
  virtual Status RemoveEntries(const JobID &job_id, const ID &id,
                               const std::vector<std::string> &keys,
                               const HashRemoveCallback &remove_callback) = 0;

  /// Lookup the map data of a hash table.
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the data that is looked up in the GCS.
  /// \param lookup HashCallback that is called after lookup. If the callback is
  /// called with an empty hash table, then there was no data in the callback.
  /// \return Status
  virtual Status Lookup(const JobID &job_id, const ID &id,
                        const HashCallback &lookup) = 0;

  /// Subscribe to any Update or Remove operations to this hash table.
  ///
  /// \param job_id The ID of the job.
  /// \param client_id The type of update to listen to. If this is nil, then a
  /// message for each Update to the table will be received. Else, only
  /// messages for the given client will be received. In the latter
  /// case, the client may request notifications on specific keys in the
  /// table via `RequestNotifications`.
  /// \param subscribe HashNotificationCallback that is called on each received message.
  /// \param done SubscriptionCallback that is called when subscription is complete and
  /// we are ready to receive messages.
  /// \return Status
  virtual Status Subscribe(const JobID &job_id, const ClientID &client_id,
                           const HashNotificationCallback &subscribe,
                           const SubscriptionCallback &done) = 0;

  virtual ~HashInterface(){};
};
/// \class Hash
///
/// A GCS table where every entry is a string-keyed hash table. Backed by Log;
/// subclasses override prefix_ (and pubsub_channel_ if pubsub is required).
template <typename ID, typename Data>
class Hash : private Log<ID, Data>,
             public HashInterface<ID, Data>,
             virtual public PubsubInterface<ID> {
 public:
  using DataMap = std::unordered_map<std::string, std::shared_ptr<Data>>;
  using HashCallback = typename HashInterface<ID, Data>::HashCallback;
  using HashRemoveCallback = typename HashInterface<ID, Data>::HashRemoveCallback;
  using HashNotificationCallback =
      typename HashInterface<ID, Data>::HashNotificationCallback;
  using SubscriptionCallback = typename Log<ID, Data>::SubscriptionCallback;

  Hash(const std::vector<std::shared_ptr<RedisContext>> &contexts, RedisGcsClient *client)
      : Log<ID, Data>(contexts, client) {}

  // Notification management is inherited unchanged from Log.
  using Log<ID, Data>::RequestNotifications;
  using Log<ID, Data>::CancelNotifications;

  // HashInterface implementation; see HashInterface for parameter docs.
  Status Update(const JobID &job_id, const ID &id, const DataMap &pairs,
                const HashCallback &done) override;

  Status Subscribe(const JobID &job_id, const ClientID &client_id,
                   const HashNotificationCallback &subscribe,
                   const SubscriptionCallback &done) override;

  Status Lookup(const JobID &job_id, const ID &id, const HashCallback &lookup) override;

  Status RemoveEntries(const JobID &job_id, const ID &id,
                       const std::vector<std::string> &keys,
                       const HashRemoveCallback &remove_callback) override;

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;

 protected:
  using Log<ID, Data>::shard_contexts_;
  using Log<ID, Data>::client_;
  using Log<ID, Data>::pubsub_channel_;
  using Log<ID, Data>::prefix_;
  using Log<ID, Data>::subscribe_callback_index_;
  using Log<ID, Data>::GetRedisContext;

  // Request counters — presumably reported by DebugString(); confirm in the
  // .cc implementation. Lookup counting is shared with the Log base.
  int64_t num_adds_ = 0;
  int64_t num_removes_ = 0;
  using Log<ID, Data>::num_lookups_;
};
/// \class DynamicResourceTable
///
/// Hash table keyed by node (client) ID whose entries hold that node's
/// dynamic resources. Publishes changes on the NODE_RESOURCE pubsub channel.
class DynamicResourceTable : public Hash<ClientID, ResourceTableData> {
 public:
  DynamicResourceTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
                       RedisGcsClient *client)
      : Hash(contexts, client) {
    pubsub_channel_ = TablePubsub::NODE_RESOURCE_PUBSUB;
    prefix_ = TablePrefix::NODE_RESOURCE;
  }  // Dropped stray trailing semicolon after the constructor body.

  virtual ~DynamicResourceTable() = default;
};
/// \class ObjectTable
///
/// Set keyed by object ID; each entry records a client that has added the
/// object. Publishes changes on the OBJECT pubsub channel.
class ObjectTable : public Set<ObjectID, ObjectTableData> {
 public:
  ObjectTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
              RedisGcsClient *client)
      : Set(contexts, client) {
    pubsub_channel_ = TablePubsub::OBJECT_PUBSUB;
    prefix_ = TablePrefix::OBJECT;
  }  // Dropped stray trailing semicolon after the constructor body.

  virtual ~ObjectTable() = default;
};
/// \class HeartbeatTable
///
/// Table keyed by node (client) ID holding per-node heartbeat data.
/// Publishes changes on the HEARTBEAT pubsub channel.
class HeartbeatTable : public Table<ClientID, HeartbeatTableData> {
 public:
  HeartbeatTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
                 RedisGcsClient *client)
      : Table(contexts, client) {
    pubsub_channel_ = TablePubsub::HEARTBEAT_PUBSUB;
    prefix_ = TablePrefix::HEARTBEAT;
  }

  // `= default` replaces the empty-body destructor.
  virtual ~HeartbeatTable() = default;
};
/// \class HeartbeatBatchTable
///
/// Table holding batched heartbeat data, keyed by node (client) ID.
/// Publishes changes on the HEARTBEAT_BATCH pubsub channel.
class HeartbeatBatchTable : public Table<ClientID, HeartbeatBatchTableData> {
 public:
  HeartbeatBatchTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
                      RedisGcsClient *client)
      : Table(contexts, client) {
    pubsub_channel_ = TablePubsub::HEARTBEAT_BATCH_PUBSUB;
    prefix_ = TablePrefix::HEARTBEAT_BATCH;
  }

  // `= default` replaces the empty-body destructor.
  virtual ~HeartbeatBatchTable() = default;
};
/// \class JobTable
///
/// Log keyed by job ID; entries record job lifecycle events (see
/// CreateJobTableData for the payload). Publishes on the JOB pubsub channel.
class JobTable : public Log<JobID, JobTableData> {
 public:
  JobTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
           RedisGcsClient *client)
      : Log(contexts, client) {
    pubsub_channel_ = TablePubsub::JOB_PUBSUB;
    prefix_ = TablePrefix::JOB;
  }  // Dropped stray trailing semicolon after the constructor body.

  virtual ~JobTable() = default;
};
/// Actor table starts with an ALIVE entry, which represents the first time the actor
/// is created. This may be followed by 0 or more pairs of RECONSTRUCTING, ALIVE entries,
/// which represent each time the actor fails (RECONSTRUCTING) and gets recreated (ALIVE).
/// These may be followed by a DEAD entry, which means that the actor has failed and will
/// not be reconstructed.
class ActorTable : public Log<ActorID, ActorTableData> {
 public:
  ActorTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
             RedisGcsClient *client)
      : Log(contexts, client) {
    // Publish actor state transitions on the ACTOR channel.
    pubsub_channel_ = TablePubsub::ACTOR_PUBSUB;
    prefix_ = TablePrefix::ACTOR;
  }
  // NOTE(review): unlike the sibling tables, no virtual destructor is declared
  // here — presumably the Log base provides one; confirm against Log's
  // definition.
};
/// \class WorkerFailureTable
///
/// Table keyed by worker ID recording worker failure data. Publishes on the
/// WORKER_FAILURE pubsub channel.
class WorkerFailureTable : public Table<WorkerID, WorkerFailureData> {
 public:
  WorkerFailureTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
                     RedisGcsClient *client)
      : Table(contexts, client) {
    pubsub_channel_ = TablePubsub::WORKER_FAILURE_PUBSUB;
    prefix_ = TablePrefix::WORKER_FAILURE;
  }

  // `= default` replaces the empty-body destructor.
  virtual ~WorkerFailureTable() = default;
};
/// \class TaskReconstructionLog
///
/// Log keyed by task ID recording task reconstruction attempts. No pubsub
/// channel is set, so no notifications are published for this log.
class TaskReconstructionLog : public Log<TaskID, TaskReconstructionData> {
 public:
  TaskReconstructionLog(const std::vector<std::shared_ptr<RedisContext>> &contexts,
                        RedisGcsClient *client)
      : Log(contexts, client) {
    prefix_ = TablePrefix::TASK_RECONSTRUCTION;
  }
};
/// \class TaskLeaseTable
///
/// Table keyed by task ID recording the current lease on a task. Entries are
/// marked to expire in Redis after the lease timeout (see Add below).
class TaskLeaseTable : public Table<TaskID, TaskLeaseData> {
 public:
  /// Use boost::optional to represent subscription results, so that we can
  /// notify raylet whether the entry of task lease is empty.
  using Callback =
      std::function<void(RedisGcsClient *client, const TaskID &task_id,
                         const std::vector<boost::optional<TaskLeaseData>> &data)>;

  TaskLeaseTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
                 RedisGcsClient *client)
      : Table(contexts, client) {
    pubsub_channel_ = TablePubsub::TASK_LEASE_PUBSUB;
    prefix_ = TablePrefix::TASK_LEASE;
  }

  /// Add a task lease entry, then mark it for expiration in Redis after
  /// `data->timeout()` milliseconds (via PEXPIRE).
  ///
  /// \param job_id The ID of the job.
  /// \param id The ID of the leased task.
  /// \param data The lease data; its timeout() is used as the Redis
  /// expiration period in milliseconds.
  /// \param done Callback that is called once the data has been written.
  /// \return Status
  Status Add(const JobID &job_id, const TaskID &id,
             const std::shared_ptr<TaskLeaseData> &data,
             const WriteCallback &done) override {
    RAY_RETURN_NOT_OK((Table<TaskID, TaskLeaseData>::Add(job_id, id, data, done)));
    // Mark the entry for expiration in Redis. It's okay if this command fails
    // since the lease entry itself contains the expiration period. In the
    // worst case, if the command fails, then a client that looks up the lease
    // entry will overestimate the expiration time.
    // TODO(swang): Use a common helper function to format the key instead of
    // hardcoding it to match the Redis module.
    std::vector<std::string> args = {"PEXPIRE", TablePrefix_Name(prefix_) + id.Binary(),
                                     std::to_string(data->timeout())};
    return GetRedisContext(id)->RunArgvAsync(args);
  }

  /// Implement this method for the subscription tools class SubscriptionExecutor.
  /// In this way TaskLeaseTable() can also reuse class SubscriptionExecutor.
  Status Subscribe(const JobID &job_id, const ClientID &client_id,
                   const Callback &subscribe, const SubscriptionCallback &done);
};
/// \class ActorCheckpointTable
///
/// Table keyed by checkpoint ID holding actor checkpoint data. No pubsub
/// channel is set, so no notifications are published for this table.
class ActorCheckpointTable : public Table<ActorCheckpointID, ActorCheckpointData> {
 public:
  ActorCheckpointTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
                       RedisGcsClient *client)
      : Table(contexts, client) {
    prefix_ = TablePrefix::ACTOR_CHECKPOINT;
  }  // Dropped stray trailing semicolons after the constructor body.
};
/// \class ActorCheckpointIdTable
///
/// Table keyed by actor ID mapping each actor to the IDs of its checkpoints.
class ActorCheckpointIdTable : public Table<ActorID, ActorCheckpointIdData> {
 public:
  ActorCheckpointIdTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
                         RedisGcsClient *client)
      : Table(contexts, client) {
    prefix_ = TablePrefix::ACTOR_CHECKPOINT_ID;
  }  // Dropped stray trailing semicolon after the constructor body.

  /// Add a checkpoint id to an actor, and remove a previous checkpoint if the
  /// total number of checkpoints in GCS exceeds the max allowed value.
  ///
  /// \param job_id The ID of the job.
  /// \param actor_id ID of the actor.
  /// \param checkpoint_id ID of the checkpoint.
  /// \param done Callback that is called once the data has been written.
  /// \return Status.
  Status AddCheckpointId(const JobID &job_id, const ActorID &actor_id,
                         const ActorCheckpointID &checkpoint_id,
                         const WriteCallback &done);
};
namespace raylet {

/// \class TaskTable
///
/// Table keyed by task ID holding raylet task metadata. Publishes on the
/// RAYLET_TASK pubsub channel.
class TaskTable : public Table<TaskID, TaskTableData> {
 public:
  TaskTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
            RedisGcsClient *client)
      : Table(contexts, client) {
    pubsub_channel_ = TablePubsub::RAYLET_TASK_PUBSUB;
    prefix_ = TablePrefix::RAYLET_TASK;
  }

  /// Construct with an explicit command type (regular or chain-replicated).
  TaskTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
            RedisGcsClient *client, gcs::CommandType command_type)
      : TaskTable(contexts, client) {
    command_type_ = command_type;
  }  // Dropped stray trailing semicolon after the constructor body.
};

}  // namespace raylet
/// \class ErrorTable
///
/// Log keyed by job ID recording error events (see CreateErrorTableData for
/// the payload). Publishes on the ERROR_INFO pubsub channel.
class ErrorTable : public Log<JobID, ErrorTableData> {
 public:
  ErrorTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
             RedisGcsClient *client)
      : Log(contexts, client) {
    pubsub_channel_ = TablePubsub::ERROR_INFO_PUBSUB;
    prefix_ = TablePrefix::ERROR_INFO;
  }  // Dropped stray trailing semicolon after the constructor body.

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;
};
/// \class ProfileTable
///
/// Log recording profiling events. No pubsub channel is set, so no
/// notifications are published for this log.
class ProfileTable : public Log<UniqueID, ProfileTableData> {
 public:
  ProfileTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
               RedisGcsClient *client)
      : Log(contexts, client) {
    prefix_ = TablePrefix::PROFILE;
  }  // Dropped stray trailing semicolon after the constructor body.

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;
};
/// \class ClientTable
///
/// The ClientTable stores information about active and inactive clients. It is
/// structured as a single log stored at a key known to all clients. When a
/// client connects, it appends an entry to the log indicating that it is
/// alive. When a client disconnects, or if another client detects its failure,
/// it should append an entry to the log indicating that it is dead. A client
/// that is marked as dead should never again be marked as alive; if it needs
/// to reconnect, it must connect with a different ClientID.
class ClientTable : public Log<ClientID, GcsNodeInfo> {
 public:
  ClientTable(const std::vector<std::shared_ptr<RedisContext>> &contexts,
              RedisGcsClient *client)
      : Log(contexts, client) {
    pubsub_channel_ = TablePubsub::CLIENT_PUBSUB;
    prefix_ = TablePrefix::CLIENT;
  };

  /// Connect as a client to the GCS. This registers us in the client table
  /// and begins subscription to client table notifications.
  ///
  /// \param local_node_info Information about the connecting client. This must have the
  /// same id as the one set in the client table.
  /// \return Status
  ray::Status Connect(const GcsNodeInfo &local_node_info);

  /// Disconnect the client from the GCS. The client ID assigned during
  /// registration should never be reused after disconnecting.
  ///
  /// \return Status
  ray::Status Disconnect();

  /// Mark a new node as connected to GCS asynchronously.
  ///
  /// \param node_info Information about the node.
  /// \param done Callback that is called once the node has been marked to connected.
  /// \return Status
  ray::Status MarkConnected(const GcsNodeInfo &node_info, const WriteCallback &done);

  /// Mark a different node as disconnected. The client ID should never be
  /// reused for a new node.
  ///
  /// \param dead_node_id The ID of the node to mark as dead.
  /// \param done Callback that is called once the node has been marked to
  /// disconnected.
  /// \return Status
  ray::Status MarkDisconnected(const ClientID &dead_node_id, const WriteCallback &done);

  /// Subscribe to node add/remove notifications.
  ///
  /// \param subscribe Callback invoked for each node change.
  /// \param done Callback invoked once the subscription is established.
  /// \return Status
  ray::Status SubscribeToNodeChange(
      const SubscribeCallback<ClientID, GcsNodeInfo> &subscribe,
      const StatusCallback &done);

  /// Get a client's information from the cache. The cache only contains
  /// information for clients that we've heard a notification for.
  ///
  /// \param client The client to get information about.
  /// \param node_info The client information will be copied here if
  /// we have the client in the cache.
  /// \return Whether the client is in the cache.
  bool GetClient(const ClientID &client, GcsNodeInfo *node_info) const;

  /// Get the local client's ID.
  ///
  /// \return The local client's ID.
  const ClientID &GetLocalClientId() const;

  /// Get the local client's information.
  ///
  /// \return The local client's information.
  const GcsNodeInfo &GetLocalClient() const;

  /// Check whether the given client is removed.
  ///
  /// \param node_id The ID of the client to check.
  /// \return Whether the client with ID client_id is removed.
  bool IsRemoved(const ClientID &node_id) const;

  /// Get the information of all clients.
  ///
  /// \return The client ID to client information map.
  const std::unordered_map<ClientID, GcsNodeInfo> &GetAllClients() const;

  /// Lookup the client data in the client table.
  ///
  /// \param lookup Callback that is called after lookup. If the callback is
  /// called with an empty vector, then there was no data at the key.
  /// \return Status.
  Status Lookup(const Callback &lookup);

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;

  /// The key at which the log of client information is stored. This key must
  /// be kept the same across all instances of the ClientTable, so that all
  /// clients append and read from the same key.
  ClientID client_log_key_;

 private:
  using NodeChangeCallback =
      std::function<void(const ClientID &id, const GcsNodeInfo &node_info)>;

  /// Register a callback to call when a new node is added or a node is removed.
  ///
  /// \param callback The callback to register.
  void RegisterNodeChangeCallback(const NodeChangeCallback &callback);

  /// Handle a client table notification.
  void HandleNotification(RedisGcsClient *client, const GcsNodeInfo &node_info);

  /// Whether this client has called Disconnect().
  bool disconnected_{false};
  /// This node's ID. It will be initialized when we call method `Connect(...)`.
  ClientID local_node_id_;
  /// Information about this node.
  GcsNodeInfo local_node_info_;
  /// This ID is used in method `SubscribeToNodeChange(...)` to Subscribe and
  /// RequestNotification.
  /// The reason for not using `local_node_id_` is because it is only initialized
  /// for registered nodes.
  ClientID subscribe_id_{ClientID::FromRandom()};
  /// The callback to call when a new node is added or a node is removed.
  NodeChangeCallback node_change_callback_{nullptr};
  /// A cache for information about all nodes.
  std::unordered_map<ClientID, GcsNodeInfo> node_cache_;
  /// The set of removed nodes.
  std::unordered_set<ClientID> removed_nodes_;
};
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_TABLES_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/test/accessor_test_base.h
|
C/C++ Header
|
#ifndef RAY_GCS_ACCESSOR_TEST_BASE_H
#define RAY_GCS_ACCESSOR_TEST_BASE_H
#include <atomic>
#include <chrono>
#include <string>
#include <thread>
#include <vector>
#include "gtest/gtest.h"
#include "ray/gcs/redis_accessor.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/util/test_util.h"
namespace ray {
namespace gcs {
/// Base fixture for GCS accessor tests. Generates test data, connects a
/// RedisGcsClient, and runs an io_service on a background thread so async
/// callbacks can execute while the test thread waits.
template <typename ID, typename Data>
class AccessorTestBase : public RedisServiceManagerForTest {
 public:
  AccessorTestBase() {}

  virtual ~AccessorTestBase() {}

  virtual void SetUp() {
    GenTestData();

    // Connect to the Redis server started by RedisServiceManagerForTest.
    GcsClientOptions options = GcsClientOptions("127.0.0.1", REDIS_SERVER_PORT, "", true);
    gcs_client_.reset(new RedisGcsClient(options));
    RAY_CHECK_OK(gcs_client_->Connect(io_service_));

    // Run the io_service on a background thread. The work guard keeps run()
    // from returning while there are no pending handlers.
    work_thread_.reset(new std::thread([this] {
      std::unique_ptr<boost::asio::io_service::work> work(
          new boost::asio::io_service::work(io_service_));
      io_service_.run();
    }));
  }

  virtual void TearDown() {
    // Teardown order matters: disconnect the client, stop the io_service so
    // run() returns, then join the worker thread before releasing the client.
    gcs_client_->Disconnect();

    io_service_.stop();
    work_thread_->join();
    work_thread_.reset();

    gcs_client_.reset();

    ClearTestData();
  }

 protected:
  // Subclasses populate id_to_data_ with the fixtures they need.
  virtual void GenTestData() = 0;

  void ClearTestData() { id_to_data_.clear(); }

  void WaitPendingDone(std::chrono::milliseconds timeout) {
    WaitPendingDone(pending_count_, timeout);
  }

  // Spin-wait until the given counter reaches zero or the timeout expires.
  void WaitPendingDone(std::atomic<int> &pending_count,
                       std::chrono::milliseconds timeout) {
    auto condition = [&pending_count]() { return pending_count == 0; };
    EXPECT_TRUE(WaitForCondition(condition, timeout.count()));
  }

 protected:
  std::unique_ptr<RedisGcsClient> gcs_client_;

  boost::asio::io_service io_service_;
  std::unique_ptr<std::thread> work_thread_;

  // Test fixtures, generated by GenTestData().
  std::unordered_map<ID, std::shared_ptr<Data>> id_to_data_;

  // Number of outstanding async operations; callbacks decrement it.
  std::atomic<int> pending_count_{0};
  std::chrono::milliseconds wait_pending_timeout_{10000};
};
} // namespace gcs
} // namespace ray
#endif // RAY_GCS_ACCESSOR_TEST_BASE_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/test/asio_test.cc
|
C++
|
#include <iostream>
#include "gtest/gtest.h"
#include "ray/gcs/asio.h"
#include "ray/util/logging.h"
#include "ray/util/test_util.h"
extern "C" {
#include "hiredis/async.h"
#include "hiredis/hiredis.h"
}
namespace ray {
namespace gcs {
// Shared event loop driving the async Redis client; GetCallback stops it to
// end the test.
boost::asio::io_service io_service;
// hiredis connect callback: the test only verifies the connection succeeded.
void ConnectCallback(const redisAsyncContext *c, int status) {
  ASSERT_EQ(status, REDIS_OK);
}
// hiredis disconnect callback: the test only verifies a clean disconnect.
void DisconnectCallback(const redisAsyncContext *c, int status) {
  ASSERT_EQ(status, REDIS_OK);
}
// Callback for the GET command: checks the value written by the earlier SET
// and stops the event loop so io_service.run() returns and the test ends.
void GetCallback(redisAsyncContext *c, void *r, void *privdata) {
  redisReply *reply = reinterpret_cast<redisReply *>(r);
  ASSERT_TRUE(reply != nullptr);
  // NOTE(review): reply->type is not checked before reading reply->str —
  // assumes the GET always returns a string reply here.
  ASSERT_TRUE(std::string(reinterpret_cast<char *>(reply->str)) == "test");
  io_service.stop();
}
// Fixture that starts/stops a Redis server around each test.
class RedisAsioTest : public RedisServiceManagerForTest {};

// End-to-end check that RedisAsioClient can drive async hiredis commands
// through the boost::asio event loop: SET a key, then GET it back.
TEST_F(RedisAsioTest, TestRedisCommands) {
  redisAsyncContext *ac = redisAsyncConnect("127.0.0.1", REDIS_SERVER_PORT);
  ASSERT_TRUE(ac->err == 0);
  ray::gcs::RedisAsyncContext redis_async_context(ac);
  RedisAsioClient client(io_service, redis_async_context);

  redisAsyncSetConnectCallback(ac, ConnectCallback);
  redisAsyncSetDisconnectCallback(ac, DisconnectCallback);

  redisAsyncCommand(ac, NULL, NULL, "SET key test");
  redisAsyncCommand(ac, GetCallback, nullptr, "GET key");
  // Runs until GetCallback calls io_service.stop().
  io_service.run();
}
} // namespace gcs
} // namespace ray
// Expects exactly three arguments: paths to the redis-server binary, the
// redis-cli binary, and the Redis module shared library used by the tests.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  RAY_CHECK(argc == 4);
  ray::REDIS_SERVER_EXEC_PATH = argv[1];
  ray::REDIS_CLIENT_EXEC_PATH = argv[2];
  ray::REDIS_MODULE_LIBRARY_PATH = argv[3];
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/test/redis_actor_info_accessor_test.cc
|
C++
|
#include <atomic>
#include <chrono>
#include <string>
#include <thread>
#include <vector>
#include "gtest/gtest.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/gcs/test/accessor_test_base.h"
#include "ray/util/test_util.h"
namespace ray {
namespace gcs {
/// Fixture that pre-generates actor table data (one actor per synthetic job)
/// plus per-actor checkpoint data for the Redis-backed actor accessor tests.
class ActorInfoAccessorTest : public AccessorTestBase<ActorID, ActorTableData> {
 protected:
  /// Generate 100 ALIVE actors and fill id_to_data_, then generate their
  /// checkpoints.
  virtual void GenTestData() {
    for (size_t i = 0; i < 100; ++i) {
      std::shared_ptr<ActorTableData> actor = std::make_shared<ActorTableData>();
      actor->set_max_reconstructions(1);
      actor->set_remaining_reconstructions(1);
      JobID job_id = JobID::FromInt(i);
      actor->set_job_id(job_id.Binary());
      actor->set_state(ActorTableData::ALIVE);
      ActorID actor_id = ActorID::Of(job_id, RandomTaskId(), /*parent_task_counter=*/i);
      actor->set_actor_id(actor_id.Binary());
      id_to_data_[actor_id] = actor;
    }
    GenCheckpointData();
  }

  /// Generate `checkpoint_number_` random checkpoints for every actor created
  /// by GenTestData() and fill id_to_checkpoints_.
  void GenCheckpointData() {
    // Iterate by const reference; the original `const auto item` copied the
    // whole pair<const ActorID, shared_ptr<...>> on every iteration.
    for (const auto &item : id_to_data_) {
      const ActorID &id = item.first;
      ActorCheckpointList checkpoints;
      checkpoints.reserve(checkpoint_number_);
      for (size_t i = 0; i < checkpoint_number_; ++i) {
        ActorCheckpointID checkpoint_id = ActorCheckpointID::FromRandom();
        auto checkpoint = std::make_shared<ActorCheckpointData>();
        checkpoint->set_actor_id(id.Binary());
        checkpoint->set_checkpoint_id(checkpoint_id.Binary());
        // The checkpoint ID doubles as the execution dependency in this test.
        checkpoint->set_execution_dependency(checkpoint_id.Binary());
        checkpoints.emplace_back(checkpoint);
      }
      id_to_checkpoints_[id] = std::move(checkpoints);
    }
  }

  typedef std::vector<std::shared_ptr<ActorCheckpointData>> ActorCheckpointList;
  // Map from actor ID to the checkpoints generated for that actor.
  std::unordered_map<ActorID, ActorCheckpointList> id_to_checkpoints_;
  // Number of checkpoints generated per actor.
  size_t checkpoint_number_{2};
};
// Registers every generated actor through the accessor, then fetches each one
// back and checks it matches a generated fixture.
TEST_F(ActorInfoAccessorTest, RegisterAndGet) {
  ActorInfoAccessor &actor_accessor = gcs_client_->Actors();
  // register
  for (const auto &elem : id_to_data_) {
    const auto &actor = elem.second;
    ++pending_count_;
    RAY_CHECK_OK(actor_accessor.AsyncRegister(actor, [this](Status status) {
      RAY_CHECK_OK(status);
      --pending_count_;
    }));
  }
  WaitPendingDone(wait_pending_timeout_);

  // get
  for (const auto &elem : id_to_data_) {
    ++pending_count_;
    RAY_CHECK_OK(actor_accessor.AsyncGet(
        elem.first, [this](Status status, const boost::optional<ActorTableData> &data) {
          ASSERT_TRUE(data);
          ActorID actor_id = ActorID::FromBinary(data->actor_id());
          auto it = id_to_data_.find(actor_id);
          ASSERT_TRUE(it != id_to_data_.end());
          --pending_count_;
        }));
  }
  WaitPendingDone(wait_pending_timeout_);
}
// Subscribes to all actor updates, then registers every generated actor and
// verifies a subscription notification arrives for each registration.
TEST_F(ActorInfoAccessorTest, Subscribe) {
  ActorInfoAccessor &actor_accessor = gcs_client_->Actors();
  // subscribe
  std::atomic<int> sub_pending_count(0);
  std::atomic<int> do_sub_pending_count(0);
  // Per-notification callback: each received actor must be a known fixture.
  auto subscribe = [this, &sub_pending_count](const ActorID &actor_id,
                                              const ActorTableData &data) {
    const auto it = id_to_data_.find(actor_id);
    ASSERT_TRUE(it != id_to_data_.end());
    --sub_pending_count;
  };
  auto done = [&do_sub_pending_count](Status status) {
    RAY_CHECK_OK(status);
    --do_sub_pending_count;
  };

  ++do_sub_pending_count;
  RAY_CHECK_OK(actor_accessor.AsyncSubscribeAll(subscribe, done));
  // Wait until subscribe finishes.
  WaitPendingDone(do_sub_pending_count, wait_pending_timeout_);

  // register
  std::atomic<int> register_pending_count(0);
  for (const auto &elem : id_to_data_) {
    const auto &actor = elem.second;
    ++sub_pending_count;
    ++register_pending_count;
    RAY_CHECK_OK(
        actor_accessor.AsyncRegister(actor, [&register_pending_count](Status status) {
          RAY_CHECK_OK(status);
          --register_pending_count;
        }));
  }
  // Wait until register finishes.
  WaitPendingDone(register_pending_count, wait_pending_timeout_);

  // Wait for all subscribe notifications.
  WaitPendingDone(sub_pending_count, wait_pending_timeout_);
}
// Adds every generated checkpoint, then verifies (1) each checkpoint can be
// fetched by its ID and (2) each actor's ordered checkpoint-ID list matches
// the generation order.
TEST_F(ActorInfoAccessorTest, GetActorCheckpointTest) {
  ActorInfoAccessor &actor_accessor = gcs_client_->Actors();
  auto on_add_done = [this](Status status) {
    RAY_CHECK_OK(status);
    --pending_count_;
  };
  // Add checkpoints one index at a time across all actors so each actor's
  // checkpoints are stored in generation order.
  for (size_t index = 0; index < checkpoint_number_; ++index) {
    for (const auto &actor_checkpoints : id_to_checkpoints_) {
      const ActorCheckpointList &checkpoints = actor_checkpoints.second;
      const auto &checkpoint = checkpoints[index];
      ++pending_count_;
      Status status = actor_accessor.AsyncAddCheckpoint(checkpoint, on_add_done);
      RAY_CHECK_OK(status);
    }
    WaitPendingDone(wait_pending_timeout_);
  }

  // Fetch each checkpoint individually and check the returned ID.
  for (const auto &actor_checkpoints : id_to_checkpoints_) {
    const ActorCheckpointList &checkpoints = actor_checkpoints.second;
    for (const auto &checkpoint : checkpoints) {
      ActorCheckpointID checkpoint_id =
          ActorCheckpointID::FromBinary(checkpoint->checkpoint_id());
      auto on_get_done = [this, checkpoint_id](
                             Status status,
                             const boost::optional<ActorCheckpointData> &result) {
        RAY_CHECK(result);
        ActorCheckpointID result_checkpoint_id =
            ActorCheckpointID::FromBinary(result->checkpoint_id());
        ASSERT_EQ(checkpoint_id, result_checkpoint_id);
        --pending_count_;
      };
      ++pending_count_;
      Status status = actor_accessor.AsyncGetCheckpoint(checkpoint_id, on_get_done);
      RAY_CHECK_OK(status);
    }
  }
  WaitPendingDone(wait_pending_timeout_);

  // Fetch each actor's checkpoint-ID list and compare element-wise.
  for (const auto &actor_checkpoints : id_to_checkpoints_) {
    const ActorID &actor_id = actor_checkpoints.first;
    const ActorCheckpointList &checkpoints = actor_checkpoints.second;
    // NOTE(review): `checkpoints` (a reference to the map element) is captured
    // by reference in an async callback; the map outlives the wait below, but
    // this relies on WaitPendingDone running before the loop variable goes out
    // of scope — confirm intended.
    auto on_get_done = [this, &checkpoints](
                           Status status,
                           const boost::optional<ActorCheckpointIdData> &result) {
      RAY_CHECK(result);
      ASSERT_EQ(checkpoints.size(), result->checkpoint_ids_size());
      for (size_t i = 0; i < checkpoints.size(); ++i) {
        const std::string checkpoint_id_str = checkpoints[i]->checkpoint_id();
        const std::string &result_checkpoint_id_str = result->checkpoint_ids(i);
        ASSERT_EQ(checkpoint_id_str, result_checkpoint_id_str);
      }
      --pending_count_;
    };
    ++pending_count_;
    Status status = actor_accessor.AsyncGetCheckpointID(actor_id, on_get_done);
    RAY_CHECK_OK(status);
  }
  WaitPendingDone(wait_pending_timeout_);
}
} // namespace gcs
} // namespace ray
/// Test entry point. Expects exactly three positional arguments locating the
/// redis binaries used to spin up a throwaway redis instance:
///   argv[1]: redis-server executable path.
///   argv[2]: redis client (redis-cli) executable path.
///   argv[3]: ray redis module shared library path.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Fail fast with a usage message instead of a bare check failure.
  RAY_CHECK(argc == 4) << "Usage: " << argv[0]
                       << " <redis_server_exec_path> <redis_client_exec_path>"
                          " <redis_module_library_path>";
  ray::REDIS_SERVER_EXEC_PATH = argv[1];
  ray::REDIS_CLIENT_EXEC_PATH = argv[2];
  ray::REDIS_MODULE_LIBRARY_PATH = argv[3];
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/test/redis_gcs_client_test.cc
|
C++
|
#include "gtest/gtest.h"
// TODO(pcm): get rid of this and replace with the type safe plasma event loop
extern "C" {
#include "hiredis/hiredis.h"
}
#include "ray/common/ray_config.h"
#include "ray/gcs/pb_util.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/gcs/tables.h"
#include "ray/util/test_util.h"
namespace ray {
namespace gcs {
/* Flush redis. */
static inline void flushall_redis(void) {
redisContext *context = redisConnect("127.0.0.1", REDIS_SERVER_PORT);
freeReplyObject(redisCommand(context, "FLUSHALL"));
redisFree(context);
}
/// Hands out unique JobIDs backed by a monotonically increasing counter.
/// NOTE: not thread-safe; intended for single-threaded test setup only.
inline JobID NextJobID() {
  static int32_t job_counter = 0;
  job_counter += 1;
  return JobID::FromInt(job_counter);
}
/// Common base for the redis GCS test fixtures. Owns the per-test job id and
/// a callback counter that the static helper classes below use to verify how
/// many callbacks fired. Flushes redis on teardown so tests stay isolated.
class TestGcs : public RedisServiceManagerForTest {
 public:
  // `explicit` prevents accidental CommandType -> TestGcs conversions; members
  // are set in the init list rather than assigned in the body.
  explicit TestGcs(CommandType command_type)
      : num_callbacks_(0), command_type_(command_type), job_id_(NextJobID()) {}

  virtual ~TestGcs() {
    // Clear all keys in the GCS.
    flushall_redis();
  }

  /// Run the fixture's event loop; blocks until Stop() is called.
  virtual void Start() = 0;

  /// Stop the fixture's event loop.
  virtual void Stop() = 0;

  uint64_t NumCallbacks() const { return num_callbacks_; }

  void IncrementNumCallbacks() { num_callbacks_++; }

 protected:
  // Number of test callbacks fired so far.
  uint64_t num_callbacks_;
  // Whether commands are issued as regular or chain-replicated commands.
  gcs::CommandType command_type_;
  // Client under test; created by the concrete fixture's SetUp().
  std::shared_ptr<gcs::RedisGcsClient> client_;
  // Job namespace used for all keys written by a single test.
  JobID job_id_;
};
// Global pointer to the currently running fixture; the static helper classes
// below use it to reach Start()/Stop() and the callback counter from plain
// lambdas without capturing the fixture.
TestGcs *test;
// Identity under which per-key notification requests are issued in these tests.
ClientID local_client_id = ClientID::FromRandom();
/// Fixture that drives the GCS client from a boost::asio event loop.
class TestGcsWithAsio : public TestGcs {
 public:
  TestGcsWithAsio(CommandType command_type)
      : TestGcs(command_type), io_service_(), work_(io_service_) {}

  TestGcsWithAsio() : TestGcsWithAsio(CommandType::kRegular) {}

  ~TestGcsWithAsio() {
    // Destroy the client first since it has a reference to the event loop.
    // Guard against SetUp() having failed before the client was created.
    if (client_) {
      client_->Disconnect();
      client_.reset();
    }
  }

  void SetUp() override {
    GcsClientOptions options("127.0.0.1", REDIS_SERVER_PORT, "", true);
    client_ = std::make_shared<gcs::RedisGcsClient>(options, command_type_);
    RAY_CHECK_OK(client_->Connect(io_service_));
  }

  void Start() override { io_service_.run(); }
  void Stop() override { io_service_.stop(); }

 private:
  boost::asio::io_service io_service_;
  // Give the event loop some work so that it's forced to run until Stop() is
  // called.
  boost::asio::io_service::work work_;
};
/// Same as TestGcsWithAsio, but issues chain-replicated GCS commands.
class TestGcsWithChainAsio : public TestGcsWithAsio {
 public:
  TestGcsWithChainAsio() : TestGcsWithAsio(gcs::CommandType::kChain) {}
};
/// Static helpers that exercise the raylet task table: add/lookup round trips,
/// batched deletion, and pub-sub notification semantics. Every helper drives
/// the global `test` fixture's event loop and uses its callback counter to
/// verify how many callbacks fired.
class TaskTableTestHelper {
 public:
  /// A helper function that creates a GCS `TaskTableData` object.
  static std::shared_ptr<TaskTableData> CreateTaskTableData(const TaskID &task_id,
                                                            uint64_t num_returns = 0) {
    auto data = std::make_shared<TaskTableData>();
    data->mutable_task()->mutable_task_spec()->set_task_id(task_id.Binary());
    data->mutable_task()->mutable_task_spec()->set_num_returns(num_returns);
    return data;
  }

  /// A helper function that compare whether 2 `TaskTableData` objects are equal.
  /// Note, this function only compares fields set by `CreateTaskTableData`.
  static bool TaskTableDataEqual(const TaskTableData &data1, const TaskTableData &data2) {
    const auto &spec1 = data1.task().task_spec();
    const auto &spec2 = data2.task().task_spec();
    return (spec1.task_id() == spec2.task_id() &&
            spec1.num_returns() == spec2.num_returns());
  }

  /// Add one task and verify that a subsequent lookup returns it.
  static void TestTableLookup(const JobID &job_id,
                              std::shared_ptr<gcs::RedisGcsClient> client) {
    const auto task_id = RandomTaskId();
    const auto data = CreateTaskTableData(task_id);
    // Check that we added the correct task.
    auto add_callback = [task_id, data](gcs::RedisGcsClient *client, const TaskID &id,
                                        const TaskTableData &d) {
      ASSERT_EQ(id, task_id);
      ASSERT_TRUE(TaskTableDataEqual(*data, d));
    };
    // Check that the lookup returns the added task.
    auto lookup_callback = [task_id, data](gcs::RedisGcsClient *client, const TaskID &id,
                                           const TaskTableData &d) {
      ASSERT_EQ(id, task_id);
      ASSERT_TRUE(TaskTableDataEqual(*data, d));
      test->Stop();
    };
    // Check that the lookup does not return an empty entry.
    auto failure_callback = [](gcs::RedisGcsClient *client, const TaskID &id) {
      RAY_CHECK(false);
    };
    // Add the task, then do a lookup.
    RAY_CHECK_OK(client->raylet_task_table().Add(job_id, task_id, data, add_callback));
    RAY_CHECK_OK(client->raylet_task_table().Lookup(job_id, task_id, lookup_callback,
                                                    failure_callback));
    // Run the event loop. The loop will only stop if the Lookup callback is
    // called (or an assertion failure).
    test->Start();
  }

  /// Look up a task that was never written; only the failure callback may run.
  static void TestTableLookupFailure(const JobID &job_id,
                                     std::shared_ptr<gcs::RedisGcsClient> client) {
    TaskID task_id = RandomTaskId();
    // Check that the lookup does not return data.
    auto lookup_callback = [](gcs::RedisGcsClient *client, const TaskID &id,
                              const TaskTableData &d) { RAY_CHECK(false); };
    // Check that the lookup returns an empty entry.
    auto failure_callback = [task_id](gcs::RedisGcsClient *client, const TaskID &id) {
      ASSERT_EQ(id, task_id);
      test->Stop();
    };
    // Lookup the task. We have not done any writes, so the key should be empty.
    RAY_CHECK_OK(client->raylet_task_table().Lookup(job_id, task_id, lookup_callback,
                                                    failure_callback));
    // Run the event loop. The loop will only stop if the failure callback is
    // called (or an assertion failure).
    test->Start();
  }

  /// Write one task per element of `data_vector`, delete all of them (single-
  /// key or batched overload), and verify every deleted key is gone.
  static void TestDeleteKeysFromTable(
      const JobID &job_id, std::shared_ptr<gcs::RedisGcsClient> client,
      std::vector<std::shared_ptr<TaskTableData>> &data_vector, bool stop_at_end) {
    std::vector<TaskID> ids;
    TaskID task_id;
    for (auto &data : data_vector) {
      task_id = RandomTaskId();
      ids.push_back(task_id);
      // Check that we added the correct object entries.
      auto add_callback = [task_id, data](gcs::RedisGcsClient *client, const TaskID &id,
                                          const TaskTableData &d) {
        ASSERT_EQ(id, task_id);
        ASSERT_TRUE(TaskTableDataEqual(*data, d));
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(client->raylet_task_table().Add(job_id, task_id, data, add_callback));
    }
    for (const auto &task_id : ids) {
      auto task_lookup_callback = [task_id](gcs::RedisGcsClient *client, const TaskID &id,
                                            const TaskTableData &data) {
        ASSERT_EQ(id, task_id);
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(client->raylet_task_table().Lookup(job_id, task_id,
                                                      task_lookup_callback, nullptr));
    }
    // Exercise both the single-key and the batched Delete overloads.
    if (ids.size() == 1) {
      client->raylet_task_table().Delete(job_id, ids[0]);
    } else {
      client->raylet_task_table().Delete(job_id, ids);
    }
    auto expected_failure_callback = [](RedisGcsClient *client, const TaskID &id) {
      ASSERT_TRUE(true);
      test->IncrementNumCallbacks();
    };
    auto undesired_callback = [](gcs::RedisGcsClient *client, const TaskID &id,
                                 const TaskTableData &data) { ASSERT_TRUE(false); };
    for (size_t i = 0; i < ids.size(); ++i) {
      // Look up each deleted key (previously this looked up the same trailing
      // `task_id` every iteration, so only one key of the batch was verified).
      RAY_CHECK_OK(client->raylet_task_table().Lookup(job_id, ids[i], undesired_callback,
                                                      expected_failure_callback));
    }
    if (stop_at_end) {
      auto stop_callback = [](RedisGcsClient *client, const TaskID &id) { test->Stop(); };
      RAY_CHECK_OK(
          client->raylet_task_table().Lookup(job_id, ids[0], nullptr, stop_callback));
    }
  }

  /// Subscribe with per-key notification requests: writes to a key we did not
  /// request notifications for must not produce callbacks.
  static void TestTableSubscribeId(const JobID &job_id,
                                   std::shared_ptr<gcs::RedisGcsClient> client) {
    size_t num_modifications = 3;
    // Add a table entry.
    TaskID task_id1 = RandomTaskId();
    // Add a table entry at a second key.
    TaskID task_id2 = RandomTaskId();
    // The callback for a notification from the table. This should only be
    // received for keys that we requested notifications for.
    auto notification_callback = [task_id2, num_modifications](
                                     gcs::RedisGcsClient *client, const TaskID &id,
                                     const TaskTableData &data) {
      // Check that we only get notifications for the requested key.
      ASSERT_EQ(id, task_id2);
      // Check that we get notifications in the same order as the writes.
      ASSERT_TRUE(
          TaskTableDataEqual(data, *CreateTaskTableData(task_id2, test->NumCallbacks())));
      test->IncrementNumCallbacks();
      if (test->NumCallbacks() == num_modifications) {
        test->Stop();
      }
    };
    // The failure callback should be called once since both keys start as empty.
    bool failure_notification_received = false;
    auto failure_callback = [task_id2, &failure_notification_received](
                                gcs::RedisGcsClient *client, const TaskID &id) {
      ASSERT_EQ(id, task_id2);
      // The failure notification should be the first notification received.
      ASSERT_EQ(test->NumCallbacks(), 0);
      failure_notification_received = true;
    };
    // The callback for subscription success. Once we've subscribed, request
    // notifications for only one of the keys, then write to both keys.
    auto subscribe_callback = [job_id, task_id1, task_id2,
                               num_modifications](gcs::RedisGcsClient *client) {
      // Request notifications for one of the keys.
      RAY_CHECK_OK(client->raylet_task_table().RequestNotifications(
          job_id, task_id2, local_client_id, nullptr));
      // Write both keys. We should only receive notifications for the key that
      // we requested them for.
      for (uint64_t i = 0; i < num_modifications; i++) {
        auto data = CreateTaskTableData(task_id1, i);
        RAY_CHECK_OK(client->raylet_task_table().Add(job_id, task_id1, data, nullptr));
      }
      for (uint64_t i = 0; i < num_modifications; i++) {
        auto data = CreateTaskTableData(task_id2, i);
        RAY_CHECK_OK(client->raylet_task_table().Add(job_id, task_id2, data, nullptr));
      }
    };
    // Subscribe to notifications for this client. This allows us to request and
    // receive notifications for specific keys.
    RAY_CHECK_OK(client->raylet_task_table().Subscribe(
        job_id, local_client_id, notification_callback, failure_callback,
        subscribe_callback));
    // Run the event loop. The loop will only stop if the registered subscription
    // callback is called for the requested key.
    test->Start();
    // Check that the failure callback was called since the key was initially
    // empty.
    ASSERT_TRUE(failure_notification_received);
    // Check that we received one notification callback for each write to the
    // requested key.
    ASSERT_EQ(test->NumCallbacks(), num_modifications);
  }

  /// Request notifications, cancel, write, re-request: only the first value
  /// and the final value should be delivered (2 callbacks total).
  static void TestTableSubscribeCancel(const JobID &job_id,
                                       std::shared_ptr<gcs::RedisGcsClient> client) {
    // Add a table entry.
    const auto task_id = RandomTaskId();
    const int num_modifications = 3;
    const auto data = CreateTaskTableData(task_id, 0);
    RAY_CHECK_OK(client->raylet_task_table().Add(job_id, task_id, data, nullptr));
    // The failure callback should not be called since all keys are non-empty
    // when notifications are requested.
    auto failure_callback = [](gcs::RedisGcsClient *client, const TaskID &id) {
      RAY_CHECK(false);
    };
    // The callback for a notification from the table. This should only be
    // received for keys that we requested notifications for.
    auto notification_callback = [task_id](gcs::RedisGcsClient *client, const TaskID &id,
                                           const TaskTableData &data) {
      ASSERT_EQ(id, task_id);
      // Check that we only get notifications for the first and last writes,
      // since notifications are canceled in between.
      if (test->NumCallbacks() == 0) {
        ASSERT_TRUE(TaskTableDataEqual(data, *CreateTaskTableData(task_id, 0)));
      } else {
        ASSERT_TRUE(TaskTableDataEqual(
            data, *CreateTaskTableData(task_id, num_modifications - 1)));
      }
      test->IncrementNumCallbacks();
      if (test->NumCallbacks() == num_modifications - 1) {
        test->Stop();
      }
    };
    // The subscription-success callback performs the request/cancel/write
    // sequence under test.
    auto subscribe_callback = [job_id, task_id](gcs::RedisGcsClient *client) {
      // Request notifications, then cancel immediately. We should receive a
      // notification for the current value at the key.
      RAY_CHECK_OK(client->raylet_task_table().RequestNotifications(
          job_id, task_id, local_client_id, nullptr));
      RAY_CHECK_OK(client->raylet_task_table().CancelNotifications(
          job_id, task_id, local_client_id, nullptr));
      // Write to the key. Since we canceled notifications, we should not receive
      // a notification for these writes.
      for (uint64_t i = 1; i < num_modifications; i++) {
        auto data = CreateTaskTableData(task_id, i);
        RAY_CHECK_OK(client->raylet_task_table().Add(job_id, task_id, data, nullptr));
      }
      // Request notifications again. We should receive a notification for the
      // current value at the key.
      RAY_CHECK_OK(client->raylet_task_table().RequestNotifications(
          job_id, task_id, local_client_id, nullptr));
    };
    // Subscribe to notifications for this client. This allows us to request and
    // receive notifications for specific keys.
    RAY_CHECK_OK(client->raylet_task_table().Subscribe(
        job_id, local_client_id, notification_callback, failure_callback,
        subscribe_callback));
    // Run the event loop. The loop will only stop if the registered subscription
    // callback is called for the requested key.
    test->Start();
    // Check that we received a notification callback for the first and last
    // writes to the key, since notifications are canceled in between.
    ASSERT_EQ(test->NumCallbacks(), 2);
  }
};
// Convenient macro to test across {ae, asio} x {regular, chain} x {the tests}.
// Undefined at the end.
// Each expansion defines one gtest case that points the global `test` pointer
// at the current fixture and dispatches to the named TaskTableTestHelper method.
#define TEST_TASK_TABLE_MACRO(FIXTURE, TEST) \
  TEST_F(FIXTURE, TEST) {                    \
    test = this;                             \
    TaskTableTestHelper::TEST(job_id_, client_); \
  }

// Basic add-then-lookup round trip on the task table.
TEST_TASK_TABLE_MACRO(TestGcsWithAsio, TestTableLookup);
/// Static helpers that exercise append-only GCS logs (the task reconstruction
/// log): ordered lookup and conditional AppendAt semantics.
class LogLookupTestHelper {
 public:
  // Appends three entries under one task id, then verifies a lookup returns
  // them in append order.
  static void TestLogLookup(const JobID &job_id,
                            std::shared_ptr<gcs::RedisGcsClient> client) {
    // Append some entries to the log at an object ID.
    TaskID task_id = RandomTaskId();
    std::vector<std::string> node_manager_ids = {"abc", "def", "ghi"};
    for (auto &node_manager_id : node_manager_ids) {
      auto data = std::make_shared<TaskReconstructionData>();
      data->set_node_manager_id(node_manager_id);
      // Check that we added the correct object entries.
      auto add_callback = [task_id, data](gcs::RedisGcsClient *client, const TaskID &id,
                                          const TaskReconstructionData &d) {
        ASSERT_EQ(id, task_id);
        ASSERT_EQ(data->node_manager_id(), d.node_manager_id());
      };
      RAY_CHECK_OK(
          client->task_reconstruction_log().Append(job_id, task_id, data, add_callback));
    }
    // Check that lookup returns the added object entries.
    auto lookup_callback = [task_id, node_manager_ids](
                               gcs::RedisGcsClient *client, const TaskID &id,
                               const std::vector<TaskReconstructionData> &data) {
      ASSERT_EQ(id, task_id);
      // The callback counter doubles as an index: entries must come back in
      // the same order they were appended.
      for (const auto &entry : data) {
        ASSERT_EQ(entry.node_manager_id(), node_manager_ids[test->NumCallbacks()]);
        test->IncrementNumCallbacks();
      }
      if (test->NumCallbacks() == node_manager_ids.size()) {
        test->Stop();
      }
    };
    // Do a lookup at the object ID.
    RAY_CHECK_OK(
        client->task_reconstruction_log().Lookup(job_id, task_id, lookup_callback));
    // Run the event loop. The loop will only stop if the Lookup callback is
    // called (or an assertion failure).
    test->Start();
    ASSERT_EQ(test->NumCallbacks(), node_manager_ids.size());
  }

  // AppendAt only succeeds when the caller's expected log length matches the
  // actual length: the two mismatched appends below must each hit the failure
  // callback, and the final lookup must see exactly {"A", "B"}.
  static void TestLogAppendAt(const JobID &job_id,
                              std::shared_ptr<gcs::RedisGcsClient> client) {
    TaskID task_id = RandomTaskId();
    std::vector<std::string> node_manager_ids = {"A", "B"};
    std::vector<std::shared_ptr<TaskReconstructionData>> data_log;
    for (const auto &node_manager_id : node_manager_ids) {
      auto data = std::make_shared<TaskReconstructionData>();
      data->set_node_manager_id(node_manager_id);
      data_log.push_back(data);
    }
    // Check that we added the correct task.
    auto failure_callback = [task_id](gcs::RedisGcsClient *client, const TaskID &id,
                                      const TaskReconstructionData &d) {
      ASSERT_EQ(id, task_id);
      test->IncrementNumCallbacks();
    };
    // Will succeed.
    RAY_CHECK_OK(client->task_reconstruction_log().Append(job_id, task_id,
                                                          data_log.front(),
                                                          /*done callback=*/nullptr));
    // Append at index 0 will fail.
    RAY_CHECK_OK(client->task_reconstruction_log().AppendAt(
        job_id, task_id, data_log[1],
        /*done callback=*/nullptr, failure_callback, /*log_length=*/0));
    // Append at index 2 will fail.
    RAY_CHECK_OK(client->task_reconstruction_log().AppendAt(
        job_id, task_id, data_log[1],
        /*done callback=*/nullptr, failure_callback, /*log_length=*/2));
    // Append at index 1 will succeed.
    RAY_CHECK_OK(client->task_reconstruction_log().AppendAt(
        job_id, task_id, data_log[1],
        /*done callback=*/nullptr, failure_callback, /*log_length=*/1));
    auto lookup_callback = [node_manager_ids](
                               gcs::RedisGcsClient *client, const TaskID &id,
                               const std::vector<TaskReconstructionData> &data) {
      std::vector<std::string> appended_managers;
      for (const auto &entry : data) {
        appended_managers.push_back(entry.node_manager_id());
      }
      ASSERT_EQ(appended_managers, node_manager_ids);
      test->Stop();
    };
    RAY_CHECK_OK(
        client->task_reconstruction_log().Lookup(job_id, task_id, lookup_callback));
    // Run the event loop. The loop will only stop if the Lookup callback is
    // called (or an assertion failure).
    test->Start();
    // The two failed AppendAt calls each fired the failure callback once.
    ASSERT_EQ(test->NumCallbacks(), 2);
  }
};
// Appends to a reconstruction log and verifies ordered lookup.
TEST_F(TestGcsWithAsio, TestLogLookup) {
  test = this;
  LogLookupTestHelper::TestLogLookup(job_id_, client_);
}
// Looking up a task that was never written must hit the failure callback.
TEST_TASK_TABLE_MACRO(TestGcsWithAsio, TestTableLookupFailure);
// Verifies conditional append (AppendAt) success/failure semantics.
TEST_F(TestGcsWithAsio, TestLogAppendAt) {
  test = this;
  LogLookupTestHelper::TestLogAppendAt(job_id_, client_);
}
/// Static helpers that exercise GCS sets (the object table): add/remove round
/// trips, batched deletion, and pub-sub notification semantics. All helpers
/// drive the global `test` fixture's event loop.
class SetTestHelper {
 public:
  /// Adds three entries under one object id, looks them up, removes them, and
  /// verifies the set is empty afterwards.
  static void TestSet(const JobID &job_id, std::shared_ptr<gcs::RedisGcsClient> client) {
    // Add some entries to the set at an object ID.
    ObjectID object_id = ObjectID::FromRandom();
    std::vector<std::string> managers = {"abc", "def", "ghi"};
    for (auto &manager : managers) {
      auto data = std::make_shared<ObjectTableData>();
      data->set_manager(manager);
      // Check that we added the correct object entries.
      auto add_callback = [object_id, data](gcs::RedisGcsClient *client,
                                            const ObjectID &id,
                                            const ObjectTableData &d) {
        ASSERT_EQ(id, object_id);
        ASSERT_EQ(data->manager(), d.manager());
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(client->object_table().Add(job_id, object_id, data, add_callback));
    }
    // Check that lookup returns the added object entries.
    auto lookup_callback = [object_id, managers](
                               gcs::RedisGcsClient *client, const ObjectID &id,
                               const std::vector<ObjectTableData> &data) {
      ASSERT_EQ(id, object_id);
      ASSERT_EQ(data.size(), managers.size());
      test->IncrementNumCallbacks();
    };
    // Do a lookup at the object ID.
    RAY_CHECK_OK(client->object_table().Lookup(job_id, object_id, lookup_callback));
    for (auto &manager : managers) {
      auto data = std::make_shared<ObjectTableData>();
      data->set_manager(manager);
      // Check that we removed the correct object entries.
      auto remove_entry_callback = [object_id, data](gcs::RedisGcsClient *client,
                                                     const ObjectID &id,
                                                     const ObjectTableData &d) {
        ASSERT_EQ(id, object_id);
        ASSERT_EQ(data->manager(), d.manager());
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(
          client->object_table().Remove(job_id, object_id, data, remove_entry_callback));
    }
    // Check that the entries are removed.
    auto lookup_callback2 = [object_id, managers](
                                gcs::RedisGcsClient *client, const ObjectID &id,
                                const std::vector<ObjectTableData> &data) {
      ASSERT_EQ(id, object_id);
      ASSERT_EQ(data.size(), 0);
      test->IncrementNumCallbacks();
      test->Stop();
    };
    // Do a lookup at the object ID.
    RAY_CHECK_OK(client->object_table().Lookup(job_id, object_id, lookup_callback2));
    // Run the event loop. The loop will only stop if the Lookup callback is
    // called (or an assertion failure).
    test->Start();
    // One callback per add and per remove, plus the two lookups.
    ASSERT_EQ(test->NumCallbacks(), managers.size() * 2 + 2);
  }

  /// Writes one entry per element of `data_vector` (each under a fresh object
  /// id), deletes all keys, and verifies every key is empty afterwards.
  static void TestDeleteKeysFromSet(
      const JobID &job_id, std::shared_ptr<gcs::RedisGcsClient> client,
      std::vector<std::shared_ptr<ObjectTableData>> &data_vector) {
    std::vector<ObjectID> ids;
    ObjectID object_id;
    for (auto &data : data_vector) {
      object_id = ObjectID::FromRandom();
      ids.push_back(object_id);
      // Check that we added the correct object entries.
      auto add_callback = [object_id, data](gcs::RedisGcsClient *client,
                                            const ObjectID &id,
                                            const ObjectTableData &d) {
        ASSERT_EQ(id, object_id);
        ASSERT_EQ(data->manager(), d.manager());
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(client->object_table().Add(job_id, object_id, data, add_callback));
    }
    for (const auto &object_id : ids) {
      // Check that lookup returns the added object entries.
      auto lookup_callback = [object_id, data_vector](
                                 gcs::RedisGcsClient *client, const ObjectID &id,
                                 const std::vector<ObjectTableData> &data) {
        ASSERT_EQ(id, object_id);
        ASSERT_EQ(data.size(), 1);
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(client->object_table().Lookup(job_id, object_id, lookup_callback));
    }
    // Exercise both the single-key and the batched Delete overloads.
    if (ids.size() == 1) {
      client->object_table().Delete(job_id, ids[0]);
    } else {
      client->object_table().Delete(job_id, ids);
    }
    for (const auto &object_id : ids) {
      auto lookup_callback = [object_id](gcs::RedisGcsClient *client, const ObjectID &id,
                                         const std::vector<ObjectTableData> &data) {
        ASSERT_EQ(id, object_id);
        ASSERT_TRUE(data.size() == 0);
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(client->object_table().Lookup(job_id, object_id, lookup_callback));
    }
  }

  /// Subscribes to all object-table notifications, then adds and removes each
  /// entry several times; duplicate adds/removes must not renotify.
  static void TestSetSubscribeAll(const JobID &job_id,
                                  std::shared_ptr<gcs::RedisGcsClient> client) {
    std::vector<ObjectID> object_ids;
    for (int i = 0; i < 3; i++) {
      object_ids.emplace_back(ObjectID::FromRandom());
    }
    std::vector<std::string> managers = {"abc", "def", "ghi"};
    // Callback for a notification.
    auto notification_callback =
        [object_ids, managers](
            gcs::RedisGcsClient *client, const ObjectID &id,
            const std::vector<ObjectChangeNotification> &notifications) {
          // The first 9 notifications are the adds; the rest are removes.
          if (test->NumCallbacks() < 3 * 3) {
            ASSERT_EQ(notifications[0].GetGcsChangeMode(), GcsChangeMode::APPEND_OR_ADD);
          } else {
            ASSERT_EQ(notifications[0].GetGcsChangeMode(), GcsChangeMode::REMOVE);
          }
          ASSERT_EQ(id, object_ids[test->NumCallbacks() / 3 % 3]);
          // Check that we get notifications in the same order as the writes.
          for (const auto &entry : notifications[0].GetData()) {
            ASSERT_EQ(entry.manager(), managers[test->NumCallbacks() % 3]);
            test->IncrementNumCallbacks();
          }
          if (test->NumCallbacks() == object_ids.size() * 3 * 2) {
            test->Stop();
          }
        };
    // Callback for subscription success. We are guaranteed to receive
    // notifications after this is called.
    auto subscribe_callback = [job_id, object_ids,
                               managers](gcs::RedisGcsClient *client) {
      // We have subscribed. Do the writes to the table.
      for (size_t i = 0; i < object_ids.size(); i++) {
        for (size_t j = 0; j < managers.size(); j++) {
          auto data = std::make_shared<ObjectTableData>();
          data->set_manager(managers[j]);
          for (int k = 0; k < 3; k++) {
            // Add the same entry several times.
            // Expect no notification if the entry already exists.
            RAY_CHECK_OK(
                client->object_table().Add(job_id, object_ids[i], data, nullptr));
          }
        }
      }
      for (size_t i = 0; i < object_ids.size(); i++) {
        for (size_t j = 0; j < managers.size(); j++) {
          auto data = std::make_shared<ObjectTableData>();
          data->set_manager(managers[j]);
          for (int k = 0; k < 3; k++) {
            // Remove the same entry several times.
            // Expect no notification if the entry doesn't exist.
            RAY_CHECK_OK(
                client->object_table().Remove(job_id, object_ids[i], data, nullptr));
          }
        }
      }
    };
    // Subscribe to all driver table notifications. Once we have successfully
    // subscribed, we will append to the key several times and check that we get
    // notified for each.
    RAY_CHECK_OK(client->object_table().Subscribe(
        job_id, ClientID::Nil(), notification_callback, subscribe_callback));
    // Run the event loop. The loop will only stop if the registered subscription
    // callback is called (or an assertion failure).
    test->Start();
    // Check that we received one notification callback for each write.
    ASSERT_EQ(test->NumCallbacks(), object_ids.size() * 3 * 2);
  }

  /// Subscribes with per-key notification requests: writes to the key we did
  /// not request notifications for must not produce callbacks.
  static void TestSetSubscribeId(const JobID &job_id,
                                 std::shared_ptr<gcs::RedisGcsClient> client) {
    // Add a set entry.
    ObjectID object_id1 = ObjectID::FromRandom();
    std::vector<std::string> managers1 = {"abc", "def", "ghi"};
    auto data1 = std::make_shared<ObjectTableData>();
    data1->set_manager(managers1[0]);
    RAY_CHECK_OK(client->object_table().Add(job_id, object_id1, data1, nullptr));
    // Add a set entry at a second key.
    ObjectID object_id2 = ObjectID::FromRandom();
    std::vector<std::string> managers2 = {"jkl", "mno", "pqr"};
    auto data2 = std::make_shared<ObjectTableData>();
    data2->set_manager(managers2[0]);
    RAY_CHECK_OK(client->object_table().Add(job_id, object_id2, data2, nullptr));
    // The callback for a notification from the table. This should only be
    // received for keys that we requested notifications for.
    auto notification_callback =
        [object_id2, managers2](
            gcs::RedisGcsClient *client, const ObjectID &id,
            const std::vector<ObjectChangeNotification> &notifications) {
          ASSERT_EQ(notifications[0].GetGcsChangeMode(), GcsChangeMode::APPEND_OR_ADD);
          // Check that we only get notifications for the requested key.
          ASSERT_EQ(id, object_id2);
          // Check that we get notifications in the same order as the writes.
          for (const auto &entry : notifications[0].GetData()) {
            ASSERT_EQ(entry.manager(), managers2[test->NumCallbacks()]);
            test->IncrementNumCallbacks();
          }
          if (test->NumCallbacks() == managers2.size()) {
            test->Stop();
          }
        };
    // The callback for subscription success. Once we've subscribed, request
    // notifications for only one of the keys, then write to both keys.
    auto subscribe_callback = [job_id, object_id1, object_id2, managers1,
                               managers2](gcs::RedisGcsClient *client) {
      // Request notifications for one of the keys.
      RAY_CHECK_OK(client->object_table().RequestNotifications(job_id, object_id2,
                                                               local_client_id, nullptr));
      // Write both keys. We should only receive notifications for the key that
      // we requested them for.
      auto remaining = std::vector<std::string>(++managers1.begin(), managers1.end());
      for (const auto &manager : remaining) {
        auto data = std::make_shared<ObjectTableData>();
        data->set_manager(manager);
        RAY_CHECK_OK(client->object_table().Add(job_id, object_id1, data, nullptr));
      }
      remaining = std::vector<std::string>(++managers2.begin(), managers2.end());
      for (const auto &manager : remaining) {
        auto data = std::make_shared<ObjectTableData>();
        data->set_manager(manager);
        RAY_CHECK_OK(client->object_table().Add(job_id, object_id2, data, nullptr));
      }
    };
    // Subscribe to notifications for this client. This allows us to request and
    // receive notifications for specific keys.
    RAY_CHECK_OK(client->object_table().Subscribe(
        job_id, local_client_id, notification_callback, subscribe_callback));
    // Run the event loop. The loop will only stop if the registered subscription
    // callback is called for the requested key.
    test->Start();
    // Check that we received one notification callback for each write to the
    // requested key.
    ASSERT_EQ(test->NumCallbacks(), managers2.size());
  }

  /// Requests notifications, cancels, writes, then re-requests: expect one
  /// notification for the initial value and one for the full final set.
  static void TestSetSubscribeCancel(const JobID &job_id,
                                     std::shared_ptr<gcs::RedisGcsClient> client) {
    // Add a set entry.
    ObjectID object_id = ObjectID::FromRandom();
    std::vector<std::string> managers = {"jkl", "mno", "pqr"};
    auto data = std::make_shared<ObjectTableData>();
    data->set_manager(managers[0]);
    RAY_CHECK_OK(client->object_table().Add(job_id, object_id, data, nullptr));
    // The callback for a notification from the object table. This should only be
    // received for the object that we requested notifications for.
    auto notification_callback =
        [object_id, managers](
            gcs::RedisGcsClient *client, const ObjectID &id,
            const std::vector<ObjectChangeNotification> &notifications) {
          ASSERT_EQ(notifications[0].GetGcsChangeMode(), GcsChangeMode::APPEND_OR_ADD);
          ASSERT_EQ(id, object_id);
          // Check that we get a duplicate notification for the first write. We get a
          // duplicate notification because notifications
          // are canceled after the first write, then requested again.
          const std::vector<ObjectTableData> &data = notifications[0].GetData();
          if (data.size() == 1) {
            // first notification
            ASSERT_EQ(data[0].manager(), managers[0]);
            test->IncrementNumCallbacks();
          } else {
            // second notification
            ASSERT_EQ(data.size(), managers.size());
            std::unordered_set<std::string> managers_set(managers.begin(),
                                                         managers.end());
            std::unordered_set<std::string> data_managers_set;
            for (const auto &entry : data) {
              data_managers_set.insert(entry.manager());
              test->IncrementNumCallbacks();
            }
            ASSERT_EQ(managers_set, data_managers_set);
          }
          if (test->NumCallbacks() == managers.size() + 1) {
            test->Stop();
          }
        };
    // The subscription-success callback performs the request/cancel/write
    // sequence under test.
    auto subscribe_callback = [job_id, object_id, managers](gcs::RedisGcsClient *client) {
      // Request notifications, then cancel immediately. We should receive a
      // notification for the current value at the key.
      RAY_CHECK_OK(client->object_table().RequestNotifications(job_id, object_id,
                                                               local_client_id, nullptr));
      RAY_CHECK_OK(client->object_table().CancelNotifications(job_id, object_id,
                                                              local_client_id, nullptr));
      // Add to the key. Since we canceled notifications, we should not
      // receive a notification for these writes.
      auto remaining = std::vector<std::string>(++managers.begin(), managers.end());
      for (const auto &manager : remaining) {
        auto data = std::make_shared<ObjectTableData>();
        data->set_manager(manager);
        RAY_CHECK_OK(client->object_table().Add(job_id, object_id, data, nullptr));
      }
      // Request notifications again. We should receive a notification for the
      // current values at the key.
      RAY_CHECK_OK(client->object_table().RequestNotifications(job_id, object_id,
                                                               local_client_id, nullptr));
    };
    // Subscribe to notifications for this client. This allows us to request and
    // receive notifications for specific keys.
    RAY_CHECK_OK(client->object_table().Subscribe(
        job_id, local_client_id, notification_callback, subscribe_callback));
    // Run the event loop. The loop will only stop if the registered subscription
    // callback is called for the requested key.
    test->Start();
    // Check that we received a notification callback for the first append to the
    // key, then a notification for all of the appends, because we cancel
    // notifications in between.
    ASSERT_EQ(test->NumCallbacks(), managers.size() + 1);
  }
};
// Exercises set add/lookup/remove round trips on the object table.
TEST_F(TestGcsWithAsio, TestSet) {
  test = this;
  SetTestHelper::TestSet(job_id_, client_);
}
/// Static helper exercising batched deletion of whole log keys.
class LogDeleteTestHelper {
 public:
  // Appends one entry per element of `data_vector` (each under a fresh task
  // id), verifies each key holds exactly one entry, deletes all keys (single
  // or batched Delete overload), and verifies every key is empty afterwards.
  static void TestDeleteKeysFromLog(
      const JobID &job_id, std::shared_ptr<gcs::RedisGcsClient> client,
      std::vector<std::shared_ptr<TaskReconstructionData>> &data_vector) {
    std::vector<TaskID> ids;
    TaskID task_id;
    for (auto &data : data_vector) {
      task_id = RandomTaskId();
      ids.push_back(task_id);
      // Check that we added the correct object entries.
      auto add_callback = [task_id, data](gcs::RedisGcsClient *client, const TaskID &id,
                                          const TaskReconstructionData &d) {
        ASSERT_EQ(id, task_id);
        ASSERT_EQ(data->node_manager_id(), d.node_manager_id());
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(
          client->task_reconstruction_log().Append(job_id, task_id, data, add_callback));
    }
    for (const auto &task_id : ids) {
      // Check that lookup returns the added object entries.
      auto lookup_callback = [task_id, data_vector](
                                 gcs::RedisGcsClient *client, const TaskID &id,
                                 const std::vector<TaskReconstructionData> &data) {
        ASSERT_EQ(id, task_id);
        ASSERT_EQ(data.size(), 1);
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(
          client->task_reconstruction_log().Lookup(job_id, task_id, lookup_callback));
    }
    // Exercise both the single-key and the batched Delete overloads.
    if (ids.size() == 1) {
      client->task_reconstruction_log().Delete(job_id, ids[0]);
    } else {
      client->task_reconstruction_log().Delete(job_id, ids);
    }
    for (const auto &task_id : ids) {
      auto lookup_callback = [task_id](gcs::RedisGcsClient *client, const TaskID &id,
                                       const std::vector<TaskReconstructionData> &data) {
        ASSERT_EQ(id, task_id);
        ASSERT_TRUE(data.size() == 0);
        test->IncrementNumCallbacks();
      };
      RAY_CHECK_OK(
          client->task_reconstruction_log().Lookup(job_id, task_id, lookup_callback));
    }
  }
};
// Test delete function for keys of Log or Table.
// Exercises Delete() in three regimes for each storage kind: a single key,
// more than one key but fewer than maximum_gcs_deletion_batch_size, and more
// keys than the batch size (which forces Delete to split the request into
// multiple commands).
void TestDeleteKeys(const JobID &job_id, std::shared_ptr<gcs::RedisGcsClient> client) {
  // Test delete function for keys of Log.
  std::vector<std::shared_ptr<TaskReconstructionData>> task_reconstruction_vector;
  // Appends `add_count` TaskReconstructionData entries with random node ids.
  auto AppendTaskReconstructionData = [&task_reconstruction_vector](size_t add_count) {
    for (size_t i = 0; i < add_count; ++i) {
      auto data = std::make_shared<TaskReconstructionData>();
      data->set_node_manager_id(ObjectID::FromRandom().Hex());
      task_reconstruction_vector.push_back(data);
    }
  };
  // Test one element case.
  AppendTaskReconstructionData(1);
  ASSERT_EQ(task_reconstruction_vector.size(), 1);
  LogDeleteTestHelper::TestDeleteKeysFromLog(job_id, client, task_reconstruction_vector);
  // Test the case for more than one element and less than
  // maximum_gcs_deletion_batch_size.
  AppendTaskReconstructionData(RayConfig::instance().maximum_gcs_deletion_batch_size() /
                               2);
  ASSERT_GT(task_reconstruction_vector.size(), 1);
  ASSERT_LT(task_reconstruction_vector.size(),
            RayConfig::instance().maximum_gcs_deletion_batch_size());
  LogDeleteTestHelper::TestDeleteKeysFromLog(job_id, client, task_reconstruction_vector);
  // Test the case for more than maximum_gcs_deletion_batch_size.
  // The Delete function will split the data into two commands.
  AppendTaskReconstructionData(RayConfig::instance().maximum_gcs_deletion_batch_size() /
                               2);
  ASSERT_GT(task_reconstruction_vector.size(),
            RayConfig::instance().maximum_gcs_deletion_batch_size());
  LogDeleteTestHelper::TestDeleteKeysFromLog(job_id, client, task_reconstruction_vector);
  // Test delete function for keys of Table.
  std::vector<std::shared_ptr<TaskTableData>> task_vector;
  auto AppendTaskData = [&task_vector](size_t add_count) {
    for (size_t i = 0; i < add_count; ++i) {
      task_vector.push_back(TaskTableTestHelper::CreateTaskTableData(RandomTaskId()));
    }
  };
  AppendTaskData(1);
  ASSERT_EQ(task_vector.size(), 1);
  TaskTableTestHelper::TestDeleteKeysFromTable(job_id, client, task_vector, false);
  AppendTaskData(RayConfig::instance().maximum_gcs_deletion_batch_size() / 2);
  ASSERT_GT(task_vector.size(), 1);
  ASSERT_LT(task_vector.size(), RayConfig::instance().maximum_gcs_deletion_batch_size());
  TaskTableTestHelper::TestDeleteKeysFromTable(job_id, client, task_vector, false);
  AppendTaskData(RayConfig::instance().maximum_gcs_deletion_batch_size() / 2);
  ASSERT_GT(task_vector.size(), RayConfig::instance().maximum_gcs_deletion_batch_size());
  // NOTE(review): the final boolean presumably tells the helper to stop the
  // event loop when done — confirm against TestDeleteKeysFromTable's signature.
  TaskTableTestHelper::TestDeleteKeysFromTable(job_id, client, task_vector, true);
  test->Start();
  ASSERT_GT(test->NumCallbacks(),
            9 * RayConfig::instance().maximum_gcs_deletion_batch_size());
  // Test delete function for keys of Set.
  std::vector<std::shared_ptr<ObjectTableData>> object_vector;
  auto AppendObjectData = [&object_vector](size_t add_count) {
    for (size_t i = 0; i < add_count; ++i) {
      auto data = std::make_shared<ObjectTableData>();
      data->set_manager(ObjectID::FromRandom().Hex());
      object_vector.push_back(data);
    }
  };
  // Test one element case.
  AppendObjectData(1);
  ASSERT_EQ(object_vector.size(), 1);
  SetTestHelper::TestDeleteKeysFromSet(job_id, client, object_vector);
  // Test the case for more than one element and less than
  // maximum_gcs_deletion_batch_size.
  AppendObjectData(RayConfig::instance().maximum_gcs_deletion_batch_size() / 2);
  ASSERT_GT(object_vector.size(), 1);
  ASSERT_LT(object_vector.size(),
            RayConfig::instance().maximum_gcs_deletion_batch_size());
  SetTestHelper::TestDeleteKeysFromSet(job_id, client, object_vector);
  // Test the case for more than maximum_gcs_deletion_batch_size.
  // The Delete function will split the data into two commands.
  AppendObjectData(RayConfig::instance().maximum_gcs_deletion_batch_size() / 2);
  ASSERT_GT(object_vector.size(),
            RayConfig::instance().maximum_gcs_deletion_batch_size());
  SetTestHelper::TestDeleteKeysFromSet(job_id, client, object_vector);
}
// Registers the Log/Table/Set key-deletion test with the asio-based GCS
// fixture; `test` is wired up first so helpers can drive the event loop.
TEST_F(TestGcsWithAsio, TestDeleteKey) {
  test = this;
  TestDeleteKeys(job_id_, client_);
}
/// A helper class for Log Subscribe testing.
class LogSubscribeTestHelper {
public:
static void TestLogSubscribeAll(const JobID &job_id,
std::shared_ptr<gcs::RedisGcsClient> client) {
std::vector<JobID> job_ids;
for (int i = 0; i < 3; i++) {
job_ids.emplace_back(NextJobID());
}
// Callback for a notification.
auto notification_callback = [job_ids](gcs::RedisGcsClient *client, const JobID &id,
const std::vector<JobTableData> data) {
ASSERT_EQ(id, job_ids[test->NumCallbacks()]);
// Check that we get notifications in the same order as the writes.
for (const auto &entry : data) {
ASSERT_EQ(entry.job_id(), job_ids[test->NumCallbacks()].Binary());
test->IncrementNumCallbacks();
}
if (test->NumCallbacks() == job_ids.size()) {
test->Stop();
}
};
// Callback for subscription success. We are guaranteed to receive
// notifications after this is called.
auto subscribe_callback = [job_ids](gcs::RedisGcsClient *client) {
// We have subscribed. Do the writes to the table.
for (size_t i = 0; i < job_ids.size(); i++) {
auto job_info_ptr = CreateJobTableData(job_ids[i], false, 0, "localhost", 1);
RAY_CHECK_OK(
client->job_table().Append(job_ids[i], job_ids[i], job_info_ptr, nullptr));
}
};
// Subscribe to all driver table notifications. Once we have successfully
// subscribed, we will append to the key several times and check that we get
// notified for each.
RAY_CHECK_OK(client->job_table().Subscribe(
job_id, ClientID::Nil(), notification_callback, subscribe_callback));
// Run the event loop. The loop will only stop if the registered subscription
// callback is called (or an assertion failure).
test->Start();
// Check that we received one notification callback for each write.
ASSERT_EQ(test->NumCallbacks(), job_ids.size());
}
static void TestLogSubscribeId(const JobID &job_id,
std::shared_ptr<gcs::RedisGcsClient> client) {
// Add a log entry.
JobID job_id1 = NextJobID();
std::vector<std::string> job_ids1 = {"abc", "def", "ghi"};
auto data1 = std::make_shared<JobTableData>();
data1->set_job_id(job_ids1[0]);
RAY_CHECK_OK(client->job_table().Append(job_id, job_id1, data1, nullptr));
// Add a log entry at a second key.
JobID job_id2 = NextJobID();
std::vector<std::string> job_ids2 = {"jkl", "mno", "pqr"};
auto data2 = std::make_shared<JobTableData>();
data2->set_job_id(job_ids2[0]);
RAY_CHECK_OK(client->job_table().Append(job_id, job_id2, data2, nullptr));
// The callback for a notification from the table. This should only be
// received for keys that we requested notifications for.
auto notification_callback = [job_id2, job_ids2](
gcs::RedisGcsClient *client, const JobID &id,
const std::vector<JobTableData> &data) {
// Check that we only get notifications for the requested key.
ASSERT_EQ(id, job_id2);
// Check that we get notifications in the same order as the writes.
for (const auto &entry : data) {
ASSERT_EQ(entry.job_id(), job_ids2[test->NumCallbacks()]);
test->IncrementNumCallbacks();
}
if (test->NumCallbacks() == job_ids2.size()) {
test->Stop();
}
};
// The callback for subscription success. Once we've subscribed, request
// notifications for only one of the keys, then write to both keys.
auto subscribe_callback = [job_id, job_id1, job_id2, job_ids1,
job_ids2](gcs::RedisGcsClient *client) {
// Request notifications for one of the keys.
RAY_CHECK_OK(client->job_table().RequestNotifications(job_id, job_id2,
local_client_id, nullptr));
// Write both keys. We should only receive notifications for the key that
// we requested them for.
auto remaining = std::vector<std::string>(++job_ids1.begin(), job_ids1.end());
for (const auto &job_id_it : remaining) {
auto data = std::make_shared<JobTableData>();
data->set_job_id(job_id_it);
RAY_CHECK_OK(client->job_table().Append(job_id, job_id1, data, nullptr));
}
remaining = std::vector<std::string>(++job_ids2.begin(), job_ids2.end());
for (const auto &job_id_it : remaining) {
auto data = std::make_shared<JobTableData>();
data->set_job_id(job_id_it);
RAY_CHECK_OK(client->job_table().Append(job_id, job_id2, data, nullptr));
}
};
// Subscribe to notifications for this client. This allows us to request and
// receive notifications for specific keys.
RAY_CHECK_OK(client->job_table().Subscribe(
job_id, local_client_id, notification_callback, subscribe_callback));
// Run the event loop. The loop will only stop if the registered subscription
// callback is called for the requested key.
test->Start();
// Check that we received one notification callback for each write to the
// requested key.
ASSERT_EQ(test->NumCallbacks(), job_ids2.size());
}
static void TestLogSubscribeCancel(const JobID &job_id,
std::shared_ptr<gcs::RedisGcsClient> client) {
// Add a log entry.
JobID random_job_id = NextJobID();
std::vector<std::string> job_ids = {"jkl", "mno", "pqr"};
auto data = std::make_shared<JobTableData>();
data->set_job_id(job_ids[0]);
RAY_CHECK_OK(client->job_table().Append(job_id, random_job_id, data, nullptr));
// The callback for a notification from the object table. This should only be
// received for the object that we requested notifications for.
auto notification_callback = [random_job_id, job_ids](
gcs::RedisGcsClient *client, const JobID &id,
const std::vector<JobTableData> &data) {
ASSERT_EQ(id, random_job_id);
// Check that we get a duplicate notification for the first write. We get a
// duplicate notification because the log is append-only and notifications
// are canceled after the first write, then requested again.
auto job_ids_copy = job_ids;
job_ids_copy.insert(job_ids_copy.begin(), job_ids_copy.front());
for (const auto &entry : data) {
ASSERT_EQ(entry.job_id(), job_ids_copy[test->NumCallbacks()]);
test->IncrementNumCallbacks();
}
if (test->NumCallbacks() == job_ids_copy.size()) {
test->Stop();
}
};
// The callback for a notification from the table. This should only be
// received for keys that we requested notifications for.
auto subscribe_callback = [job_id, random_job_id,
job_ids](gcs::RedisGcsClient *client) {
// Request notifications, then cancel immediately. We should receive a
// notification for the current value at the key.
RAY_CHECK_OK(client->job_table().RequestNotifications(job_id, random_job_id,
local_client_id, nullptr));
RAY_CHECK_OK(client->job_table().CancelNotifications(job_id, random_job_id,
local_client_id, nullptr));
// Append to the key. Since we canceled notifications, we should not
// receive a notification for these writes.
auto remaining = std::vector<std::string>(++job_ids.begin(), job_ids.end());
for (const auto &remaining_job_id : remaining) {
auto data = std::make_shared<JobTableData>();
data->set_job_id(remaining_job_id);
RAY_CHECK_OK(client->job_table().Append(job_id, random_job_id, data, nullptr));
}
// Request notifications again. We should receive a notification for the
// current values at the key.
RAY_CHECK_OK(client->job_table().RequestNotifications(job_id, random_job_id,
local_client_id, nullptr));
};
// Subscribe to notifications for this client. This allows us to request and
// receive notifications for specific keys.
RAY_CHECK_OK(client->job_table().Subscribe(
job_id, local_client_id, notification_callback, subscribe_callback));
// Run the event loop. The loop will only stop if the registered subscription
// callback is called for the requested key.
test->Start();
// Check that we received a notification callback for the first append to the
// key, then a notification for all of the appends, because we cancel
// notifications in between.
ASSERT_EQ(test->NumCallbacks(), job_ids.size() + 1);
}
};
// Test registrations with the asio-based GCS fixture. Each body first points
// the file-level `test` pointer at the fixture so the static helpers can
// drive the event loop and callback counters.
TEST_F(TestGcsWithAsio, TestLogSubscribeAll) {
  test = this;
  LogSubscribeTestHelper::TestLogSubscribeAll(job_id_, client_);
}
TEST_F(TestGcsWithAsio, TestSetSubscribeAll) {
  test = this;
  SetTestHelper::TestSetSubscribeAll(job_id_, client_);
}
// NOTE(review): presumably expands to one TEST_F per task-table type —
// confirm against the TEST_TASK_TABLE_MACRO definition earlier in the file.
TEST_TASK_TABLE_MACRO(TestGcsWithAsio, TestTableSubscribeId);
TEST_F(TestGcsWithAsio, TestLogSubscribeId) {
  test = this;
  LogSubscribeTestHelper::TestLogSubscribeId(job_id_, client_);
}
TEST_F(TestGcsWithAsio, TestSetSubscribeId) {
  test = this;
  SetTestHelper::TestSetSubscribeId(job_id_, client_);
}
TEST_TASK_TABLE_MACRO(TestGcsWithAsio, TestTableSubscribeCancel);
TEST_F(TestGcsWithAsio, TestLogSubscribeCancel) {
  test = this;
  LogSubscribeTestHelper::TestLogSubscribeCancel(job_id_, client_);
}
TEST_F(TestGcsWithAsio, TestSetSubscribeCancel) {
  test = this;
  SetTestHelper::TestSetSubscribeCancel(job_id_, client_);
}
/// A helper class for ClientTable testing.
class ClientTableTestHelper {
 public:
  /// Shared assertion body for node-change notifications: the notification
  /// must be about our own entry, its ALIVE/DEAD state must match `is_alive`,
  /// and the client table's local cache must agree.
  static void ClientTableNotification(std::shared_ptr<gcs::RedisGcsClient> client,
                                      const ClientID &client_id, const GcsNodeInfo &data,
                                      bool is_alive) {
    ClientID added_id = local_client_id;
    ASSERT_EQ(client_id, added_id);
    ASSERT_EQ(ClientID::FromBinary(data.node_id()), added_id);
    ASSERT_EQ(data.state() == GcsNodeInfo::ALIVE, is_alive);
    // The cached entry must mirror the state carried by the notification.
    GcsNodeInfo cached_client;
    ASSERT_TRUE(client->client_table().GetClient(added_id, &cached_client));
    ASSERT_EQ(ClientID::FromBinary(cached_client.node_id()), added_id);
    ASSERT_EQ(cached_client.state() == GcsNodeInfo::ALIVE, is_alive);
  }

  /// Connect to the client table and verify that an ALIVE notification for
  /// our own entry arrives.
  static void TestClientTableConnect(const JobID &job_id,
                                     std::shared_ptr<gcs::RedisGcsClient> client) {
    // Subscribe to a node gets added and removed. The latter
    // event will stop the event loop.
    RAY_CHECK_OK(client->client_table().SubscribeToNodeChange(
        [client](const ClientID &id, const GcsNodeInfo &data) {
          // TODO(micafan)
          RAY_LOG(INFO) << "Test alive=" << data.state() << " id=" << id;
          if (data.state() == GcsNodeInfo::ALIVE) {
            ClientTableNotification(client, id, data, true);
            test->Stop();
          }
        },
        nullptr));
    // Connect and disconnect to client table. We should receive notifications
    // for the addition and removal of our own entry.
    GcsNodeInfo local_node_info;
    local_node_info.set_node_id(local_client_id.Binary());
    local_node_info.set_node_manager_address("127.0.0.1");
    local_node_info.set_node_manager_port(0);
    local_node_info.set_object_manager_port(0);
    RAY_CHECK_OK(client->client_table().Connect(local_node_info));
    test->Start();
  }

  /// Connect, then disconnect from inside the ALIVE notification, verifying
  /// that a DEAD notification for our own entry follows.
  static void TestClientTableDisconnect(const JobID &job_id,
                                        std::shared_ptr<gcs::RedisGcsClient> client) {
    // Register callbacks for when a client gets added and removed. The latter
    // event will stop the event loop.
    RAY_CHECK_OK(client->client_table().SubscribeToNodeChange(
        [client](const ClientID &id, const GcsNodeInfo &data) {
          if (data.state() == GcsNodeInfo::ALIVE) {
            ClientTableNotification(client, id, data, /*is_alive=*/true);
            // Disconnect from the client table. We should receive a notification
            // for the removal of our own entry.
            RAY_CHECK_OK(client->client_table().Disconnect());
          } else {
            ClientTableNotification(client, id, data, /*is_alive=*/false);
            test->Stop();
          }
        },
        nullptr));
    // Connect to the client table. We should receive notification for the
    // addition of our own entry.
    GcsNodeInfo local_node_info;
    local_node_info.set_node_id(local_client_id.Binary());
    local_node_info.set_node_manager_address("127.0.0.1");
    local_node_info.set_node_manager_port(0);
    local_node_info.set_object_manager_port(0);
    RAY_CHECK_OK(client->client_table().Connect(local_node_info));
    test->Start();
  }

  /// Connect and disconnect back-to-back, before any notification can be
  /// delivered; both the ALIVE and the DEAD notification must still arrive.
  static void TestClientTableImmediateDisconnect(
      const JobID &job_id, std::shared_ptr<gcs::RedisGcsClient> client) {
    // Register callbacks for when a client gets added and removed. The latter
    // event will stop the event loop.
    RAY_CHECK_OK(client->client_table().SubscribeToNodeChange(
        [client](const ClientID &id, const GcsNodeInfo &data) {
          if (data.state() == GcsNodeInfo::ALIVE) {
            ClientTableNotification(client, id, data, true);
          } else {
            ClientTableNotification(client, id, data, false);
            test->Stop();
          }
        },
        nullptr));
    // Connect to then immediately disconnect from the client table. We should
    // receive notifications for the addition and removal of our own entry.
    GcsNodeInfo local_node_info;
    local_node_info.set_node_id(local_client_id.Binary());
    local_node_info.set_node_manager_address("127.0.0.1");
    local_node_info.set_node_manager_port(0);
    local_node_info.set_object_manager_port(0);
    RAY_CHECK_OK(client->client_table().Connect(local_node_info));
    RAY_CHECK_OK(client->client_table().Disconnect());
    test->Start();
  }

  /// Mark an unrelated client as disconnected and check that the DEAD
  /// notification names exactly that client.
  static void TestClientTableMarkDisconnected(
      const JobID &job_id, std::shared_ptr<gcs::RedisGcsClient> client) {
    GcsNodeInfo local_node_info;
    local_node_info.set_node_id(local_client_id.Binary());
    local_node_info.set_node_manager_address("127.0.0.1");
    local_node_info.set_node_manager_port(0);
    local_node_info.set_object_manager_port(0);
    // Connect to the client table to start receiving notifications.
    RAY_CHECK_OK(client->client_table().Connect(local_node_info));
    // Mark a different client as dead.
    ClientID dead_client_id = ClientID::FromRandom();
    RAY_CHECK_OK(client->client_table().MarkDisconnected(dead_client_id, nullptr));
    // Make sure we only get a notification for the removal of the client we
    // marked as dead.
    RAY_CHECK_OK(client->client_table().SubscribeToNodeChange(
        [dead_client_id](const UniqueID &id, const GcsNodeInfo &data) {
          if (data.state() == GcsNodeInfo::DEAD) {
            ASSERT_EQ(ClientID::FromBinary(data.node_id()), dead_client_id);
            test->Stop();
          }
        },
        nullptr));
    test->Start();
  }
};
// Registrations of the ClientTable tests with the asio-based GCS fixture.
// Each body first points the file-level `test` pointer at the fixture so the
// static helpers can drive the event loop.
TEST_F(TestGcsWithAsio, TestClientTableConnect) {
  test = this;
  ClientTableTestHelper::TestClientTableConnect(job_id_, client_);
}
TEST_F(TestGcsWithAsio, TestClientTableDisconnect) {
  test = this;
  ClientTableTestHelper::TestClientTableDisconnect(job_id_, client_);
}
TEST_F(TestGcsWithAsio, TestClientTableImmediateDisconnect) {
  test = this;
  ClientTableTestHelper::TestClientTableImmediateDisconnect(job_id_, client_);
}
TEST_F(TestGcsWithAsio, TestClientTableMarkDisconnected) {
  test = this;
  ClientTableTestHelper::TestClientTableMarkDisconnected(job_id_, client_);
}
/// A helper class for testing the per-node dynamic resource (hash) table.
class HashTableTestHelper {
 public:
  /// Runs update/lookup/remove cycles on one node's resource map while
  /// subscribed, checking both the lookup results and the change
  /// notifications, and counting every callback that fires.
  static void TestHashTable(const JobID &job_id,
                            std::shared_ptr<gcs::RedisGcsClient> client) {
    // Total number of callbacks (subscribe + notifications + update, lookup
    // and remove acks) this test expects before stopping the event loop.
    const int expected_count = 14;
    ClientID client_id = ClientID::FromRandom();
    // Prepare the first resource map: data_map1.
    DynamicResourceTable::DataMap data_map1;
    auto cpu_data = std::make_shared<ResourceTableData>();
    cpu_data->set_resource_capacity(100);
    data_map1.emplace("CPU", cpu_data);
    auto gpu_data = std::make_shared<ResourceTableData>();
    gpu_data->set_resource_capacity(2);
    data_map1.emplace("GPU", gpu_data);
    // Prepare the second resource map: data_map2, which decreases CPU,
    // increases GPU and adds a new CUSTOM compared to data_map1.
    DynamicResourceTable::DataMap data_map2;
    auto data_cpu = std::make_shared<ResourceTableData>();
    data_cpu->set_resource_capacity(50);
    data_map2.emplace("CPU", data_cpu);
    auto data_gpu = std::make_shared<ResourceTableData>();
    data_gpu->set_resource_capacity(10);
    data_map2.emplace("GPU", data_gpu);
    auto data_custom = std::make_shared<ResourceTableData>();
    data_custom->set_resource_capacity(2);
    data_map2.emplace("CUSTOM", data_custom);
    // NOTE(review): redundant — "CPU" was already set to 50 a few lines above.
    data_map2["CPU"]->set_resource_capacity(50);
    // This is a common comparison function for the test.
    auto compare_test = [](const DynamicResourceTable::DataMap &data1,
                           const DynamicResourceTable::DataMap &data2) {
      ASSERT_EQ(data1.size(), data2.size());
      for (const auto &data : data1) {
        auto iter = data2.find(data.first);
        ASSERT_TRUE(iter != data2.end());
        ASSERT_EQ(iter->second->resource_capacity(), data.second->resource_capacity());
      }
    };
    auto subscribe_callback = [](RedisGcsClient *client) {
      ASSERT_TRUE(true);
      test->IncrementNumCallbacks();
    };
    // Notification handler: removal notifications carry only the removed
    // keys; update notifications carry the full map, which must match
    // data_map1 or data_map2 (distinguished by size).
    auto notification_callback =
        [data_map1, data_map2, compare_test](
            RedisGcsClient *client, const ClientID &id,
            const std::vector<ResourceChangeNotification> &result) {
          RAY_CHECK(result.size() == 1);
          const ResourceChangeNotification &notification = result.back();
          if (notification.IsRemoved()) {
            ASSERT_EQ(notification.GetData().size(), 2);
            ASSERT_TRUE(notification.GetData().find("GPU") !=
                        notification.GetData().end());
            ASSERT_TRUE(
                notification.GetData().find("CUSTOM") != notification.GetData().end() ||
                notification.GetData().find("CPU") != notification.GetData().end());
            // The key "None-Existent" will not appear in the notification.
          } else {
            if (notification.GetData().size() == 2) {
              compare_test(data_map1, notification.GetData());
            } else if (notification.GetData().size() == 3) {
              compare_test(data_map2, notification.GetData());
            } else {
              ASSERT_TRUE(false);
            }
          }
          test->IncrementNumCallbacks();
          // It is not sure which of the notification or lookup callback will come first.
          if (test->NumCallbacks() == expected_count) {
            test->Stop();
          }
        };
    // Step 0: Subscribe the change of the hash table.
    RAY_CHECK_OK(client->resource_table().Subscribe(
        job_id, ClientID::Nil(), notification_callback, subscribe_callback));
    RAY_CHECK_OK(client->resource_table().RequestNotifications(job_id, client_id,
                                                               local_client_id, nullptr));
    // Step 1: Add elements to the hash table.
    auto update_callback1 = [data_map1, compare_test](
                                RedisGcsClient *client, const ClientID &id,
                                const DynamicResourceTable::DataMap &callback_data) {
      compare_test(data_map1, callback_data);
      test->IncrementNumCallbacks();
    };
    RAY_CHECK_OK(
        client->resource_table().Update(job_id, client_id, data_map1, update_callback1));
    auto lookup_callback1 = [data_map1, compare_test](
                                RedisGcsClient *client, const ClientID &id,
                                const DynamicResourceTable::DataMap &callback_data) {
      compare_test(data_map1, callback_data);
      test->IncrementNumCallbacks();
    };
    RAY_CHECK_OK(client->resource_table().Lookup(job_id, client_id, lookup_callback1));
    // Step 2: Decrease one element, increase one and add a new one.
    RAY_CHECK_OK(client->resource_table().Update(job_id, client_id, data_map2, nullptr));
    auto lookup_callback2 = [data_map2, compare_test](
                                RedisGcsClient *client, const ClientID &id,
                                const DynamicResourceTable::DataMap &callback_data) {
      compare_test(data_map2, callback_data);
      test->IncrementNumCallbacks();
    };
    RAY_CHECK_OK(client->resource_table().Lookup(job_id, client_id, lookup_callback2));
    std::vector<std::string> delete_keys({"GPU", "CUSTOM", "None-Existent"});
    auto remove_callback = [delete_keys](RedisGcsClient *client, const ClientID &id,
                                         const std::vector<std::string> &callback_data) {
      for (size_t i = 0; i < callback_data.size(); ++i) {
        // All deleting keys exist in this argument even if the key doesn't exist.
        ASSERT_EQ(callback_data[i], delete_keys[i]);
      }
      test->IncrementNumCallbacks();
    };
    RAY_CHECK_OK(client->resource_table().RemoveEntries(job_id, client_id, delete_keys,
                                                        remove_callback));
    // Expected state after the removal: data_map2 minus GPU and CUSTOM.
    DynamicResourceTable::DataMap data_map3(data_map2);
    data_map3.erase("GPU");
    data_map3.erase("CUSTOM");
    auto lookup_callback3 = [data_map3, compare_test](
                                RedisGcsClient *client, const ClientID &id,
                                const DynamicResourceTable::DataMap &callback_data) {
      compare_test(data_map3, callback_data);
      test->IncrementNumCallbacks();
    };
    RAY_CHECK_OK(client->resource_table().Lookup(job_id, client_id, lookup_callback3));
    // Step 3: Reset the resources to data_map1.
    RAY_CHECK_OK(
        client->resource_table().Update(job_id, client_id, data_map1, update_callback1));
    auto lookup_callback4 = [data_map1, compare_test](
                                RedisGcsClient *client, const ClientID &id,
                                const DynamicResourceTable::DataMap &callback_data) {
      compare_test(data_map1, callback_data);
      test->IncrementNumCallbacks();
    };
    RAY_CHECK_OK(client->resource_table().Lookup(job_id, client_id, lookup_callback4));
    // Step 4: Removing all elements will remove the home Hash table from GCS.
    RAY_CHECK_OK(client->resource_table().RemoveEntries(
        job_id, client_id, {"GPU", "CPU", "CUSTOM", "None-Existent"}, nullptr));
    auto lookup_callback5 = [](RedisGcsClient *client, const ClientID &id,
                               const DynamicResourceTable::DataMap &callback_data) {
      ASSERT_EQ(callback_data.size(), 0);
      test->IncrementNumCallbacks();
      // It is not sure which of notification or lookup callback will come first.
      if (test->NumCallbacks() == expected_count) {
        test->Stop();
      }
    };
    RAY_CHECK_OK(client->resource_table().Lookup(job_id, client_id, lookup_callback5));
    test->Start();
    ASSERT_EQ(test->NumCallbacks(), expected_count);
  }
};
// Registers the dynamic-resource hash table test with the asio-based GCS
// fixture; `test` is wired up first so the helper can drive the event loop.
TEST_F(TestGcsWithAsio, TestHashTable) {
  test = this;
  HashTableTestHelper::TestHashTable(job_id_, client_);
}
#undef TEST_TASK_TABLE_MACRO
} // namespace gcs
} // namespace ray
// Test entry point. Requires exactly three positional arguments: the paths
// to the redis server binary, the redis client binary, and the redis module
// library used by the GCS tests.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  RAY_CHECK(argc == 4);
  ray::REDIS_SERVER_EXEC_PATH = argv[1];
  ray::REDIS_CLIENT_EXEC_PATH = argv[2];
  ray::REDIS_MODULE_LIBRARY_PATH = argv[3];
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/test/redis_job_info_accessor_test.cc
|
C++
|
#include <memory>
#include "gtest/gtest.h"
#include "ray/gcs/pb_util.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/gcs/test/accessor_test_base.h"
#include "ray/util/test_util.h"
namespace ray {
namespace gcs {
/// Fixture for the job-info accessor tests. Pre-builds one live JobTableData
/// entry per job id in `id_to_data_`.
class RedisJobInfoAccessorTest : public AccessorTestBase<JobID, JobTableData> {
 protected:
  virtual void GenTestData() {
    // Populate the id -> data map with `total_job_number_` live jobs.
    for (size_t index = 0; index < total_job_number_; ++index) {
      const JobID id = JobID::FromInt(index);
      id_to_data_[id] = CreateJobTableData(id, /*is_dead*/ false, /*timestamp*/ 1,
                                           /*node_manager_address*/ "",
                                           /*driver_pid*/ index);
    }
  }

  // Number of subscription notifications still outstanding.
  std::atomic<int> subscribe_pending_count_{0};
  // How many job entries GenTestData() creates.
  size_t total_job_number_{100};
};
// End-to-end check of job add / finish / finished-job subscription.
TEST_F(RedisJobInfoAccessorTest, AddAndSubscribe) {
  JobInfoAccessor &job_accessor = gcs_client_->Jobs();
  // SubscribeAll: only *finished* jobs are delivered, so the is_dead
  // assertion holds and no notifications are expected after AsyncAdd below.
  auto on_subscribe = [this](const JobID &job_id, const JobTableData &data) {
    const auto it = id_to_data_.find(job_id);
    RAY_CHECK(it != id_to_data_.end());
    ASSERT_TRUE(data.is_dead());
    --subscribe_pending_count_;
  };
  auto on_done = [this](Status status) {
    RAY_CHECK_OK(status);
    --pending_count_;
  };
  ++pending_count_;
  RAY_CHECK_OK(job_accessor.AsyncSubscribeToFinishedJobs(on_subscribe, on_done));
  WaitPendingDone(wait_pending_timeout_);
  WaitPendingDone(subscribe_pending_count_, wait_pending_timeout_);
  // Register: add every generated job (all still alive at this point).
  for (const auto &item : id_to_data_) {
    ++pending_count_;
    RAY_CHECK_OK(job_accessor.AsyncAdd(item.second, [this](Status status) {
      RAY_CHECK_OK(status);
      --pending_count_;
    }));
  }
  WaitPendingDone(wait_pending_timeout_);
  WaitPendingDone(subscribe_pending_count_, wait_pending_timeout_);
  // Update: mark every job finished, which should trigger exactly one
  // subscription notification per job.
  for (auto &item : id_to_data_) {
    ++pending_count_;
    ++subscribe_pending_count_;
    RAY_CHECK_OK(job_accessor.AsyncMarkFinished(item.first, [this](Status status) {
      RAY_CHECK_OK(status);
      --pending_count_;
    }));
  }
  WaitPendingDone(wait_pending_timeout_);
  WaitPendingDone(subscribe_pending_count_, wait_pending_timeout_);
}
} // namespace gcs
} // namespace ray
// Test entry point. Requires exactly three positional arguments: the paths
// to the redis server binary, the redis client binary, and the redis module
// library used by the GCS tests.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  RAY_CHECK(argc == 4);
  ray::REDIS_SERVER_EXEC_PATH = argv[1];
  ray::REDIS_CLIENT_EXEC_PATH = argv[2];
  ray::REDIS_MODULE_LIBRARY_PATH = argv[3];
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/test/redis_node_info_accessor_test.cc
|
C++
|
#include <memory>
#include "gtest/gtest.h"
#include "ray/gcs/redis_accessor.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/gcs/test/accessor_test_base.h"
namespace ray {
namespace gcs {
/// Fixture for node dynamic-resource accessor tests. Pre-builds a resource
/// map (`resource_type_number_` entries) for each of `node_number_` random
/// node ids.
class NodeDynamicResourceTest : public AccessorTestBase<ClientID, ResourceTableData> {
 protected:
  typedef NodeInfoAccessor::ResourceMap ResourceMap;

  virtual void GenTestData() {
    for (size_t node = 0; node < node_number_; ++node) {
      ResourceMap resources;
      for (size_t type = 0; type < resource_type_number_; ++type) {
        auto capacity = std::make_shared<ResourceTableData>();
        capacity->set_resource_capacity(type);
        const std::string name = std::to_string(type);
        resources[name] = capacity;
        // Remember exactly one resource name to target in the delete tests.
        if (resource_to_delete_.empty()) {
          resource_to_delete_.emplace_back(name);
        }
      }
      id_to_resource_map_[ClientID::FromRandom()] = std::move(resources);
    }
  }

  // Generated test data: node id -> its resource map.
  std::unordered_map<ClientID, ResourceMap> id_to_resource_map_;
  size_t node_number_{100};
  size_t resource_type_number_{5};
  // The subset of resource names removed by the delete tests.
  std::vector<std::string> resource_to_delete_;
  // Outstanding subscription notifications.
  std::atomic<int> sub_pending_count_{0};
  std::atomic<int> do_sub_pending_count_{0};
};
// Writes each node's resource map and reads it back from inside the write
// ack, so the Get is guaranteed to observe the write.
TEST_F(NodeDynamicResourceTest, UpdateAndGet) {
  NodeInfoAccessor &node_accessor = gcs_client_->Nodes();
  for (const auto &node_rs : id_to_resource_map_) {
    ++pending_count_;
    const ClientID &id = node_rs.first;
    // Update
    Status status = node_accessor.AsyncUpdateResources(
        node_rs.first, node_rs.second, [this, &node_accessor, id](Status status) {
          RAY_CHECK_OK(status);
          auto get_callback = [this, id](Status status,
                                         const boost::optional<ResourceMap> &result) {
            --pending_count_;
            RAY_CHECK_OK(status);
            // Every resource written for this node must be present.
            const auto it = id_to_resource_map_.find(id);
            ASSERT_TRUE(result);
            ASSERT_EQ(it->second.size(), result->size());
          };
          // Get
          status = node_accessor.AsyncGetResources(id, get_callback);
          RAY_CHECK_OK(status);
        });
    // Fail fast if the update could not even be issued; the Subscribe test in
    // this fixture already checks the returned Status — do the same here.
    RAY_CHECK_OK(status);
  }
  WaitPendingDone(wait_pending_timeout_);
}
// Populates resources for every node, deletes the `resource_to_delete_`
// subset, and verifies the remaining count via a Get issued from the
// delete's ack.
TEST_F(NodeDynamicResourceTest, Delete) {
  NodeInfoAccessor &node_accessor = gcs_client_->Nodes();
  for (const auto &node_rs : id_to_resource_map_) {
    ++pending_count_;
    // Update
    Status status = node_accessor.AsyncUpdateResources(node_rs.first, node_rs.second,
                                                       [this](Status status) {
                                                         RAY_CHECK_OK(status);
                                                         --pending_count_;
                                                       });
    // Fail fast if the request could not be issued; mirrors the Status check
    // the Subscribe test in this fixture already performs.
    RAY_CHECK_OK(status);
  }
  WaitPendingDone(wait_pending_timeout_);
  for (const auto &node_rs : id_to_resource_map_) {
    ++pending_count_;
    const ClientID &id = node_rs.first;
    // Delete
    Status status = node_accessor.AsyncDeleteResources(
        id, resource_to_delete_, [this, &node_accessor, id](Status status) {
          RAY_CHECK_OK(status);
          // Get
          status = node_accessor.AsyncGetResources(
              id, [this, id](Status status, const boost::optional<ResourceMap> &result) {
                --pending_count_;
                RAY_CHECK_OK(status);
                // Only the deleted resources should be gone.
                const auto it = id_to_resource_map_.find(id);
                ASSERT_TRUE(result);
                ASSERT_EQ(it->second.size() - resource_to_delete_.size(), result->size());
              });
        });
    RAY_CHECK_OK(status);
  }
  WaitPendingDone(wait_pending_timeout_);
}
// Subscribes to resource-change notifications, then deletes a subset of each
// node's resources and verifies that each notification carries the right
// payload (full map for CREATE, deleted keys for DELETE).
TEST_F(NodeDynamicResourceTest, Subscribe) {
  NodeInfoAccessor &node_accessor = gcs_client_->Nodes();
  // Populate resources for every node first.
  for (const auto &node_rs : id_to_resource_map_) {
    ++pending_count_;
    // Update
    // NOTE(review): the returned Status is not checked here, unlike the
    // delete loop below.
    Status status = node_accessor.AsyncUpdateResources(node_rs.first, node_rs.second,
                                                       [this](Status status) {
                                                         RAY_CHECK_OK(status);
                                                         --pending_count_;
                                                       });
  }
  WaitPendingDone(wait_pending_timeout_);
  // Notification handler: added notifications carry the node's full resource
  // map, removal notifications carry only the deleted entries.
  auto subscribe = [this](const ClientID &id,
                          const ResourceChangeNotification &notification) {
    RAY_LOG(INFO) << "receive client id=" << id;
    auto it = id_to_resource_map_.find(id);
    ASSERT_TRUE(it != id_to_resource_map_.end());
    if (notification.IsAdded()) {
      ASSERT_EQ(notification.GetData().size(), it->second.size());
    } else {
      ASSERT_EQ(notification.GetData().size(), resource_to_delete_.size());
    }
    --sub_pending_count_;
  };
  auto done = [this](Status status) {
    RAY_CHECK_OK(status);
    --pending_count_;
  };
  // Subscribe
  ++pending_count_;
  Status status = node_accessor.AsyncSubscribeToResources(subscribe, done);
  RAY_CHECK_OK(status);
  // Each delete should produce one notification per node.
  for (const auto &node_rs : id_to_resource_map_) {
    // Delete
    ++pending_count_;
    ++sub_pending_count_;
    Status status = node_accessor.AsyncDeleteResources(node_rs.first, resource_to_delete_,
                                                       [this](Status status) {
                                                         RAY_CHECK_OK(status);
                                                         --pending_count_;
                                                       });
    RAY_CHECK_OK(status);
  }
  WaitPendingDone(wait_pending_timeout_);
  WaitPendingDone(sub_pending_count_, wait_pending_timeout_);
}
} // namespace gcs
} // namespace ray
// Test entry point. Expects three positional arguments (after gtest has
// consumed its own flags): redis-server binary, redis-cli binary, and the
// Redis module library path used by the test fixtures.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  RAY_CHECK(argc == 4);
  const char *redis_server_path = argv[1];
  const char *redis_client_path = argv[2];
  const char *redis_module_path = argv[3];
  ray::REDIS_SERVER_EXEC_PATH = redis_server_path;
  ray::REDIS_CLIENT_EXEC_PATH = redis_client_path;
  ray::REDIS_MODULE_LIBRARY_PATH = redis_module_path;
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/test/redis_object_info_accessor_test.cc
|
C++
|
#include <unordered_map>
#include <vector>
#include "gtest/gtest.h"
#include "ray/gcs/redis_accessor.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/gcs/test/accessor_test_base.h"
#include "ray/util/test_util.h"
namespace ray {
namespace gcs {
/// Fixture generating random object-location test data: `object_count_`
/// objects, each with `copy_count_` location (manager) entries.
class RedisObjectInfoAccessorTest : public AccessorTestBase<ObjectID, ObjectTableData> {
 protected:
  typedef std::vector<std::shared_ptr<ObjectTableData>> ObjectVector;

  /// Populate object_id_to_data_ with random object IDs, each mapped to
  /// copy_count_ entries whose manager is a random node ID.
  void GenTestData() {
    for (size_t i = 0; i < object_count_; ++i) {
      ObjectVector object_vec;
      // Reserve up front to avoid reallocations while filling the vector.
      object_vec.reserve(copy_count_);
      for (size_t j = 0; j < copy_count_; ++j) {
        auto object = std::make_shared<ObjectTableData>();
        ClientID node_id = ClientID::FromRandom();
        object->set_manager(node_id.Binary());
        object_vec.emplace_back(std::move(object));
      }
      ObjectID id = ObjectID::FromRandom();
      // Move instead of copying the whole vector of shared_ptrs into the map.
      object_id_to_data_[id] = std::move(object_vec);
    }
  }

  /// Generated test data: object id -> its location entries.
  std::unordered_map<ObjectID, ObjectVector> object_id_to_data_;
  /// Number of objects to generate.
  size_t object_count_{100};
  /// Number of location entries per object.
  size_t copy_count_{5};
};
// End-to-end exercise of the object-location accessor: add all locations,
// read them back, subscribe to location changes, remove one location per
// object, and verify both the notifications and the final location counts.
TEST_F(RedisObjectInfoAccessorTest, TestGetAddRemove) {
  ObjectInfoAccessor &object_accessor = gcs_client_->Objects();
  // add && get
  // add: register every (object, manager-node) pair generated by GenTestData.
  for (const auto &elem : object_id_to_data_) {
    for (const auto &item : elem.second) {
      ++pending_count_;
      ClientID node_id = ClientID::FromBinary(item->manager());
      RAY_CHECK_OK(
          object_accessor.AsyncAddLocation(elem.first, node_id, [this](Status status) {
            RAY_CHECK_OK(status);
            --pending_count_;
          }));
    }
  }
  WaitPendingDone(wait_pending_timeout_);
  // get: each object must report exactly copy_count_ locations.
  for (const auto &elem : object_id_to_data_) {
    ++pending_count_;
    size_t total_size = elem.second.size();
    RAY_CHECK_OK(object_accessor.AsyncGetLocations(
        elem.first,
        [this, total_size](Status status, const std::vector<ObjectTableData> &result) {
          RAY_CHECK_OK(status);
          RAY_CHECK(total_size == result.size());
          --pending_count_;
        }));
  }
  WaitPendingDone(wait_pending_timeout_);
  RAY_LOG(INFO) << "Case Add && Get done.";
  // subscribe && delete
  // subscribe: the first object_count_ notifications are the initial
  // snapshots (APPEND_OR_ADD with copy_count_ entries); all later ones are
  // single-entry REMOVE notifications from the delete phase below.
  std::atomic<int> sub_pending_count(0);
  auto subscribe = [this, &sub_pending_count](const ObjectID &object_id,
                                              const ObjectChangeNotification &result) {
    const auto it = object_id_to_data_.find(object_id);
    ASSERT_TRUE(it != object_id_to_data_.end());
    // NOTE: a function-local static persists across invocations of this
    // lambda; it makes the test non-reentrant if run twice in one process.
    static size_t response_count = 1;
    size_t cur_count = response_count <= object_count_ ? copy_count_ : 1;
    ASSERT_EQ(result.GetData().size(), cur_count);
    rpc::GcsChangeMode change_mode = response_count <= object_count_
                                         ? rpc::GcsChangeMode::APPEND_OR_ADD
                                         : rpc::GcsChangeMode::REMOVE;
    ASSERT_EQ(change_mode, result.GetGcsChangeMode());
    ++response_count;
    --sub_pending_count;
  };
  for (const auto &elem : object_id_to_data_) {
    ++pending_count_;
    ++sub_pending_count;
    RAY_CHECK_OK(object_accessor.AsyncSubscribeToLocations(elem.first, subscribe,
                                                           [this](Status status) {
                                                             RAY_CHECK_OK(status);
                                                             --pending_count_;
                                                           }));
  }
  WaitPendingDone(wait_pending_timeout_);
  WaitPendingDone(sub_pending_count, wait_pending_timeout_);
  // delete: remove the first recorded location of each object; each removal
  // should produce one REMOVE notification.
  for (const auto &elem : object_id_to_data_) {
    ++pending_count_;
    ++sub_pending_count;
    const ObjectVector &object_vec = elem.second;
    ClientID node_id = ClientID::FromBinary(object_vec[0]->manager());
    RAY_CHECK_OK(
        object_accessor.AsyncRemoveLocation(elem.first, node_id, [this](Status status) {
          RAY_CHECK_OK(status);
          --pending_count_;
        }));
  }
  WaitPendingDone(wait_pending_timeout_);
  WaitPendingDone(sub_pending_count, wait_pending_timeout_);
  // get: each object must now report one fewer location.
  for (const auto &elem : object_id_to_data_) {
    ++pending_count_;
    size_t total_size = elem.second.size();
    RAY_CHECK_OK(object_accessor.AsyncGetLocations(
        elem.first,
        [this, total_size](Status status, const std::vector<ObjectTableData> &result) {
          RAY_CHECK_OK(status);
          ASSERT_EQ(total_size - 1, result.size());
          --pending_count_;
        }));
  }
  WaitPendingDone(wait_pending_timeout_);
  RAY_LOG(INFO) << "Case Subscribe && Delete done.";
}
} // namespace gcs
} // namespace ray
// Test entry point. gtest strips its own flags first, after which exactly
// three positional arguments must remain: the redis-server binary, the
// redis-cli binary, and the Redis module library path.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  RAY_CHECK(argc == 4);
  // argv[1..3] are consumed in order: server, client, module library.
  ray::REDIS_SERVER_EXEC_PATH = argv[1];
  ray::REDIS_CLIENT_EXEC_PATH = argv[2];
  ray::REDIS_MODULE_LIBRARY_PATH = argv[3];
  const int rc = RUN_ALL_TESTS();
  return rc;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/gcs/test/subscription_executor_test.cc
|
C++
|
#include "ray/gcs/subscription_executor.h"
#include "gtest/gtest.h"
#include "ray/gcs/callback.h"
#include "ray/gcs/entry_change_notification.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/gcs/test/accessor_test_base.h"
namespace ray {
namespace gcs {
/// Fixture for SubscriptionExecutor tests. Tracks three independent
/// counters: notifications received (sub_pending_count_), subscribe-request
/// completions (do_sub_pending_count_), and unsubscribe-request completions
/// (do_unsub_pending_count_). TearDown asserts all three drained to zero.
class SubscriptionExecutorTest : public AccessorTestBase<ActorID, ActorTableData> {
 public:
  typedef SubscriptionExecutor<ActorID, ActorTableData, ActorTable> ActorSubExecutor;

  virtual void SetUp() {
    AccessorTestBase<ActorID, ActorTableData>::SetUp();

    actor_sub_executor_.reset(new ActorSubExecutor(gcs_client_->actor_table()));

    // Notification callback: every received actor update must be for an
    // actor we generated.
    subscribe_ = [this](const ActorID &id, const ActorTableData &data) {
      const auto it = id_to_data_.find(id);
      ASSERT_TRUE(it != id_to_data_.end());
      --sub_pending_count_;
    };
    // Completion callback for subscribe requests.
    sub_done_ = [this](Status status) {
      ASSERT_TRUE(status.ok()) << status;
      --do_sub_pending_count_;
    };
    // Completion callback for unsubscribe requests.
    unsub_done_ = [this](Status status) {
      ASSERT_TRUE(status.ok()) << status;
      --do_unsub_pending_count_;
    };
  }

  virtual void TearDown() {
    AccessorTestBase<ActorID, ActorTableData>::TearDown();
    // All expected callbacks must have fired by the end of each test.
    ASSERT_EQ(sub_pending_count_, 0);
    ASSERT_EQ(do_sub_pending_count_, 0);
    ASSERT_EQ(do_unsub_pending_count_, 0);
  }

 protected:
  // Generate 100 ALIVE actors, one per distinct job id.
  virtual void GenTestData() {
    for (size_t i = 0; i < 100; ++i) {
      std::shared_ptr<ActorTableData> actor = std::make_shared<ActorTableData>();
      actor->set_max_reconstructions(1);
      actor->set_remaining_reconstructions(1);
      JobID job_id = JobID::FromInt(i);
      actor->set_job_id(job_id.Binary());
      actor->set_state(ActorTableData::ALIVE);
      ActorID actor_id = ActorID::Of(job_id, RandomTaskId(), /*parent_task_counter=*/i);
      actor->set_actor_id(actor_id.Binary());
      id_to_data_[actor_id] = actor;
    }
  }

  // Register all generated actors with the GCS; returns how many were
  // registered. Uses pending_count_ (from the base fixture) to track the
  // in-flight register requests.
  size_t AsyncRegisterActorToGcs() {
    ActorInfoAccessor &actor_accessor = gcs_client_->Actors();
    for (const auto &elem : id_to_data_) {
      const auto &actor = elem.second;
      auto done = [this](Status status) {
        ASSERT_TRUE(status.ok());
        --pending_count_;
      };
      ++pending_count_;
      Status status = actor_accessor.AsyncRegister(actor, done);
      RAY_CHECK_OK(status);
    }
    return id_to_data_.size();
  }

 protected:
  std::unique_ptr<ActorSubExecutor> actor_sub_executor_;

  // Outstanding notification / subscribe-done / unsubscribe-done counters.
  std::atomic<int> sub_pending_count_{0};
  std::atomic<int> do_sub_pending_count_{0};
  std::atomic<int> do_unsub_pending_count_{0};

  SubscribeCallback<ActorID, ActorTableData> subscribe_{nullptr};
  StatusCallback sub_done_{nullptr};
  StatusCallback unsub_done_{nullptr};
};
// Subscribing to all actors succeeds once; a second subscribe-all must be
// rejected with an Invalid status. Registering actors after subscribing
// produces one notification per actor.
TEST_F(SubscriptionExecutorTest, SubscribeAllTest) {
  ++do_sub_pending_count_;
  Status status =
      actor_sub_executor_->AsyncSubscribeAll(ClientID::Nil(), subscribe_, sub_done_);
  WaitPendingDone(do_sub_pending_count_, wait_pending_timeout_);
  ASSERT_TRUE(status.ok());
  // Expect one notification per registered actor.
  sub_pending_count_ = id_to_data_.size();
  AsyncRegisterActorToGcs();
  // Duplicate subscribe-all is invalid.
  status = actor_sub_executor_->AsyncSubscribeAll(ClientID::Nil(), subscribe_, sub_done_);
  ASSERT_TRUE(status.IsInvalid());
  WaitPendingDone(sub_pending_count_, wait_pending_timeout_);
}
// Subscribing to a single actor succeeds once per client; a duplicate
// subscribe for the same actor must be rejected with an Invalid status.
TEST_F(SubscriptionExecutorTest, SubscribeOneWithClientIDTest) {
  // Pick any one generated actor. (The original bound a const reference to
  // the temporary returned by begin(); a plain iterator copy is clearer and
  // has the same behavior.)
  const auto item = id_to_data_.begin();
  ++do_sub_pending_count_;
  ++sub_pending_count_;
  Status status = actor_sub_executor_->AsyncSubscribe(ClientID::FromRandom(), item->first,
                                                      subscribe_, sub_done_);
  WaitPendingDone(do_sub_pending_count_, wait_pending_timeout_);
  ASSERT_TRUE(status.ok());
  // Registering the actors triggers the single expected notification.
  AsyncRegisterActorToGcs();
  WaitPendingDone(sub_pending_count_, wait_pending_timeout_);
  // A second subscribe for the same actor is invalid.
  status = actor_sub_executor_->AsyncSubscribe(ClientID::FromRandom(), item->first,
                                               subscribe_, sub_done_);
  ASSERT_TRUE(status.IsInvalid());
}
// Like SubscribeOneWithClientIDTest, but the actor is registered BEFORE
// subscribing; the subscriber must still receive the (initial) notification.
TEST_F(SubscriptionExecutorTest, SubscribeOneAfterActorRegistrationWithClientIDTest) {
  // Pick any one generated actor. (The original bound a const reference to
  // the temporary returned by begin(); a plain iterator copy is clearer and
  // has the same behavior.)
  const auto item = id_to_data_.begin();
  ++do_sub_pending_count_;
  ++sub_pending_count_;
  AsyncRegisterActorToGcs();
  Status status = actor_sub_executor_->AsyncSubscribe(ClientID::FromRandom(), item->first,
                                                      subscribe_, sub_done_);
  WaitPendingDone(do_sub_pending_count_, wait_pending_timeout_);
  ASSERT_TRUE(status.ok());
  WaitPendingDone(sub_pending_count_, wait_pending_timeout_);
  // A second subscribe for the same actor is invalid.
  status = actor_sub_executor_->AsyncSubscribe(ClientID::FromRandom(), item->first,
                                               subscribe_, sub_done_);
  ASSERT_TRUE(status.IsInvalid());
}
// After a subscribe-all, per-actor subscribes must all fail; registrations
// still produce one notification per actor through the all-subscription.
TEST_F(SubscriptionExecutorTest, SubscribeAllAndSubscribeOneTest) {
  ++do_sub_pending_count_;
  Status status =
      actor_sub_executor_->AsyncSubscribeAll(ClientID::Nil(), subscribe_, sub_done_);
  ASSERT_TRUE(status.ok());
  WaitPendingDone(do_sub_pending_count_, wait_pending_timeout_);
  // Every per-actor subscribe is rejected while subscribe-all is active.
  for (const auto &item : id_to_data_) {
    status = actor_sub_executor_->AsyncSubscribe(ClientID::FromRandom(), item.first,
                                                 subscribe_, sub_done_);
    ASSERT_FALSE(status.ok());
  }
  // One notification per registered actor via the all-subscription.
  sub_pending_count_ = id_to_data_.size();
  AsyncRegisterActorToGcs();
  WaitPendingDone(sub_pending_count_, wait_pending_timeout_);
}
// Exercises the subscribe/unsubscribe state machine for one client:
// unsubscribe without a subscription fails; subscribe -> unsubscribe
// round-trips succeed repeatedly; double-unsubscribe fails; and a final
// active subscription receives one notification per registered actor.
TEST_F(SubscriptionExecutorTest, UnsubscribeTest) {
  ClientID client_id = ClientID::FromRandom();
  Status status;
  // Unsubscribing before ever subscribing is invalid.
  for (const auto &item : id_to_data_) {
    status = actor_sub_executor_->AsyncUnsubscribe(client_id, item.first, unsub_done_);
    ASSERT_TRUE(status.IsInvalid());
  }
  // Subscribe to every actor.
  for (const auto &item : id_to_data_) {
    ++do_sub_pending_count_;
    status =
        actor_sub_executor_->AsyncSubscribe(client_id, item.first, subscribe_, sub_done_);
    ASSERT_TRUE(status.ok());
  }
  WaitPendingDone(do_sub_pending_count_, wait_pending_timeout_);
  // Unsubscribe from every actor.
  for (const auto &item : id_to_data_) {
    ++do_unsub_pending_count_;
    status = actor_sub_executor_->AsyncUnsubscribe(client_id, item.first, unsub_done_);
    ASSERT_TRUE(status.ok());
  }
  WaitPendingDone(do_unsub_pending_count_, wait_pending_timeout_);
  // A second unsubscribe must fail: the subscription is already gone.
  for (const auto &item : id_to_data_) {
    status = actor_sub_executor_->AsyncUnsubscribe(client_id, item.first, unsub_done_);
    ASSERT_TRUE(!status.ok());
  }
  // Re-subscribe works after a full unsubscribe.
  for (const auto &item : id_to_data_) {
    ++do_sub_pending_count_;
    status =
        actor_sub_executor_->AsyncSubscribe(client_id, item.first, subscribe_, sub_done_);
    ASSERT_TRUE(status.ok());
  }
  WaitPendingDone(do_sub_pending_count_, wait_pending_timeout_);
  // ...and unsubscribing again also works.
  for (const auto &item : id_to_data_) {
    ++do_unsub_pending_count_;
    status = actor_sub_executor_->AsyncUnsubscribe(client_id, item.first, unsub_done_);
    ASSERT_TRUE(status.ok());
  }
  WaitPendingDone(do_unsub_pending_count_, wait_pending_timeout_);
  // Subscribe one last time and verify notifications are delivered for all
  // registered actors.
  for (const auto &item : id_to_data_) {
    ++do_sub_pending_count_;
    status =
        actor_sub_executor_->AsyncSubscribe(client_id, item.first, subscribe_, sub_done_);
    ASSERT_TRUE(status.ok());
  }
  WaitPendingDone(do_sub_pending_count_, wait_pending_timeout_);
  sub_pending_count_ = id_to_data_.size();
  AsyncRegisterActorToGcs();
  WaitPendingDone(pending_count_, wait_pending_timeout_);
  WaitPendingDone(sub_pending_count_, wait_pending_timeout_);
}
} // namespace gcs
} // namespace ray
// Test entry point. The three positional arguments remaining after gtest
// flag parsing are the redis-server binary, the redis-cli binary, and the
// Redis module library path.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  RAY_CHECK(argc == 4);
  char **redis_paths = argv + 1;
  ray::REDIS_SERVER_EXEC_PATH = redis_paths[0];
  ray::REDIS_CLIENT_EXEC_PATH = redis_paths[1];
  ray::REDIS_MODULE_LIBRARY_PATH = redis_paths[2];
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/object_buffer_pool.cc
|
C++
|
#include "ray/object_manager/object_buffer_pool.h"

#include <algorithm>

#include "ray/common/status.h"
#include "ray/util/logging.h"
namespace ray {
/// Connects a plasma client to the given store socket.
///
/// \param store_socket_name Socket name of the plasma store to connect to.
/// \param chunk_size Maximum size of a single transfer chunk.
ObjectBufferPool::ObjectBufferPool(const std::string &store_socket_name,
                                   uint64_t chunk_size)
    // Initialize members in the initializer list (matching declaration
    // order) instead of assigning store_socket_name_ in the body.
    : default_chunk_size_(chunk_size), store_socket_name_(store_socket_name) {
  // 300 retries with the default retry interval; aborts via RAY_ARROW_CHECK_OK
  // if the store is unreachable.
  RAY_ARROW_CHECK_OK(store_client_.Connect(store_socket_name_.c_str(), "", 0, 300));
}
ObjectBufferPool::~ObjectBufferPool() {
  // Abort everything in progress.
  // AbortGet/AbortCreate erase entries from the maps they operate on, so
  // iterate over copies to avoid invalidating live iterators.
  auto get_buf_state_copy = get_buffer_state_;
  for (const auto &pair : get_buf_state_copy) {
    AbortGet(pair.first);
  }
  auto create_buf_state_copy = create_buffer_state_;
  for (const auto &pair : create_buf_state_copy) {
    AbortCreate(pair.first);
  }
  // Both maps must be fully drained before disconnecting the plasma client.
  RAY_CHECK(get_buffer_state_.empty());
  RAY_CHECK(create_buffer_state_.empty());
  RAY_ARROW_CHECK_OK(store_client_.Disconnect());
}
uint64_t ObjectBufferPool::GetNumChunks(uint64_t data_size) {
  // ceil(data_size / default_chunk_size_). Written as quotient-plus-remainder
  // instead of the usual "(n + chunk - 1) / chunk" so the addition cannot
  // overflow when data_size is close to UINT64_MAX.
  return data_size / default_chunk_size_ +
         (data_size % default_chunk_size_ != 0 ? 1 : 0);
}
uint64_t ObjectBufferPool::GetBufferLength(uint64_t chunk_index, uint64_t data_size) {
  // Every chunk is default_chunk_size_ bytes except a trailing partial
  // chunk, whose length is the remainder of data_size.
  if ((chunk_index + 1) * default_chunk_size_ > data_size) {
    return data_size % default_chunk_size_;
  }
  return default_chunk_size_;
}
// Returns a readable chunk of a local object, lazily fetching the plasma
// buffer and building the chunk table on first access. Each successful call
// takes one reference; callers pair it with ReleaseGetChunk.
// NOTE: metadata_size is not used directly here; data_size must already be
// the sum of object data + metadata (checked against the plasma buffer
// below).
std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status> ObjectBufferPool::GetChunk(
    const ObjectID &object_id, uint64_t data_size, uint64_t metadata_size,
    uint64_t chunk_index) {
  std::lock_guard<std::mutex> lock(pool_mutex_);
  if (get_buffer_state_.count(object_id) == 0) {
    plasma::ObjectBuffer object_buffer;
    plasma::ObjectID plasma_id = object_id.ToPlasmaId();
    // Timeout 0: non-blocking get; data stays null if the object isn't local.
    RAY_ARROW_CHECK_OK(store_client_.Get(&plasma_id, 1, 0, &object_buffer));
    if (object_buffer.data == nullptr) {
      RAY_LOG(ERROR) << "Failed to get object";
      return std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status>(
          errored_chunk_,
          ray::Status::IOError("Unable to obtain object chunk, object not local."));
    }
    // The chunk table treats data+metadata as one contiguous region, so the
    // metadata buffer must immediately follow the data buffer.
    RAY_CHECK(object_buffer.metadata->data() ==
              object_buffer.data->data() + object_buffer.data->size());
    RAY_CHECK(data_size == static_cast<uint64_t>(object_buffer.data->size() +
                                                 object_buffer.metadata->size()));
    auto *data = const_cast<uint8_t *>(object_buffer.data->data());
    uint64_t num_chunks = GetNumChunks(data_size);
    get_buffer_state_.emplace(
        std::piecewise_construct, std::forward_as_tuple(object_id),
        std::forward_as_tuple(BuildChunks(object_id, data, data_size)));
    RAY_CHECK(get_buffer_state_[object_id].chunk_info.size() == num_chunks);
  }
  // One reference per outstanding chunk; released in ReleaseGetChunk.
  get_buffer_state_[object_id].references++;
  return std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status>(
      get_buffer_state_[object_id].chunk_info[chunk_index], ray::Status::OK());
}
// Drops one reference to the object's get-buffer taken by GetChunk; when the
// last reference is gone, the plasma buffer is released and the bookkeeping
// entry is erased. chunk_index is accepted for interface symmetry but does
// not affect which reference is dropped.
void ObjectBufferPool::ReleaseGetChunk(const ObjectID &object_id, uint64_t chunk_index) {
  std::lock_guard<std::mutex> lock(pool_mutex_);
  auto &state = get_buffer_state_[object_id];
  if (--state.references == 0) {
    RAY_ARROW_CHECK_OK(store_client_.Release(object_id.ToPlasmaId()));
    get_buffer_state_.erase(object_id);
  }
}
void ObjectBufferPool::AbortGet(const ObjectID &object_id) {
std::lock_guard<std::mutex> lock(pool_mutex_);
RAY_ARROW_CHECK_OK(store_client_.Release(object_id.ToPlasmaId()));
get_buffer_state_.erase(object_id);
}
// Returns a writable chunk of a newly-created (or in-progress) plasma
// buffer. The first call for an object creates the buffer and the chunk
// table; each chunk may be referenced by at most one writer at a time.
std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status> ObjectBufferPool::CreateChunk(
    const ObjectID &object_id, uint64_t data_size, uint64_t metadata_size,
    uint64_t chunk_index) {
  std::lock_guard<std::mutex> lock(pool_mutex_);
  if (create_buffer_state_.count(object_id) == 0) {
    const plasma::ObjectID plasma_id = object_id.ToPlasmaId();
    // data_size includes the metadata; plasma wants the object size alone.
    int64_t object_size = data_size - metadata_size;
    // Try to create shared buffer.
    std::shared_ptr<Buffer> data;
    arrow::Status s =
        store_client_.Create(plasma_id, object_size, NULL, metadata_size, &data);
    std::vector<boost::asio::mutable_buffer> buffer;
    if (!s.ok()) {
      // Create failed. The object may already exist locally. If something else went
      // wrong, another chunk will succeed in creating the buffer, and this
      // chunk will eventually make it here via pull requests.
      return std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status>(
          errored_chunk_, ray::Status::IOError(s.message()));
    }
    // Read object into store.
    uint8_t *mutable_data = data->mutable_data();
    uint64_t num_chunks = GetNumChunks(data_size);
    create_buffer_state_.emplace(
        std::piecewise_construct, std::forward_as_tuple(object_id),
        std::forward_as_tuple(BuildChunks(object_id, mutable_data, data_size)));
    RAY_LOG(DEBUG) << "Created object " << object_id
                   << " in plasma store, number of chunks: " << num_chunks
                   << ", chunk index: " << chunk_index;
    RAY_CHECK(create_buffer_state_[object_id].chunk_info.size() == num_chunks);
  }
  if (create_buffer_state_[object_id].chunk_state[chunk_index] !=
      CreateChunkState::AVAILABLE) {
    // There can be only one reference to this chunk at any given time.
    return std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status>(
        errored_chunk_,
        ray::Status::IOError("Chunk already referenced by another thread."));
  }
  // Mark the chunk as claimed; resolved later by SealChunk or
  // AbortCreateChunk.
  create_buffer_state_[object_id].chunk_state[chunk_index] = CreateChunkState::REFERENCED;
  return std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status>(
      create_buffer_state_[object_id].chunk_info[chunk_index], ray::Status::OK());
}
void ObjectBufferPool::AbortCreateChunk(const ObjectID &object_id,
const uint64_t chunk_index) {
std::lock_guard<std::mutex> lock(pool_mutex_);
RAY_CHECK(create_buffer_state_[object_id].chunk_state[chunk_index] ==
CreateChunkState::REFERENCED);
create_buffer_state_[object_id].chunk_state[chunk_index] = CreateChunkState::AVAILABLE;
if (create_buffer_state_[object_id].num_seals_remaining ==
create_buffer_state_[object_id].chunk_state.size()) {
// If chunk_state is AVAILABLE at every chunk_index and
// num_seals_remaining == num_chunks, this is back to the initial state
// right before the first CreateChunk.
bool abort = true;
for (auto chunk_state : create_buffer_state_[object_id].chunk_state) {
abort &= chunk_state == CreateChunkState::AVAILABLE;
}
if (abort) {
AbortCreate(object_id);
}
}
}
void ObjectBufferPool::SealChunk(const ObjectID &object_id, const uint64_t chunk_index) {
std::lock_guard<std::mutex> lock(pool_mutex_);
RAY_CHECK(create_buffer_state_[object_id].chunk_state[chunk_index] ==
CreateChunkState::REFERENCED);
create_buffer_state_[object_id].chunk_state[chunk_index] = CreateChunkState::SEALED;
create_buffer_state_[object_id].num_seals_remaining--;
if (create_buffer_state_[object_id].num_seals_remaining == 0) {
const plasma::ObjectID plasma_id = object_id.ToPlasmaId();
RAY_ARROW_CHECK_OK(store_client_.Seal(plasma_id));
RAY_ARROW_CHECK_OK(store_client_.Release(plasma_id));
create_buffer_state_.erase(object_id);
RAY_LOG(DEBUG) << "Have received all chunks for object " << object_id
<< ", last chunk index: " << chunk_index;
}
}
void ObjectBufferPool::AbortCreate(const ObjectID &object_id) {
const plasma::ObjectID plasma_id = object_id.ToPlasmaId();
RAY_ARROW_CHECK_OK(store_client_.Release(plasma_id));
RAY_ARROW_CHECK_OK(store_client_.Abort(plasma_id));
create_buffer_state_.erase(object_id);
}
std::vector<ObjectBufferPool::ChunkInfo> ObjectBufferPool::BuildChunks(
const ObjectID &object_id, uint8_t *data, uint64_t data_size) {
uint64_t space_remaining = data_size;
std::vector<ChunkInfo> chunks;
int64_t position = 0;
while (space_remaining) {
position = data_size - space_remaining;
if (space_remaining < default_chunk_size_) {
chunks.emplace_back(chunks.size(), data + position, space_remaining);
space_remaining = 0;
} else {
chunks.emplace_back(chunks.size(), data + position, default_chunk_size_);
space_remaining -= default_chunk_size_;
}
}
return chunks;
}
void ObjectBufferPool::FreeObjects(const std::vector<ObjectID> &object_ids) {
std::vector<plasma::ObjectID> plasma_ids;
plasma_ids.reserve(object_ids.size());
for (const auto &id : object_ids) {
plasma_ids.push_back(id.ToPlasmaId());
}
std::lock_guard<std::mutex> lock(pool_mutex_);
RAY_ARROW_CHECK_OK(store_client_.Delete(plasma_ids));
}
// Renders the pool's bookkeeping sizes for debug output.
std::string ObjectBufferPool::DebugString() const {
  std::lock_guard<std::mutex> lock(pool_mutex_);
  std::stringstream out;
  out << "BufferPool:"
      << "\n- get buffer state map size: " << get_buffer_state_.size()
      << "\n- create buffer state map size: " << create_buffer_state_.size();
  return out.str();
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/object_buffer_pool.h
|
C/C++ Header
|
#ifndef RAY_OBJECT_MANAGER_OBJECT_BUFFER_POOL_H
#define RAY_OBJECT_MANAGER_OBJECT_BUFFER_POOL_H
#include <list>
#include <memory>
#include <mutex>
#include <vector>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/bind.hpp>
#include "plasma/client.h"
#include "ray/common/id.h"
#include "ray/common/status.h"
namespace ray {
/// \class ObjectBufferPool Exposes chunks of object buffers for use by the ObjectManager.
class ObjectBufferPool {
 public:
  /// Information needed to read or write an object chunk.
  /// This is the structure returned whenever an object chunk is
  /// accessed via Get and Create.
  struct ChunkInfo {
    ChunkInfo(uint64_t chunk_index, uint8_t *data, uint64_t buffer_length)
        : chunk_index(chunk_index), data(data), buffer_length(buffer_length){};
    /// The index of this chunk within the object.
    uint64_t chunk_index;
    /// A pointer to the start position of this object chunk.
    uint8_t *data;
    /// The size of this object chunk.
    uint64_t buffer_length;
  };

  /// Constructor.
  ///
  /// \param store_socket_name The socket name of the store to which plasma clients
  /// connect.
  /// \param chunk_size The chunk size into which objects are to be split.
  ObjectBufferPool(const std::string &store_socket_name, const uint64_t chunk_size);

  ~ObjectBufferPool();

  /// This object cannot be copied due to pool_mutex.
  RAY_DISALLOW_COPY_AND_ASSIGN(ObjectBufferPool);

  /// Computes the number of chunks needed to transfer an object and its metadata.
  ///
  /// \param data_size The size of the object + metadata.
  /// \return The number of chunks into which the object will be split.
  uint64_t GetNumChunks(uint64_t data_size);

  /// Computes the buffer length of a chunk of an object.
  ///
  /// \param chunk_index The chunk index for which to obtain the buffer length.
  /// \param data_size The size of the object + metadata.
  /// \return The buffer length of the chunk at chunk_index.
  uint64_t GetBufferLength(uint64_t chunk_index, uint64_t data_size);

  /// Returns a chunk of an object at the given chunk_index. The object chunk serves
  /// as the data that is to be written to a connection as part of sending an object to
  /// a remote node.
  ///
  /// \param object_id The ObjectID.
  /// \param data_size The sum of the object size and metadata size.
  /// \param metadata_size The size of the metadata.
  /// \param chunk_index The index of the chunk.
  /// \return A pair consisting of a ChunkInfo and status of invoking this method.
  /// An IOError status is returned if the Get call on the plasma store fails.
  std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status> GetChunk(
      const ObjectID &object_id, uint64_t data_size, uint64_t metadata_size,
      uint64_t chunk_index);

  /// When a chunk is done being used as part of a get, this method releases the chunk.
  /// If all chunks of an object are released, the object buffer will be released.
  ///
  /// \param object_id The object_id of the buffer to release.
  /// \param chunk_index The index of the chunk.
  void ReleaseGetChunk(const ObjectID &object_id, uint64_t chunk_index);

  /// Returns a chunk of an empty object at the given chunk_index. The object chunk
  /// serves as the buffer that is to be written to by a connection receiving an object
  /// from a remote node. Only one thread is permitted to create the object chunk at
  /// chunk_index. Multiple threads attempting to create the same object chunk will
  /// result in one succeeding. The ObjectManager is responsible for handling
  /// create failures. This method will fail if it's invoked on a chunk_index on which
  /// SealChunk has already been invoked.
  ///
  /// \param object_id The ObjectID.
  /// \param data_size The sum of the object size and metadata size.
  /// \param metadata_size The size of the metadata.
  /// \param chunk_index The index of the chunk.
  /// \return A pair consisting of ChunkInfo and status of invoking this method.
  /// An IOError status is returned if object creation on the store client fails,
  /// or if create is invoked consecutively on the same chunk
  /// (with no intermediate AbortCreateChunk).
  std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status> CreateChunk(
      const ObjectID &object_id, uint64_t data_size, uint64_t metadata_size,
      uint64_t chunk_index);

  /// Abort the create operation associated with a chunk at chunk_index.
  /// This method will fail if it's invoked on a chunk_index on which
  /// CreateChunk was not first invoked, or a chunk_index on which
  /// SealChunk has already been invoked.
  ///
  /// \param object_id The ObjectID.
  /// \param chunk_index The index of the chunk.
  void AbortCreateChunk(const ObjectID &object_id, uint64_t chunk_index);

  /// Seal the object associated with a create operation. This is invoked whenever
  /// a chunk is successfully written to.
  /// This method will fail if it's invoked on a chunk_index on which
  /// CreateChunk was not first invoked, or a chunk_index on which
  /// SealChunk or AbortCreateChunk has already been invoked.
  ///
  /// \param object_id The ObjectID.
  /// \param chunk_index The index of the chunk.
  void SealChunk(const ObjectID &object_id, uint64_t chunk_index);

  /// Free a list of objects from object store.
  ///
  /// \param object_ids The list of ObjectIDs to be deleted.
  /// \return Void.
  void FreeObjects(const std::vector<ObjectID> &object_ids);

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;

 private:
  /// Abort the create operation associated with an object. This destroys the buffer
  /// state, including create operations in progress for all chunks of the object.
  void AbortCreate(const ObjectID &object_id);

  /// Abort the get operation associated with an object.
  void AbortGet(const ObjectID &object_id);

  /// Splits an object into ceil(data_size/chunk_size) chunks, which will
  /// either be read or written to in parallel.
  std::vector<ChunkInfo> BuildChunks(const ObjectID &object_id, uint8_t *data,
                                     uint64_t data_size);

  /// Holds the state of a get buffer.
  struct GetBufferState {
    GetBufferState() {}
    GetBufferState(std::vector<ChunkInfo> chunk_info) : chunk_info(chunk_info) {}
    /// A vector maintaining information about the chunks which comprise
    /// an object.
    std::vector<ChunkInfo> chunk_info;
    /// The number of references that currently rely on this buffer.
    /// Once this reaches 0, the buffer is released and this object is erased
    /// from get_buffer_state_.
    uint64_t references = 0;
  };

  /// The state of a chunk associated with a create operation.
  enum class CreateChunkState : unsigned int { AVAILABLE = 0, REFERENCED, SEALED };

  /// Holds the state of a create buffer.
  struct CreateBufferState {
    CreateBufferState() {}
    CreateBufferState(std::vector<ChunkInfo> chunk_info)
        : chunk_info(chunk_info),
          chunk_state(chunk_info.size(), CreateChunkState::AVAILABLE),
          num_seals_remaining(chunk_info.size()) {}
    /// A vector maintaining information about the chunks which comprise
    /// an object.
    std::vector<ChunkInfo> chunk_info;
    /// The state of each chunk, which is used to enforce strict state
    /// transitions of each chunk.
    std::vector<CreateChunkState> chunk_state;
    /// The number of chunks left to seal before the buffer is sealed.
    uint64_t num_seals_remaining;
  };

  /// Returned when GetChunk or CreateChunk fails.
  const ChunkInfo errored_chunk_ = {0, nullptr, 0};

  /// Mutex on public methods for thread-safe operations on
  /// get_buffer_state_, create_buffer_state_, and store_client_.
  mutable std::mutex pool_mutex_;
  /// Determines the maximum chunk size to be transferred by a single thread.
  const uint64_t default_chunk_size_;
  /// Per-object state for buffers currently being read (Get side).
  std::unordered_map<ray::ObjectID, GetBufferState> get_buffer_state_;
  /// Per-object state for buffers currently being written (Create side).
  std::unordered_map<ray::ObjectID, CreateBufferState> create_buffer_state_;
  /// Plasma client pool.
  plasma::PlasmaClient store_client_;
  /// Socket name of plasma store.
  std::string store_socket_name_;
};
} // namespace ray
#endif // RAY_OBJECT_MANAGER_OBJECT_BUFFER_POOL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/object_directory.cc
|
C++
|
#include "ray/object_manager/object_directory.h"
namespace ray {
// Stores the event loop used for posting deferred subscription callbacks and
// the shared GCS client used for all object-table operations.
ObjectDirectory::ObjectDirectory(boost::asio::io_service &io_service,
                                 std::shared_ptr<gcs::GcsClient> &gcs_client)
    : io_service_(io_service), gcs_client_(gcs_client) {}
namespace {
using ray::rpc::GcsChangeMode;
using ray::rpc::GcsNodeInfo;
using ray::rpc::ObjectTableData;
/// Process a notification of the object table entries and store the result in
/// node_ids. This assumes that node_ids already contains the result of the
/// object table entries up to but not including this notification.
void UpdateObjectLocations(bool is_added,
const std::vector<ObjectTableData> &location_updates,
std::shared_ptr<gcs::GcsClient> gcs_client,
std::unordered_set<ClientID> *node_ids) {
// location_updates contains the updates of locations of the object.
// with GcsChangeMode, we can determine whether the update mode is
// addition or deletion.
for (const auto &object_table_data : location_updates) {
ClientID node_id = ClientID::FromBinary(object_table_data.manager());
if (is_added) {
node_ids->insert(node_id);
} else {
node_ids->erase(node_id);
}
}
// Filter out the removed clients from the object locations.
for (auto it = node_ids->begin(); it != node_ids->end();) {
if (gcs_client->Nodes().IsRemoved(*it)) {
it = node_ids->erase(it);
} else {
it++;
}
}
}
} // namespace
// Records in the GCS that this node now holds object_id. Fire-and-forget:
// no completion callback is registered. object_info is unused here.
ray::Status ObjectDirectory::ReportObjectAdded(
    const ObjectID &object_id, const ClientID &client_id,
    const object_manager::protocol::ObjectInfoT &object_info) {
  RAY_LOG(DEBUG) << "Reporting object added to GCS " << object_id;
  return gcs_client_->Objects().AsyncAddLocation(object_id, client_id, nullptr);
}
// Records in the GCS that this node no longer holds object_id.
// Fire-and-forget: no completion callback is registered. object_info is
// unused here.
ray::Status ObjectDirectory::ReportObjectRemoved(
    const ObjectID &object_id, const ClientID &client_id,
    const object_manager::protocol::ObjectInfoT &object_info) {
  RAY_LOG(DEBUG) << "Reporting object removed to GCS " << object_id;
  return gcs_client_->Objects().AsyncRemoveLocation(object_id, client_id, nullptr);
}
// Fills connection_info.ip/port from the GCS node cache. Fields are left
// untouched when the node is unknown or not ALIVE, so Connected() remains
// false in those cases.
void ObjectDirectory::LookupRemoteConnectionInfo(
    RemoteConnectionInfo &connection_info) const {
  auto node_info = gcs_client_->Nodes().Get(connection_info.client_id);
  if (!node_info) {
    return;
  }
  // The cached entry must describe the node we asked about.
  RAY_CHECK(ClientID::FromBinary(node_info->node_id()) == connection_info.client_id);
  if (node_info->state() == GcsNodeInfo::ALIVE) {
    connection_info.ip = node_info->node_manager_address();
    connection_info.port = static_cast<uint16_t>(node_info->object_manager_port());
  }
}
std::vector<RemoteConnectionInfo> ObjectDirectory::LookupAllRemoteConnections() const {
std::vector<RemoteConnectionInfo> remote_connections;
const auto &node_map = gcs_client_->Nodes().GetAll();
for (const auto &item : node_map) {
RemoteConnectionInfo info(item.first);
LookupRemoteConnectionInfo(info);
if (info.Connected() && info.client_id != gcs_client_->Nodes().GetSelfId()) {
remote_connections.push_back(info);
}
}
return remote_connections;
}
// Called when the GCS marks a node as removed. Scrubs that node from every
// subscribed object's location set and re-notifies the listeners.
void ObjectDirectory::HandleClientRemoved(const ClientID &client_id) {
  for (auto &listener : listeners_) {
    const ObjectID &object_id = listener.first;
    if (listener.second.current_object_locations.count(client_id) > 0) {
      // If the subscribed object has the removed client as a location, update
      // its locations with an empty update so that the location will be removed.
      // The empty update list adds nothing; the actual removal happens inside
      // UpdateObjectLocations via its IsRemoved() filtering pass.
      UpdateObjectLocations(/*is_added*/ true, {}, gcs_client_,
                            &listener.second.current_object_locations);
      // Re-call all the subscribed callbacks for the object, since its
      // locations have changed.
      for (const auto &callback_pair : listener.second.callbacks) {
        // It is safe to call the callback directly since this is already running
        // in the subscription callback stack.
        callback_pair.second(object_id, listener.second.current_object_locations);
      }
    }
  }
}
// Registers `callback` (keyed by callback_id) for location changes of
// object_id. The first subscriber for an object installs a single GCS
// subscription shared by all of that object's callbacks; later subscribers
// are served from the cached location set.
ray::Status ObjectDirectory::SubscribeObjectLocations(const UniqueID &callback_id,
                                                      const ObjectID &object_id,
                                                      const OnLocationsFound &callback) {
  ray::Status status = ray::Status::OK();
  auto it = listeners_.find(object_id);
  if (it == listeners_.end()) {
    it = listeners_.emplace(object_id, LocationListenerState()).first;
    // Captures only `this`; safe as long as the ObjectDirectory outlives the
    // GCS subscription, which holds this lambda.
    auto object_notification_callback =
        [this](const ObjectID &object_id,
               const gcs::ObjectChangeNotification &object_notification) {
          // Objects are added to this map in SubscribeObjectLocations.
          auto it = listeners_.find(object_id);
          // Do nothing for objects we are not listening for.
          if (it == listeners_.end()) {
            return;
          }
          // Once this flag is set to true, it should never go back to false.
          it->second.subscribed = true;
          // Update entries for this object.
          UpdateObjectLocations(object_notification.IsAdded(),
                                object_notification.GetData(), gcs_client_,
                                &it->second.current_object_locations);
          // Copy the callbacks so that the callbacks can unsubscribe without interrupting
          // looping over the callbacks.
          auto callbacks = it->second.callbacks;
          // Call all callbacks associated with the object id locations we have
          // received. This notifies the client even if the list of locations is
          // empty, since this may indicate that the objects have been evicted from
          // all nodes.
          for (const auto &callback_pair : callbacks) {
            // It is safe to call the callback directly since this is already running
            // in the subscription callback stack.
            callback_pair.second(object_id, it->second.current_object_locations);
          }
        };
    status = gcs_client_->Objects().AsyncSubscribeToLocations(
        object_id, object_notification_callback, /*done*/ nullptr);
  }
  auto &listener_state = it->second;
  // TODO(hme): Make this fatal after implementing Pull suppression.
  // Duplicate registration for the same callback_id is silently ignored.
  if (listener_state.callbacks.count(callback_id) > 0) {
    return ray::Status::OK();
  }
  listener_state.callbacks.emplace(callback_id, callback);
  // If we previously received some notifications about the object's locations,
  // immediately notify the caller of the current known locations.
  // Posted to the event loop (with copies of the locations) rather than
  // invoked inline, so the caller never re-enters from this call.
  if (listener_state.subscribed) {
    auto &locations = listener_state.current_object_locations;
    io_service_.post(
        [callback, locations, object_id]() { callback(object_id, locations); });
  }
  return status;
}
ray::Status ObjectDirectory::UnsubscribeObjectLocations(const UniqueID &callback_id,
                                                        const ObjectID &object_id) {
  // Remove a single callback registration; once the last callback for an
  // object is gone, drop the shared GCS subscription and the listener entry.
  auto listener_it = listeners_.find(object_id);
  if (listener_it == listeners_.end()) {
    return ray::Status::OK();
  }
  auto &callbacks = listener_it->second.callbacks;
  callbacks.erase(callback_id);
  if (!callbacks.empty()) {
    return ray::Status::OK();
  }
  ray::Status status =
      gcs_client_->Objects().AsyncUnsubscribeToLocations(object_id, /*done*/ nullptr);
  listeners_.erase(listener_it);
  return status;
}
ray::Status ObjectDirectory::LookupLocations(const ObjectID &object_id,
                                             const OnLocationsFound &callback) {
  // One-shot lookup of an object's locations. Served from the local cache
  // when a live subscription exists; otherwise queried from the GCS.
  ray::Status status;
  auto it = listeners_.find(object_id);
  if (it != listeners_.end() && it->second.subscribed) {
    // If we have locations cached due to a concurrent SubscribeObjectLocations
    // call, and we have received at least one notification from the GCS about
    // the object's creation, then call the callback immediately with the
    // cached locations.
    auto &locations = it->second.current_object_locations;
    // Posted (with a copy of the locations) so the caller is not re-entered
    // synchronously from within this method.
    io_service_.post(
        [callback, object_id, locations]() { callback(object_id, locations); });
  } else {
    // We do not have any locations cached due to a concurrent
    // SubscribeObjectLocations call, so look up the object's locations
    // directly from the GCS.
    status = gcs_client_->Objects().AsyncGetLocations(
        object_id,
        [this, object_id, callback](
            Status status, const std::vector<ObjectTableData> &location_updates) {
          RAY_CHECK(status.ok())
              << "Failed to get object location from GCS: " << status.message();
          // Build the set of current locations based on the entries in the log.
          std::unordered_set<ClientID> node_ids;
          UpdateObjectLocations(/*is_added*/ true, location_updates, gcs_client_,
                                &node_ids);
          // It is safe to call the callback directly since this is already running
          // in the GCS client's lookup callback stack.
          callback(object_id, node_ids);
        });
  }
  return status;
}
std::string ObjectDirectory::DebugString() const {
  // One-line summary of the directory's state for debug dumps.
  std::stringstream ss;
  ss << "ObjectDirectory:"
     << "\n- num listeners: " << listeners_.size();
  return ss.str();
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/object_directory.h
|
C/C++ Header
|
#ifndef RAY_OBJECT_MANAGER_OBJECT_DIRECTORY_H
#define RAY_OBJECT_MANAGER_OBJECT_DIRECTORY_H
#include <memory>
#include <mutex>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "plasma/client.h"
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/object_manager/format/object_manager_generated.h"
namespace ray {
/// Connection information for remote object managers.
/// Connection information for remote object managers.
struct RemoteConnectionInfo {
  RemoteConnectionInfo(const ClientID &id) : client_id(id) {}

  /// Returns whether there is enough information to connect to the remote
  /// object manager. An empty IP means the node has not (yet) been resolved
  /// as alive via the GCS.
  bool Connected() const { return !ip.empty(); }

  /// The node this connection information refers to.
  ClientID client_id;
  /// IP address of the remote object manager; empty until populated.
  std::string ip;
  /// Port of the remote object manager. Default-initialized to 0 so an
  /// unpopulated struct never exposes an indeterminate value.
  uint16_t port = 0;
};
/// Abstract interface for looking up, subscribing to, and reporting object
/// locations across the cluster.
class ObjectDirectoryInterface {
 public:
  virtual ~ObjectDirectoryInterface() {}

  /// Lookup how to connect to a remote object manager.
  ///
  /// \param connection_info The connection information to fill out. This
  /// should be pre-populated with the requested client ID. If the directory
  /// has information about the requested client, then the rest of the fields
  /// in this struct will be populated accordingly.
  virtual void LookupRemoteConnectionInfo(
      RemoteConnectionInfo &connection_info) const = 0;

  /// Get information for all connected remote object managers.
  ///
  /// \return A vector of information for all connected remote object managers.
  virtual std::vector<RemoteConnectionInfo> LookupAllRemoteConnections() const = 0;

  /// Callback for object location notifications. Invoked with the object's ID
  /// and the full set of nodes currently known to hold the object.
  using OnLocationsFound = std::function<void(const ray::ObjectID &object_id,
                                              const std::unordered_set<ray::ClientID> &)>;

  /// Lookup object locations. Callback may be invoked with empty list of client ids.
  ///
  /// \param object_id The object's ObjectID.
  /// \param callback Invoked with (possibly empty) list of client ids and object_id.
  /// \return Status of whether async call to backend succeeded.
  virtual ray::Status LookupLocations(const ObjectID &object_id,
                                      const OnLocationsFound &callback) = 0;

  /// Handle the removal of an object manager client. This updates the
  /// locations of all subscribed objects that have the removed client as a
  /// location, and fires the subscribed callbacks for those objects.
  ///
  /// \param client_id The object manager client that was removed.
  virtual void HandleClientRemoved(const ClientID &client_id) = 0;

  /// Subscribe to be notified of locations (ClientID) of the given object.
  /// The callback will be invoked with the complete list of known locations
  /// whenever the set of locations changes. The callback will also be fired if
  /// the list of known locations is empty. The callback provided to this
  /// method may fire immediately, within the call to this method, if any other
  /// listener is subscribed to the same object: This occurs when location data
  /// for the object has already been obtained.
  ///
  /// \param callback_id The id associated with the specified callback. This is
  /// needed when UnsubscribeObjectLocations is called.
  /// \param object_id The required object's ObjectID.
  /// \param callback Invoked with a (possibly empty) list of client ids and
  /// the object_id.
  /// \return Status of whether subscription succeeded.
  virtual ray::Status SubscribeObjectLocations(const UniqueID &callback_id,
                                               const ObjectID &object_id,
                                               const OnLocationsFound &callback) = 0;

  /// Unsubscribe to object location notifications.
  ///
  /// \param callback_id The id associated with a callback. This was given
  /// at subscription time, and unsubscribes the corresponding callback from
  /// further notifications about the given object's location.
  /// \param object_id The object id invoked with Subscribe.
  /// \return Status of unsubscribing from object location notifications.
  virtual ray::Status UnsubscribeObjectLocations(const UniqueID &callback_id,
                                                 const ObjectID &object_id) = 0;

  /// Report objects added to this node's store to the object directory.
  ///
  /// \param object_id The object id that was put into the store.
  /// \param client_id The client id corresponding to this node.
  /// \param object_info Additional information about the object.
  /// \return Status of whether this method succeeded.
  virtual ray::Status ReportObjectAdded(
      const ObjectID &object_id, const ClientID &client_id,
      const object_manager::protocol::ObjectInfoT &object_info) = 0;

  /// Report objects removed from this client's store to the object directory.
  ///
  /// \param object_id The object id that was removed from the store.
  /// \param client_id The client id corresponding to this node.
  /// \param object_info Additional information about the object.
  /// \return Status of whether this method succeeded.
  virtual ray::Status ReportObjectRemoved(
      const ObjectID &object_id, const ClientID &client_id,
      const object_manager::protocol::ObjectInfoT &object_info) = 0;

  /// Returns debug string for class.
  ///
  /// \return string.
  virtual std::string DebugString() const = 0;
};
/// Ray ObjectDirectory declaration. GCS-backed implementation of
/// ObjectDirectoryInterface.
class ObjectDirectory : public ObjectDirectoryInterface {
 public:
  /// Create an object directory.
  ///
  /// \param io_service The event loop to dispatch callbacks to. This should
  /// usually be the same event loop that the given gcs_client runs on.
  /// \param gcs_client A Ray GCS client to request object and client
  /// information from.
  ObjectDirectory(boost::asio::io_service &io_service,
                  std::shared_ptr<gcs::GcsClient> &gcs_client);

  virtual ~ObjectDirectory() {}

  void LookupRemoteConnectionInfo(RemoteConnectionInfo &connection_info) const override;
  std::vector<RemoteConnectionInfo> LookupAllRemoteConnections() const override;
  ray::Status LookupLocations(const ObjectID &object_id,
                              const OnLocationsFound &callback) override;
  void HandleClientRemoved(const ClientID &client_id) override;
  ray::Status SubscribeObjectLocations(const UniqueID &callback_id,
                                       const ObjectID &object_id,
                                       const OnLocationsFound &callback) override;
  ray::Status UnsubscribeObjectLocations(const UniqueID &callback_id,
                                         const ObjectID &object_id) override;
  ray::Status ReportObjectAdded(
      const ObjectID &object_id, const ClientID &client_id,
      const object_manager::protocol::ObjectInfoT &object_info) override;
  ray::Status ReportObjectRemoved(
      const ObjectID &object_id, const ClientID &client_id,
      const object_manager::protocol::ObjectInfoT &object_info) override;
  std::string DebugString() const override;

  /// ObjectDirectory should not be copied.
  RAY_DISALLOW_COPY_AND_ASSIGN(ObjectDirectory);

 private:
  /// Callbacks associated with a call to GetLocations.
  struct LocationListenerState {
    /// The callback to invoke when object locations are found.
    std::unordered_map<UniqueID, OnLocationsFound> callbacks;
    /// The current set of known locations of this object.
    std::unordered_set<ClientID> current_object_locations;
    /// This flag will get set to true if received any notification of the object.
    /// It means current_object_locations is up-to-date with GCS. It
    /// should never go back to false once set to true. If this is true, and
    /// the current_object_locations is empty, then this means that the object
    /// does not exist on any nodes due to eviction or the object never getting created.
    bool subscribed;
  };

  /// Reference to the event loop.
  boost::asio::io_service &io_service_;
  /// Reference to the gcs client.
  std::shared_ptr<gcs::GcsClient> gcs_client_;
  /// Info about subscribers to object locations.
  std::unordered_map<ObjectID, LocationListenerState> listeners_;
};
} // namespace ray
#endif // RAY_OBJECT_MANAGER_OBJECT_DIRECTORY_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/object_manager.cc
|
C++
|
#include "ray/object_manager/object_manager.h"
#include "ray/common/common_protocol.h"
#include "ray/stats/stats.h"
#include "ray/util/util.h"
namespace asio = boost::asio;
namespace object_manager_protocol = ray::object_manager::protocol;
namespace ray {
ObjectManager::ObjectManager(asio::io_service &main_service, const ClientID &self_node_id,
                             const ObjectManagerConfig &config,
                             std::shared_ptr<ObjectDirectoryInterface> object_directory)
    : self_node_id_(self_node_id),
      config_(config),
      object_directory_(std::move(object_directory)),
      store_notification_(main_service, config_.store_socket_name),
      buffer_pool_(config_.store_socket_name, config_.object_chunk_size),
      // Work guard: keeps rpc_service_ running even when it has no handlers.
      rpc_work_(rpc_service_),
      // Seed the RNG used to pick a random pull source in TryPull.
      gen_(std::chrono::high_resolution_clock::now().time_since_epoch().count()),
      object_manager_server_("ObjectManager", config_.object_manager_port,
                             config_.rpc_service_threads_number),
      object_manager_service_(rpc_service_, *this),
      client_call_manager_(main_service, config_.rpc_service_threads_number) {
  RAY_CHECK(config_.rpc_service_threads_number > 0);
  main_service_ = &main_service;
  // Mirror plasma store add/delete events into the object directory.
  store_notification_.SubscribeObjAdded(
      [this](const object_manager::protocol::ObjectInfoT &object_info) {
        HandleObjectAdded(object_info);
      });
  store_notification_.SubscribeObjDeleted(
      [this](const ObjectID &oid) { NotifyDirectoryObjectDeleted(oid); });
  // Start object manager rpc server and send & receive request threads
  StartRpcService();
}
ObjectManager::~ObjectManager() {
  // Join the RPC worker threads before the members they touch are destroyed.
  StopRpcService();
}
void ObjectManager::RunRpcService() {
  // Entry point for each RPC worker thread; blocks until the service stops.
  rpc_service_.run();
}
void ObjectManager::StartRpcService() {
rpc_threads_.resize(config_.rpc_service_threads_number);
for (int i = 0; i < config_.rpc_service_threads_number; i++) {
rpc_threads_[i] = std::thread(&ObjectManager::RunRpcService, this);
}
object_manager_server_.RegisterService(object_manager_service_);
object_manager_server_.Run();
}
void ObjectManager::StopRpcService() {
  // Stop the event loop so worker threads fall out of run(), join them all,
  // then shut the gRPC server down.
  rpc_service_.stop();
  for (auto &worker : rpc_threads_) {
    worker.join();
  }
  object_manager_server_.Shutdown();
}
void ObjectManager::HandleObjectAdded(
    const object_manager::protocol::ObjectInfoT &object_info) {
  // Notify the object directory that the object has been added to this node.
  ObjectID object_id = ObjectID::FromPlasmaIdBinary(object_info.object_id);
  RAY_LOG(DEBUG) << "Object added " << object_id;
  RAY_CHECK(local_objects_.count(object_id) == 0);
  local_objects_[object_id].object_info = object_info;
  // NOTE(review): the status returned by ReportObjectAdded is ignored here —
  // confirm whether a failure should be surfaced instead.
  ray::Status status =
      object_directory_->ReportObjectAdded(object_id, self_node_id_, object_info);
  // Handle the unfulfilled_push_requests_ which contains the push request that is not
  // completed due to unsatisfied local objects.
  auto iter = unfulfilled_push_requests_.find(object_id);
  if (iter != unfulfilled_push_requests_.end()) {
    for (auto &pair : iter->second) {
      auto &client_id = pair.first;
      // Re-run the Push on the main event loop now that the object is local.
      main_service_->post([this, object_id, client_id]() { Push(object_id, client_id); });
      // When push timeout is set to -1, there will be an empty timer in pair.second.
      if (pair.second != nullptr) {
        pair.second->cancel();
      }
    }
    unfulfilled_push_requests_.erase(iter);
  }
  // The object is local, so we no longer need to Pull it from a remote
  // manager. Cancel any outstanding Pull requests for this object.
  CancelPull(object_id);
}
void ObjectManager::NotifyDirectoryObjectDeleted(const ObjectID &object_id) {
  // The plasma store evicted/deleted this object: drop our local record and
  // tell the object directory it is no longer available on this node.
  auto it = local_objects_.find(object_id);
  RAY_CHECK(it != local_objects_.end());
  // Copy the info before erasing, since the map entry is about to go away.
  auto object_info = it->second.object_info;
  local_objects_.erase(it);
  // NOTE(review): the returned status is ignored — confirm this is intended.
  ray::Status status =
      object_directory_->ReportObjectRemoved(object_id, self_node_id_, object_info);
}
ray::Status ObjectManager::SubscribeObjAdded(
    std::function<void(const object_manager::protocol::ObjectInfoT &)> callback) {
  // Delegate to the store notification handler; registration cannot fail,
  // so this always reports success.
  store_notification_.SubscribeObjAdded(callback);
  return ray::Status::OK();
}
ray::Status ObjectManager::SubscribeObjDeleted(
    std::function<void(const ObjectID &)> callback) {
  // Delegate to the store notification handler; registration cannot fail,
  // so this always reports success.
  store_notification_.SubscribeObjDeleted(callback);
  return ray::Status::OK();
}
ray::Status ObjectManager::Pull(const ObjectID &object_id) {
  // Begin pulling `object_id` to this node. Idempotent: a second Pull for an
  // in-flight or already-local object is a no-op.
  RAY_LOG(DEBUG) << "Pull on " << self_node_id_ << " of object " << object_id;
  // Check if object is already local.
  if (local_objects_.count(object_id) != 0) {
    RAY_LOG(ERROR) << object_id << " attempted to pull an object that's already local.";
    return ray::Status::OK();
  }
  if (pull_requests_.find(object_id) != pull_requests_.end()) {
    // A pull is already in progress for this object.
    return ray::Status::OK();
  }
  pull_requests_.emplace(object_id, PullRequest());
  // Subscribe to object notifications. A notification will be received every
  // time the set of client IDs for the object changes. Notifications will also
  // be received if the list of locations is empty. The set of client IDs has
  // no ordering guarantee between notifications.
  return object_directory_->SubscribeObjectLocations(
      object_directory_pull_callback_id_, object_id,
      [this](const ObjectID &object_id, const std::unordered_set<ClientID> &client_ids) {
        // Exit if the Pull request has already been fulfilled or canceled.
        auto it = pull_requests_.find(object_id);
        if (it == pull_requests_.end()) {
          return;
        }
        // Reset the list of clients that are now expected to have the object.
        // NOTE(swang): Since we are overwriting the previous list of clients,
        // we may end up sending a duplicate request to the same client as
        // before.
        it->second.client_locations =
            std::vector<ClientID>(client_ids.begin(), client_ids.end());
        if (it->second.client_locations.empty()) {
          // The object locations are now empty, so we should wait for the next
          // notification about a new object location. Cancel the timer until
          // the next Pull attempt since there are no more clients to try.
          if (it->second.retry_timer != nullptr) {
            it->second.retry_timer->cancel();
            it->second.timer_set = false;
          }
        } else {
          // New object locations were found, so begin trying to pull from a
          // client. This will be called every time a new client location
          // appears.
          TryPull(object_id);
        }
      });
}
/// Attempt one pull of `object_id` from a randomly chosen known location,
/// arming a retry timer so the next location is tried if this attempt does
/// not make the object local within config_.pull_timeout_ms.
void ObjectManager::TryPull(const ObjectID &object_id) {
  auto it = pull_requests_.find(object_id);
  if (it == pull_requests_.end()) {
    // The pull was fulfilled or canceled in the meantime.
    return;
  }
  auto &node_vector = it->second.client_locations;
  // The timer should never fire if there are no expected client locations.
  if (node_vector.empty()) {
    return;
  }
  RAY_CHECK(local_objects_.count(object_id) == 0);
  // Make sure that there is at least one client which is not the local client.
  // TODO(rkn): It may actually be possible for this check to fail.
  if (node_vector.size() == 1 && node_vector[0] == self_node_id_) {
    RAY_LOG(ERROR) << "The object manager with ID " << self_node_id_
                   << " is trying to pull object " << object_id
                   << " but the object table suggests that this object manager "
                   << "already has the object. The object may have been evicted.";
    it->second.timer_set = false;
    return;
  }
  // Choose a random client to pull the object from.
  // Generate a random index.
  std::uniform_int_distribution<int> distribution(0, node_vector.size() - 1);
  int node_index = distribution(gen_);
  ClientID node_id = node_vector[node_index];
  // If the object manager somehow ended up choosing itself, choose a different
  // object manager.
  if (node_id == self_node_id_) {
    // Swap self to the back and drop it, then re-pick from the remainder.
    // The size-1 check above guarantees at least one other node remains.
    std::swap(node_vector[node_index], node_vector[node_vector.size() - 1]);
    node_vector.pop_back();
    RAY_LOG(ERROR) << "The object manager with ID " << self_node_id_
                   << " is trying to pull object " << object_id
                   << " but the object table suggests that this object manager "
                   << "already has the object.";
    node_id = node_vector[node_index % node_vector.size()];
    RAY_CHECK(node_id != self_node_id_);
  }
  RAY_LOG(DEBUG) << "Sending pull request from " << self_node_id_ << " to " << node_id
                 << " of object " << object_id;
  auto rpc_client = GetRpcClient(node_id);
  if (rpc_client) {
    // Try pulling from the client. Posted to the RPC event loop so the
    // request is serialized off the main service thread.
    rpc_service_.post([this, object_id, node_id, rpc_client]() {
      SendPullRequest(object_id, node_id, rpc_client);
    });
  } else {
    RAY_LOG(ERROR) << "Couldn't send pull request from " << self_node_id_ << " to "
                   << node_id << " of object " << object_id
                   << " , setup rpc connection failed.";
  }
  // If there are more clients to try, try them in succession, with a timeout
  // in between each try.
  if (!it->second.client_locations.empty()) {
    if (it->second.retry_timer == nullptr) {
      // Set the timer if we haven't already.
      it->second.retry_timer = std::unique_ptr<boost::asio::deadline_timer>(
          new boost::asio::deadline_timer(*main_service_));
    }
    // Wait for a timeout. If we receive the object or a caller Cancels the
    // Pull within the timeout, then nothing will happen. Otherwise, the timer
    // will fire and the next client in the list will be tried.
    boost::posix_time::milliseconds retry_timeout(config_.pull_timeout_ms);
    it->second.retry_timer->expires_from_now(retry_timeout);
    it->second.retry_timer->async_wait(
        [this, object_id](const boost::system::error_code &error) {
          if (!error) {
            // Try the Pull from the next client.
            TryPull(object_id);
          } else {
            // Check that the error was due to the timer being canceled.
            RAY_CHECK(error == boost::asio::error::operation_aborted);
          }
        });
    // Record that we set the timer until the next attempt.
    it->second.timer_set = true;
  } else {
    // The timer is not reset since there are no more clients to try. Go back
    // to waiting for more notifications. Once we receive a new object location
    // from the object directory, then the Pull will be retried.
    it->second.timer_set = false;
  }
}
/// Issue a Pull RPC to the remote object manager identified by `client_id`,
/// asking it to push `object_id` back to this node.
///
/// \param object_id The object to request.
/// \param client_id The remote node being asked (used only for logging).
/// \param rpc_client The RPC client connected to that node.
void ObjectManager::SendPullRequest(
    const ObjectID &object_id, const ClientID &client_id,
    std::shared_ptr<rpc::ObjectManagerClient> rpc_client) {
  rpc::PullRequest pull_request;
  pull_request.set_object_id(object_id.Binary());
  pull_request.set_client_id(self_node_id_.Binary());
  rpc_client->Pull(pull_request, [object_id, client_id](const Status &status,
                                                        const rpc::PullReply &reply) {
    if (!status.ok()) {
      // Pull is best effort: just log; the retry timer drives further attempts.
      // (Fixed missing space after "due to" so the message isn't fused with
      // the status text.)
      RAY_LOG(WARNING) << "Send pull " << object_id << " request to client " << client_id
                       << " failed due to " << status.message();
    }
  });
}
void ObjectManager::HandlePushTaskTimeout(const ObjectID &object_id,
                                          const ClientID &client_id) {
  // A queued Push was not satisfied before the configured timeout: drop the
  // pending entry for this (object, client) pair.
  RAY_LOG(WARNING) << "Invalid Push request ObjectID: " << object_id
                   << " after waiting for " << config_.push_timeout_ms << " ms.";
  auto request_it = unfulfilled_push_requests_.find(object_id);
  RAY_CHECK(request_it != unfulfilled_push_requests_.end());
  const size_t num_erased = request_it->second.erase(client_id);
  RAY_CHECK(num_erased == 1);
  if (request_it->second.empty()) {
    unfulfilled_push_requests_.erase(request_it);
  }
}
void ObjectManager::HandleSendFinished(const ObjectID &object_id,
                                       const ClientID &client_id, uint64_t chunk_index,
                                       double start_time, double end_time,
                                       ray::Status status) {
  // Record a profiling event for a sent chunk (success or failure).
  RAY_LOG(DEBUG) << "HandleSendFinished on " << self_node_id_ << " to " << client_id
                 << " of object " << object_id << " chunk " << chunk_index
                 << ", status: " << status.ToString();
  if (!status.ok()) {
    // TODO(rkn): What do we want to do if the send failed?
  }
  rpc::ProfileTableData::ProfileEvent profile_event;
  profile_event.set_event_type("transfer_send");
  profile_event.set_start_time(start_time);
  profile_event.set_end_time(end_time);
  // Encode the object ID, client ID, chunk index, and status as a json list,
  // which will be parsed by the reader of the profile table.
  profile_event.set_extra_data("[\"" + object_id.Hex() + "\",\"" + client_id.Hex() +
                               "\"," + std::to_string(chunk_index) + ",\"" +
                               status.ToString() + "\"]");
  // profile_events_ is appended from multiple RPC threads; guard it.
  std::lock_guard<std::mutex> lock(profile_mutex_);
  profile_events_.push_back(profile_event);
}
void ObjectManager::HandleReceiveFinished(const ObjectID &object_id,
const ClientID &client_id, uint64_t chunk_index,
double start_time, double end_time,
ray::Status status) {
if (!status.ok()) {
// TODO(rkn): What do we want to do if the send failed?
}
rpc::ProfileTableData::ProfileEvent profile_event;
profile_event.set_event_type("transfer_receive");
profile_event.set_start_time(start_time);
profile_event.set_end_time(end_time);
// Encode the object ID, client ID, chunk index, and status as a json list,
// which will be parsed by the reader of the profile table.
profile_event.set_extra_data("[\"" + object_id.Hex() + "\",\"" + client_id.Hex() +
"\"," + std::to_string(chunk_index) + ",\"" +
status.ToString() + "\"]");
std::lock_guard<std::mutex> lock(profile_mutex_);
profile_events_.push_back(profile_event);
}
void ObjectManager::Push(const ObjectID &object_id, const ClientID &client_id) {
  // Push `object_id` to the remote node `client_id`, chunk by chunk. If the
  // object is not local yet, the request is queued (with an optional timeout)
  // until HandleObjectAdded re-triggers it. Pushes to the same node are
  // deduplicated within object_manager_repeated_push_delay_ms.
  RAY_LOG(DEBUG) << "Push on " << self_node_id_ << " to " << client_id << " of object "
                 << object_id;
  if (local_objects_.count(object_id) == 0) {
    // Avoid setting duplicated timer for the same object and client pair.
    auto &clients = unfulfilled_push_requests_[object_id];
    if (clients.count(client_id) == 0) {
      // If config_.push_timeout_ms < 0, we give an empty timer
      // and the task will be kept infinitely.
      auto timer = std::unique_ptr<boost::asio::deadline_timer>();
      if (config_.push_timeout_ms == 0) {
        // The Push request fails directly when config_.push_timeout_ms == 0.
        RAY_LOG(WARNING) << "Invalid Push request ObjectID " << object_id
                         << " due to direct timeout setting. ";
      } else if (config_.push_timeout_ms > 0) {
        // Put the task into a queue and wait for the notification of Object added.
        timer.reset(new boost::asio::deadline_timer(*main_service_));
        auto clean_push_period = boost::posix_time::milliseconds(config_.push_timeout_ms);
        timer->expires_from_now(clean_push_period);
        timer->async_wait(
            [this, object_id, client_id](const boost::system::error_code &error) {
              // Timer killing will receive the boost::asio::error::operation_aborted,
              // we only handle the timeout event.
              if (!error) {
                HandlePushTaskTimeout(object_id, client_id);
              }
            });
      }
      if (config_.push_timeout_ms != 0) {
        clients.emplace(client_id, std::move(timer));
      }
    }
    return;
  }
  // If we haven't pushed this object to this same object manager yet, then push
  // it. If we have, but it was a long time ago, then push it. If we have and it
  // was recent, then don't do it again.
  auto &recent_pushes = local_objects_[object_id].recent_pushes;
  auto it = recent_pushes.find(client_id);
  if (it == recent_pushes.end()) {
    // We haven't pushed this specific object to this specific object manager
    // yet (or if we have then the object must have been evicted and recreated
    // locally).
    recent_pushes[client_id] = absl::GetCurrentTimeNanos() / 1000000;
  } else {
    int64_t current_time = absl::GetCurrentTimeNanos() / 1000000;
    if (current_time - it->second <=
        RayConfig::instance().object_manager_repeated_push_delay_ms()) {
      // We pushed this object to the object manager recently, so don't do it
      // again.
      RAY_LOG(DEBUG) << "Object " << object_id << " recently pushed to " << client_id;
      return;
    } else {
      it->second = current_time;
    }
  }
  auto rpc_client = GetRpcClient(client_id);
  if (rpc_client) {
    const object_manager::protocol::ObjectInfoT &object_info =
        local_objects_[object_id].object_info;
    uint64_t data_size =
        static_cast<uint64_t>(object_info.data_size + object_info.metadata_size);
    uint64_t metadata_size = static_cast<uint64_t>(object_info.metadata_size);
    uint64_t num_chunks = buffer_pool_.GetNumChunks(data_size);
    RAY_LOG(DEBUG) << "Sending object chunks of " << object_id << " to client "
                   << client_id << ", number of chunks: " << num_chunks
                   << ", total data size: " << data_size;
    UniqueID push_id = UniqueID::FromRandom();
    // Fan the chunk sends out over the RPC thread pool.
    for (uint64_t chunk_index = 0; chunk_index < num_chunks; ++chunk_index) {
      rpc_service_.post([this, push_id, object_id, client_id, data_size, metadata_size,
                         chunk_index, rpc_client]() {
        auto st = SendObjectChunk(push_id, object_id, client_id, data_size, metadata_size,
                                  chunk_index, rpc_client);
        if (!st.ok()) {
          RAY_LOG(WARNING) << "Send object " << object_id << " chunk failed due to "
                           << st.message() << ", chunk index " << chunk_index;
        }
      });
    }
  } else {
    // Push is best effort, so do nothing here.
    RAY_LOG(ERROR)
        << "Failed to establish connection for Push with remote object manager.";
  }
}
/// Send one chunk of an object over the given RPC client and record a
/// profiling event when the reply arrives.
///
/// \param push_id Unique id grouping all chunks of this push.
/// \param object_id The object being pushed.
/// \param client_id The destination node (used in the request and for logs).
/// \param data_size Total size of data plus metadata.
/// \param metadata_size Size of the metadata portion.
/// \param chunk_index Which chunk of the object to send.
/// \param rpc_client The RPC client connected to the destination.
/// \return Status of reading the chunk from the local buffer pool; the RPC
/// itself is asynchronous and its failure is only logged.
ray::Status ObjectManager::SendObjectChunk(
    const UniqueID &push_id, const ObjectID &object_id, const ClientID &client_id,
    uint64_t data_size, uint64_t metadata_size, uint64_t chunk_index,
    std::shared_ptr<rpc::ObjectManagerClient> rpc_client) {
  double start_time = absl::GetCurrentTimeNanos() / 1e9;
  rpc::PushRequest push_request;
  // Set request header
  push_request.set_push_id(push_id.Binary());
  push_request.set_object_id(object_id.Binary());
  push_request.set_client_id(self_node_id_.Binary());
  push_request.set_data_size(data_size);
  push_request.set_metadata_size(metadata_size);
  push_request.set_chunk_index(chunk_index);
  // Get data
  std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status> chunk_status =
      buffer_pool_.GetChunk(object_id, data_size, metadata_size, chunk_index);
  ObjectBufferPool::ChunkInfo chunk_info = chunk_status.first;
  // Fail on status not okay. The object is local, and there is
  // no other anticipated error here.
  ray::Status status = chunk_status.second;
  if (!chunk_status.second.ok()) {
    RAY_LOG(WARNING) << "Attempting to push object " << object_id
                     << " which is not local. It may have been evicted.";
    RAY_RETURN_NOT_OK(status);
  }
  push_request.set_data(chunk_info.data, chunk_info.buffer_length);
  // record the time cost between send chunk and receive reply
  rpc::ClientCallback<rpc::PushReply> callback = [this, start_time, object_id, client_id,
                                                  chunk_index](
                                                     const Status &status,
                                                     const rpc::PushReply &reply) {
    // TODO: Just print warning here, should we try to resend this chunk?
    if (!status.ok()) {
      // (Fixed missing space after "due to" so the message isn't fused with
      // the status text.)
      RAY_LOG(WARNING) << "Send object " << object_id << " chunk to client " << client_id
                       << " failed due to " << status.message()
                       << ", chunk index: " << chunk_index;
    }
    double end_time = absl::GetCurrentTimeNanos() / 1e9;
    HandleSendFinished(object_id, client_id, chunk_index, start_time, end_time, status);
  };
  rpc_client->Push(push_request, callback);
  // Do this regardless of whether it failed or succeeded.
  buffer_pool_.ReleaseGetChunk(object_id, chunk_info.chunk_index);
  return Status::OK();
}
void ObjectManager::CancelPull(const ObjectID &object_id) {
  // Stop pulling object_id: unsubscribe from further location notifications
  // and drop the pending pull request, if one exists.
  auto request_it = pull_requests_.find(object_id);
  if (request_it == pull_requests_.end()) {
    return;
  }
  RAY_CHECK_OK(object_directory_->UnsubscribeObjectLocations(
      object_directory_pull_callback_id_, object_id));
  pull_requests_.erase(request_it);
}
ray::Status ObjectManager::Wait(const std::vector<ObjectID> &object_ids,
                                int64_t timeout_ms, uint64_t num_required_objects,
                                bool wait_local, const WaitCallback &callback) {
  // Kick off a Wait: register the request state, then look up the locations
  // of objects that are not yet local. SubscribeRemainingWaitObjects is
  // invoked by LookupRemainingWaitObjects once all lookups have completed.
  const UniqueID wait_id = UniqueID::FromRandom();
  RAY_LOG(DEBUG) << "Wait request " << wait_id << " on " << self_node_id_;
  RAY_RETURN_NOT_OK(AddWaitRequest(wait_id, object_ids, timeout_ms, num_required_objects,
                                   wait_local, callback));
  RAY_RETURN_NOT_OK(LookupRemainingWaitObjects(wait_id));
  return ray::Status::OK();
}
ray::Status ObjectManager::AddWaitRequest(const UniqueID &wait_id,
                                          const std::vector<ObjectID> &object_ids,
                                          int64_t timeout_ms,
                                          uint64_t num_required_objects, bool wait_local,
                                          const WaitCallback &callback) {
  // Validate and record a new Wait request, partitioning the requested
  // objects into those already local ("found") and those still "remaining".
  RAY_CHECK(timeout_ms >= 0 || timeout_ms == -1);
  RAY_CHECK(num_required_objects != 0);
  RAY_CHECK(num_required_objects <= object_ids.size())
      << num_required_objects << " " << object_ids.size();
  // NOTE(review): with num_required_objects != 0 and <= object_ids.size()
  // checked above, object_ids can never be empty here, so this branch looks
  // unreachable; it also falls through without returning — confirm intent.
  if (object_ids.size() == 0) {
    callback(std::vector<ObjectID>(), std::vector<ObjectID>());
  }
  // Initialize fields.
  active_wait_requests_.emplace(wait_id, WaitState(*main_service_, timeout_ms, callback));
  auto &wait_state = active_wait_requests_.find(wait_id)->second;
  wait_state.object_id_order = object_ids;
  wait_state.timeout_ms = timeout_ms;
  wait_state.num_required_objects = num_required_objects;
  wait_state.wait_local = wait_local;
  for (const auto &object_id : object_ids) {
    if (local_objects_.count(object_id) > 0) {
      wait_state.found.insert(object_id);
    } else {
      wait_state.remaining.insert(object_id);
    }
  }
  return ray::Status::OK();
}
ray::Status ObjectManager::LookupRemainingWaitObjects(const UniqueID &wait_id) {
  // For each non-local object in the Wait request, look up current locations
  // from the directory. When the last lookup callback fires, hand off to
  // SubscribeRemainingWaitObjects. If nothing remains, complete immediately.
  auto &wait_state = active_wait_requests_.find(wait_id)->second;
  if (wait_state.remaining.empty()) {
    WaitComplete(wait_id);
  } else {
    // We invoke lookup calls immediately after checking which objects are local to
    // obtain current information about the location of remote objects. Thus,
    // we obtain information about all given objects, regardless of their location.
    // This is required to ensure we do not bias returning locally available objects
    // as ready whenever Wait is invoked with a mixture of local and remote objects.
    for (const auto &object_id : wait_state.remaining) {
      // Lookup remaining objects.
      wait_state.requested_objects.insert(object_id);
      RAY_RETURN_NOT_OK(object_directory_->LookupLocations(
          object_id, [this, wait_id](const ObjectID &lookup_object_id,
                                     const std::unordered_set<ClientID> &client_ids) {
            auto &wait_state = active_wait_requests_.find(wait_id)->second;
            // Note that the object is guaranteed to be added to local_objects_ before
            // the notification is triggered.
            if (local_objects_.count(lookup_object_id) > 0 ||
                (!wait_state.wait_local && !client_ids.empty())) {
              wait_state.remaining.erase(lookup_object_id);
              wait_state.found.insert(lookup_object_id);
            }
            RAY_LOG(DEBUG) << "Wait request " << wait_id << ": " << client_ids.size()
                           << " locations found for object " << lookup_object_id;
            wait_state.requested_objects.erase(lookup_object_id);
            // The last outstanding lookup triggers the subscription phase.
            if (wait_state.requested_objects.empty()) {
              SubscribeRemainingWaitObjects(wait_id);
            }
          }));
    }
  }
  return ray::Status::OK();
}
/// Invoked after all location lookups for a wait request have returned.
/// Completes the wait if its requirements are already met; otherwise
/// subscribes to location updates for the still-missing objects and arms the
/// timeout timer (if a finite timeout was requested).
///
/// \param wait_id The id of the wait request.
void ObjectManager::SubscribeRemainingWaitObjects(const UniqueID &wait_id) {
  auto &wait_state = active_wait_requests_.find(wait_id)->second;
  if (wait_state.found.size() >= wait_state.num_required_objects ||
      wait_state.timeout_ms == 0) {
    // Requirements already satisfied.
    WaitComplete(wait_id);
    return;
  }
  // There are objects remaining whose locations we don't know. Request their
  // locations from the object directory. Iterate in the caller's original
  // order so subscription requests are issued deterministically.
  for (const auto &object_id : wait_state.object_id_order) {
    if (wait_state.remaining.count(object_id) > 0) {
      RAY_LOG(DEBUG) << "Wait request " << wait_id << ": subscribing to object "
                     << object_id;
      wait_state.requested_objects.insert(object_id);
      // Subscribe to object notifications.
      RAY_CHECK_OK(object_directory_->SubscribeObjectLocations(
          wait_id, object_id,
          [this, wait_id](const ObjectID &subscribe_object_id,
                          const std::unordered_set<ClientID> &client_ids) {
            auto object_id_wait_state = active_wait_requests_.find(wait_id);
            if (object_id_wait_state == active_wait_requests_.end()) {
              // Depending on the timing of calls to the object directory, we
              // may get a subscription notification after the wait call has
              // already completed. If so, then don't process the
              // notification.
              return;
            }
            auto &wait_state = object_id_wait_state->second;
            // Note that the object is guaranteed to be added to local_objects_ before
            // the notification is triggered.
            if (local_objects_.count(subscribe_object_id) > 0 ||
                (!wait_state.wait_local && !client_ids.empty())) {
              RAY_LOG(DEBUG) << "Wait request " << wait_id
                             << ": subscription notification received for object "
                             << subscribe_object_id;
              wait_state.remaining.erase(subscribe_object_id);
              wait_state.found.insert(subscribe_object_id);
              wait_state.requested_objects.erase(subscribe_object_id);
              RAY_CHECK_OK(object_directory_->UnsubscribeObjectLocations(
                  wait_id, subscribe_object_id));
              if (wait_state.found.size() >= wait_state.num_required_objects) {
                WaitComplete(wait_id);
              }
            }
          }));
    }
  }
  // If a timeout was provided, then set a timer. If we don't find locations
  // for enough objects by the time the timer expires, then we will return
  // from the Wait. The timer is armed once here; it was previously re-armed
  // on every loop iteration above, which repeatedly cancelled and
  // re-scheduled the deadline (expires_from_now resets the deadline and
  // cancels pending handlers) so only the final arm ever took effect.
  if (wait_state.timeout_ms != -1) {
    auto timeout = boost::posix_time::milliseconds(wait_state.timeout_ms);
    wait_state.timeout_timer->expires_from_now(timeout);
    wait_state.timeout_timer->async_wait(
        [this, wait_id](const boost::system::error_code &error_code) {
          if (error_code.value() != 0) {
            // Timer was cancelled (the wait completed first); do nothing.
            return;
          }
          if (active_wait_requests_.find(wait_id) == active_wait_requests_.end()) {
            // When a subscription callback is triggered first, WaitComplete will be
            // called. The timer may at the same time goes off and may be an
            // interruption will post WaitComplete to main_service_ the second time.
            // This check will avoid the duplicated call of this function.
            return;
          }
          WaitComplete(wait_id);
        });
  }
}
void ObjectManager::WaitComplete(const UniqueID &wait_id) {
auto iter = active_wait_requests_.find(wait_id);
RAY_CHECK(iter != active_wait_requests_.end());
auto &wait_state = iter->second;
// If we complete with outstanding requests, then timeout_ms should be non-zero or -1
// (infinite wait time).
if (!wait_state.requested_objects.empty()) {
RAY_CHECK(wait_state.timeout_ms > 0 || wait_state.timeout_ms == -1);
}
// Unsubscribe to any objects that weren't found in the time allotted.
for (const auto &object_id : wait_state.requested_objects) {
RAY_CHECK_OK(object_directory_->UnsubscribeObjectLocations(wait_id, object_id));
}
// Cancel the timer. This is okay even if the timer hasn't been started.
// The timer handler will be given a non-zero error code. The handler
// will do nothing on non-zero error codes.
wait_state.timeout_timer->cancel();
// Order objects according to input order.
std::vector<ObjectID> found;
std::vector<ObjectID> remaining;
for (const auto &item : wait_state.object_id_order) {
if (found.size() < wait_state.num_required_objects &&
wait_state.found.count(item) > 0) {
found.push_back(item);
} else {
remaining.push_back(item);
}
}
wait_state.callback(found, remaining);
active_wait_requests_.erase(wait_id);
RAY_LOG(DEBUG) << "Wait request " << wait_id << " finished: found " << found.size()
<< " remaining " << remaining.size();
}
/// Implementation of ObjectManagerServiceHandler
/// Handle a Push RPC: write the received object chunk into the local store
/// and reply with the outcome.
void ObjectManager::HandlePush(const rpc::PushRequest &request, rpc::PushReply *reply,
                               rpc::SendReplyCallback send_reply_callback) {
  // Unpack the chunk metadata carried by the push request.
  const ObjectID object_id = ObjectID::FromBinary(request.object_id());
  const ClientID client_id = ClientID::FromBinary(request.client_id());
  const uint64_t chunk_index = request.chunk_index();
  const uint64_t metadata_size = request.metadata_size();
  const uint64_t data_size = request.data_size();
  const std::string &chunk_data = request.data();

  // Time the chunk write so it can be recorded in profiling events.
  const double receive_start = absl::GetCurrentTimeNanos() / 1e9;
  auto status = ReceiveObjectChunk(client_id, object_id, data_size, metadata_size,
                                   chunk_index, chunk_data);
  const double receive_end = absl::GetCurrentTimeNanos() / 1e9;

  HandleReceiveFinished(object_id, client_id, chunk_index, receive_start, receive_end,
                        status);
  send_reply_callback(status, nullptr, nullptr);
}
/// Copy one received object chunk into the plasma buffer slice managed by the
/// buffer pool, sealing the chunk on success.
///
/// \param client_id The id of the remote object manager that sent the chunk.
/// \param object_id The object the chunk belongs to.
/// \param data_size Total object data size (all chunks).
/// \param metadata_size Metadata size of the object.
/// \param chunk_index Index of this chunk within the object.
/// \param data The raw chunk bytes.
ray::Status ObjectManager::ReceiveObjectChunk(const ClientID &client_id,
                                              const ObjectID &object_id,
                                              uint64_t data_size, uint64_t metadata_size,
                                              uint64_t chunk_index,
                                              const std::string &data) {
  RAY_LOG(DEBUG) << "ReceiveObjectChunk on " << self_node_id_ << " from " << client_id
                 << " of object " << object_id << " chunk index: " << chunk_index
                 << ", chunk data size: " << data.size()
                 << ", object size: " << data_size;
  // Ask the buffer pool for the buffer slice backing this chunk; the status
  // tells us whether this chunk still needs handling.
  std::pair<const ObjectBufferPool::ChunkInfo &, ray::Status> chunk_status =
      buffer_pool_.CreateChunk(object_id, data_size, metadata_size, chunk_index);
  ray::Status status;
  ObjectBufferPool::ChunkInfo chunk_info = chunk_status.first;
  if (chunk_status.second.ok()) {
    // Avoid handling this chunk if it's already being handled by another process.
    std::memcpy(chunk_info.data, data.data(), chunk_info.buffer_length);
    buffer_pool_.SealChunk(object_id, chunk_index);
  } else {
    RAY_LOG(WARNING) << "ReceiveObjectChunk index " << chunk_index << " of object "
                     << object_id << " failed: " << chunk_status.second.message();
    // TODO(hme): If the object isn't local, create a pull request for this chunk.
  }
  // NOTE(review): `status` is never assigned, so this returns OK even when
  // CreateChunk failed above. If the failure case (e.g., a chunk already being
  // handled) is intentionally non-fatal this is fine; otherwise the intent may
  // have been to return chunk_status.second — confirm before changing.
  return status;
}
/// Handle a Pull RPC: record a profiling event and schedule a Push of the
/// requested object back to the requester on the main thread.
void ObjectManager::HandlePull(const rpc::PullRequest &request, rpc::PullReply *reply,
                               rpc::SendReplyCallback send_reply_callback) {
  const ObjectID object_id = ObjectID::FromBinary(request.object_id());
  const ClientID client_id = ClientID::FromBinary(request.client_id());
  RAY_LOG(DEBUG) << "Received pull request from client " << client_id << " for object ["
                 << object_id << "].";

  // Record a zero-duration profiling event for the incoming pull request.
  rpc::ProfileTableData::ProfileEvent profile_event;
  profile_event.set_event_type("receive_pull_request");
  const double now = absl::GetCurrentTimeNanos() / 1e9;
  profile_event.set_start_time(now);
  profile_event.set_end_time(now);
  profile_event.set_extra_data("[\"" + object_id.Hex() + "\",\"" + client_id.Hex() +
                               "\"]");
  {
    // profile_events_ is shared with the rpc threads.
    std::lock_guard<std::mutex> lock(profile_mutex_);
    profile_events_.emplace_back(profile_event);
  }

  // The actual push runs on the main io_service thread.
  main_service_->post([this, object_id, client_id]() { Push(object_id, client_id); });
  send_reply_callback(Status::OK(), nullptr, nullptr);
}
/// Handle a FreeObjects RPC from a remote object manager: free the listed
/// objects from the local store only (the sender is responsible for fanning
/// the request out, so local_only avoids re-broadcast loops).
void ObjectManager::HandleFreeObjects(const rpc::FreeObjectsRequest &request,
                                      rpc::FreeObjectsReply *reply,
                                      rpc::SendReplyCallback send_reply_callback) {
  std::vector<ObjectID> object_ids;
  object_ids.reserve(request.object_ids().size());
  for (const auto &id_binary : request.object_ids()) {
    object_ids.emplace_back(ObjectID::FromBinary(id_binary));
  }
  FreeObjects(object_ids, /* local_only */ true);
  send_reply_callback(Status::OK(), nullptr, nullptr);
}
/// Free objects from the local store and, unless local_only is set, spread
/// the request to every reachable remote object manager.
void ObjectManager::FreeObjects(const std::vector<ObjectID> &object_ids,
                                bool local_only) {
  // Always release the local copies first.
  buffer_pool_.FreeObjects(object_ids);
  if (local_only) {
    return;
  }
  // Collect an rpc client for every remote object manager we can reach.
  std::vector<std::shared_ptr<rpc::ObjectManagerClient>> rpc_clients;
  for (const auto &connection_info : object_directory_->LookupAllRemoteConnections()) {
    if (auto rpc_client = GetRpcClient(connection_info.client_id)) {
      rpc_clients.push_back(rpc_client);
    }
  }
  // The broadcast itself runs on the rpc thread pool.
  rpc_service_.post([this, object_ids, rpc_clients]() {
    SpreadFreeObjectsRequest(object_ids, rpc_clients);
  });
}
/// Broadcast a FreeObjects request to the given remote object managers.
/// Failures are logged and otherwise ignored (best-effort broadcast).
///
/// \param object_ids The list of ObjectIDs to be deleted.
/// \param rpc_clients Clients for each remote object manager to notify.
void ObjectManager::SpreadFreeObjectsRequest(
    const std::vector<ObjectID> &object_ids,
    const std::vector<std::shared_ptr<rpc::ObjectManagerClient>> &rpc_clients) {
  // This code path should be called from node manager.
  rpc::FreeObjectsRequest free_objects_request;
  for (const auto &object_id : object_ids) {
    free_objects_request.add_object_ids(object_id.Binary());
  }
  for (const auto &rpc_client : rpc_clients) {
    rpc_client->FreeObjects(
        free_objects_request,
        [](const Status &status, const rpc::FreeObjectsReply &reply) {
          if (!status.ok()) {
            // Fixed: the original message had no separator before the status
            // ("...due to<message>").
            RAY_LOG(WARNING) << "Send free objects request failed due to: "
                             << status.message();
          }
        });
  }
}
/// Return (creating and caching on first use) the rpc client for the given
/// remote object manager, or nullptr if its address cannot be resolved.
std::shared_ptr<rpc::ObjectManagerClient> ObjectManager::GetRpcClient(
    const ClientID &client_id) {
  // Fast path: reuse a previously created client.
  auto it = remote_object_manager_clients_.find(client_id);
  if (it != remote_object_manager_clients_.end()) {
    return it->second;
  }
  // Resolve the remote node's address via the object directory.
  RemoteConnectionInfo connection_info(client_id);
  object_directory_->LookupRemoteConnectionInfo(connection_info);
  if (!connection_info.Connected()) {
    return nullptr;
  }
  auto rpc_client = std::make_shared<rpc::ObjectManagerClient>(
      connection_info.ip, connection_info.port, client_call_manager_);
  RAY_LOG(DEBUG) << "Get rpc client, address: " << connection_info.ip
                 << ", port: " << connection_info.port
                 << ", local port: " << GetServerPort();
  remote_object_manager_clients_.emplace(client_id, rpc_client);
  return rpc_client;
}
/// Snapshot all buffered profiling events into a fresh ProfileTableData entry
/// and clear the buffer. The buffer is guarded because rpc threads append to
/// it concurrently.
std::shared_ptr<rpc::ProfileTableData> ObjectManager::GetAndResetProfilingInfo() {
  auto profile_info = std::make_shared<rpc::ProfileTableData>();
  profile_info->set_component_type("object_manager");
  profile_info->set_component_id(self_node_id_.Binary());
  {
    std::lock_guard<std::mutex> lock(profile_mutex_);
    for (const auto &event : profile_events_) {
      profile_info->add_profile_events()->CopyFrom(event);
    }
    profile_events_.clear();
  }
  return profile_info;
}
/// Build a human-readable summary of this object manager's state, including
/// the debug strings of its directory, store notification manager, and
/// buffer pool.
std::string ObjectManager::DebugString() const {
  std::stringstream result;
  result << "ObjectManager:"
         << "\n- num local objects: " << local_objects_.size()
         << "\n- num active wait requests: " << active_wait_requests_.size()
         << "\n- num unfulfilled push requests: " << unfulfilled_push_requests_.size()
         << "\n- num pull requests: " << pull_requests_.size()
         << "\n- num buffered profile events: " << profile_events_.size()
         << "\n" << object_directory_->DebugString()
         << "\n" << store_notification_.DebugString()
         << "\n" << buffer_pool_.DebugString();
  return result.str();
}
/// Record object-manager gauges (store memory usage, object and request
/// counts) with the stats module.
void ObjectManager::RecordMetrics() const {
  // Sum the size of every locally held object. Take the object info by
  // reference: the original copied the whole ObjectInfoT struct per entry.
  int64_t used_memory = 0;
  for (const auto &it : local_objects_) {
    const auto &object_info = it.second.object_info;
    used_memory += object_info.data_size + object_info.metadata_size;
  }
  stats::ObjectManagerStats().Record(used_memory,
                                     {{stats::ValueTypeKey, "used_object_store_memory"}});
  stats::ObjectManagerStats().Record(local_objects_.size(),
                                     {{stats::ValueTypeKey, "num_local_objects"}});
  stats::ObjectManagerStats().Record(active_wait_requests_.size(),
                                     {{stats::ValueTypeKey, "num_active_wait_requests"}});
  stats::ObjectManagerStats().Record(
      unfulfilled_push_requests_.size(),
      {{stats::ValueTypeKey, "num_unfulfilled_push_requests"}});
  stats::ObjectManagerStats().Record(pull_requests_.size(),
                                     {{stats::ValueTypeKey, "num_pull_requests"}});
  stats::ObjectManagerStats().Record(profile_events_.size(),
                                     {{stats::ValueTypeKey, "num_profile_events"}});
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/object_manager.h
|
C/C++ Header
|
#ifndef RAY_OBJECT_MANAGER_OBJECT_MANAGER_H
#define RAY_OBJECT_MANAGER_OBJECT_MANAGER_H
#include <algorithm>
#include <cstdint>
#include <deque>
#include <map>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/bind.hpp>
#include "absl/time/clock.h"
#include "plasma/client.h"
#include "ray/common/id.h"
#include "ray/common/ray_config.h"
#include "ray/common/status.h"
#include "ray/object_manager/format/object_manager_generated.h"
#include "ray/object_manager/object_buffer_pool.h"
#include "ray/object_manager/object_directory.h"
#include "ray/object_manager/object_store_notification_manager.h"
#include "ray/rpc/object_manager/object_manager_client.h"
#include "ray/rpc/object_manager/object_manager_server.h"
namespace ray {
/// Configuration for an ObjectManager instance, supplied by the embedding
/// process (e.g., the raylet) at construction time.
struct ObjectManagerConfig {
  /// The port that the object manager should use to listen for connections
  /// from other object managers. If this is 0, the object manager will choose
  /// its own port.
  int object_manager_port;
  /// The time in milliseconds to wait before retrying a pull
  /// that fails due to client id lookup.
  unsigned int pull_timeout_ms;
  /// Object chunk size, in bytes. Objects are transferred between nodes as a
  /// sequence of chunks of this size.
  uint64_t object_chunk_size;
  /// The store socket name.
  std::string store_socket_name;
  /// The time in milliseconds to wait until a Push request
  /// fails due to unsatisfied local object. Special value:
  /// Negative: waiting infinitely.
  /// 0: giving up retrying immediately.
  int push_timeout_ms;
  /// Number of threads in the rpc service thread pool; requests are sent and
  /// received in these threads.
  int rpc_service_threads_number;
};
/// Bookkeeping for an object that is present in the local object store.
struct LocalObjectInfo {
  /// Information from the object store about the object.
  object_manager::protocol::ObjectInfoT object_info;
  /// A map from the ID of a remote object manager to the timestamp of when
  /// the object was last pushed to that object manager (if a push took place).
  std::unordered_map<ClientID, int64_t> recent_pushes;
};
/// Pure interface exposing the subset of ObjectManager operations needed by
/// components that only fetch objects (e.g., the node manager).
class ObjectManagerInterface {
 public:
  /// Start pulling the given object from remote object managers.
  virtual ray::Status Pull(const ObjectID &object_id) = 0;
  /// Cancel any outstanding pull for the given object.
  virtual void CancelPull(const ObjectID &object_id) = 0;
  // Virtual destructor so deleting through the interface is well-defined;
  // `= default` replaces the original empty body with a stray semicolon.
  virtual ~ObjectManagerInterface() = default;
};
// TODO(hme): Add success/failure callbacks for push and pull.

/// ObjectManager transfers objects between the object stores of different
/// nodes. It serves Push/Pull/FreeObjects RPCs from remote object managers
/// and provides Push, Pull, Wait, and FreeObjects to local callers.
class ObjectManager : public ObjectManagerInterface,
                      public rpc::ObjectManagerServiceHandler {
 public:
  /// Implementation of object manager service

  /// Handle push request from remote object manager
  ///
  /// Push request will contain the object which is specified by pull request
  /// the object will be transfered by a sequence of chunks.
  ///
  /// \param request Push request including the object chunk data
  /// \param reply Reply to the sender
  /// \param send_reply_callback Callback of the request
  void HandlePush(const rpc::PushRequest &request, rpc::PushReply *reply,
                  rpc::SendReplyCallback send_reply_callback) override;

  /// Handle pull request from remote object manager
  ///
  /// \param request Pull request
  /// \param reply Reply
  /// \param send_reply_callback Callback of request
  void HandlePull(const rpc::PullRequest &request, rpc::PullReply *reply,
                  rpc::SendReplyCallback send_reply_callback) override;

  /// Handle free objects request
  ///
  /// \param request Free objects request
  /// \param reply Reply
  /// \param send_reply_callback Callback of the request
  void HandleFreeObjects(const rpc::FreeObjectsRequest &request,
                         rpc::FreeObjectsReply *reply,
                         rpc::SendReplyCallback send_reply_callback) override;

  /// Send object to remote object manager
  ///
  /// Object will be transfered as a sequence of chunks, small object(defined in config)
  /// contains only one chunk
  /// \param push_id Unique push id to indicate this push request
  /// \param object_id Object id
  /// \param client_id The id of the remote object manager to send to
  /// \param data_size Data size
  /// \param metadata_size Metadata size
  /// \param chunk_index Chunk index of this object chunk, start with 0
  /// \param rpc_client Rpc client used to send message to remote object manager
  ray::Status SendObjectChunk(const UniqueID &push_id, const ObjectID &object_id,
                              const ClientID &client_id, uint64_t data_size,
                              uint64_t metadata_size, uint64_t chunk_index,
                              std::shared_ptr<rpc::ObjectManagerClient> rpc_client);

  /// Receive object chunk from remote object manager, small object may contain one chunk
  ///
  /// \param client_id Client id of remote object manager which sends this chunk
  /// \param object_id Object id
  /// \param data_size Data size
  /// \param metadata_size Metadata size
  /// \param chunk_index Chunk index
  /// \param data Chunk data
  ray::Status ReceiveObjectChunk(const ClientID &client_id, const ObjectID &object_id,
                                 uint64_t data_size, uint64_t metadata_size,
                                 uint64_t chunk_index, const std::string &data);

  /// Send pull request
  ///
  /// \param object_id Object id
  /// \param client_id Remote server client id
  /// \param rpc_client Rpc client used to send the request
  void SendPullRequest(const ObjectID &object_id, const ClientID &client_id,
                       std::shared_ptr<rpc::ObjectManagerClient> rpc_client);

  /// Get the rpc client according to the client ID
  ///
  /// \param client_id Remote client id, will send rpc request to it
  std::shared_ptr<rpc::ObjectManagerClient> GetRpcClient(const ClientID &client_id);

  /// Get the port of the object manager rpc server.
  int GetServerPort() const { return object_manager_server_.GetPort(); }

 public:
  /// Takes user-defined ObjectDirectoryInterface implementation.
  /// When this constructor is used, the ObjectManager assumes ownership of
  /// the given ObjectDirectory instance.
  ///
  /// \param main_service The main asio io_service.
  /// \param self_node_id The id of the node this object manager runs on.
  /// \param config ObjectManager configuration.
  /// \param object_directory An object implementing the object directory interface.
  explicit ObjectManager(boost::asio::io_service &main_service,
                         const ClientID &self_node_id, const ObjectManagerConfig &config,
                         std::shared_ptr<ObjectDirectoryInterface> object_directory);

  ~ObjectManager();

  /// Subscribe to notifications of objects added to local store.
  /// Upon subscribing, the callback will be invoked for all objects that
  /// already exist in the local store.
  ///
  /// \param callback The callback to invoke when objects are added to the local store.
  /// \return Status of whether adding the subscription succeeded.
  ray::Status SubscribeObjAdded(
      std::function<void(const object_manager::protocol::ObjectInfoT &)> callback);

  /// Subscribe to notifications of objects deleted from local store.
  ///
  /// \param callback The callback to invoke when objects are removed from the local
  /// store.
  /// \return Status of whether adding the subscription succeeded.
  ray::Status SubscribeObjDeleted(std::function<void(const ray::ObjectID &)> callback);

  /// Consider pushing an object to a remote object manager. This object manager
  /// may choose to ignore the Push call (e.g., if Push is called twice in a row
  /// on the same object, the second one might be ignored).
  ///
  /// \param object_id The object's object id.
  /// \param client_id The remote node's client id.
  /// \return Void.
  void Push(const ObjectID &object_id, const ClientID &client_id);

  /// Pull an object from ClientID.
  ///
  /// \param object_id The object's object id.
  /// \return Status of whether the pull request successfully initiated.
  ray::Status Pull(const ObjectID &object_id) override;

  /// Try to Pull an object from one of its expected client locations. If there
  /// are more client locations to try after this attempt, then this method
  /// will try each of the other clients in succession, with a timeout between
  /// each attempt. If the object is received or if the Pull is Canceled before
  /// the timeout, then no more Pull requests for this object will be sent
  /// to other node managers until TryPull is called again.
  ///
  /// \param object_id The object's object id.
  /// \return Void.
  void TryPull(const ObjectID &object_id);

  /// Cancels all requests (Push/Pull) associated with the given ObjectID. This
  /// method is idempotent.
  ///
  /// \param object_id The ObjectID.
  /// \return Void.
  void CancelPull(const ObjectID &object_id) override;

  /// Callback definition for wait: invoked with the objects that were found
  /// and those that remained outstanding, in the caller's original order.
  using WaitCallback = std::function<void(const std::vector<ray::ObjectID> &found,
                                          const std::vector<ray::ObjectID> &remaining)>;
  /// Wait until either num_required_objects are located or wait_ms has elapsed,
  /// then invoke the provided callback.
  ///
  /// \param object_ids The object ids to wait on.
  /// \param timeout_ms The time in milliseconds to wait before invoking the callback.
  /// \param num_required_objects The minimum number of objects required before
  /// invoking the callback.
  /// \param wait_local Whether to wait until objects arrive to this node's store.
  /// \param callback Invoked when either timeout_ms is satisfied OR num_ready_objects
  /// is satisfied.
  /// \return Status of whether the wait successfully initiated.
  ray::Status Wait(const std::vector<ObjectID> &object_ids, int64_t timeout_ms,
                   uint64_t num_required_objects, bool wait_local,
                   const WaitCallback &callback);

  /// Free a list of objects from object store.
  ///
  /// \param object_ids the The list of ObjectIDs to be deleted.
  /// \param local_only Whether keep this request with local object store
  /// or send it to all the object stores.
  void FreeObjects(const std::vector<ObjectID> &object_ids, bool local_only);

  /// Return profiling information and reset the profiling information.
  ///
  /// \return All profiling information that has accumulated since the last call
  /// to this method.
  std::shared_ptr<rpc::ProfileTableData> GetAndResetProfilingInfo();

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;

  /// Record metrics.
  void RecordMetrics() const;

 private:
  /// The test fixture is allowed to inspect private state.
  friend class TestObjectManager;

  /// Per-object state for an in-progress Pull.
  struct PullRequest {
    PullRequest() : retry_timer(nullptr), timer_set(false), client_locations() {}
    /// Timer used to retry the pull from another location after a timeout.
    std::unique_ptr<boost::asio::deadline_timer> retry_timer;
    /// Whether retry_timer is currently armed.
    bool timer_set;
    /// Known locations (client ids) of the object being pulled.
    std::vector<ClientID> client_locations;
  };

  /// Per-request state for an in-progress Wait.
  struct WaitState {
    WaitState(boost::asio::io_service &service, int64_t timeout_ms,
              const WaitCallback &callback)
        : timeout_ms(timeout_ms),
          timeout_timer(std::unique_ptr<boost::asio::deadline_timer>(
              new boost::asio::deadline_timer(
                  service, boost::posix_time::milliseconds(timeout_ms)))),
          callback(callback) {}
    /// The period of time to wait before invoking the callback.
    int64_t timeout_ms;
    /// Whether to wait for objects to become local before returning.
    bool wait_local;
    /// The timer used whenever wait_ms > 0.
    std::unique_ptr<boost::asio::deadline_timer> timeout_timer;
    /// The callback invoked when the wait completes.
    WaitCallback callback;
    /// Ordered input object_ids.
    std::vector<ObjectID> object_id_order;
    /// The objects that have not yet been found.
    std::unordered_set<ObjectID> remaining;
    /// The objects that have been found. Note that if wait_local is true, then
    /// this will only contain objects that are in local_objects_ too.
    std::unordered_set<ObjectID> found;
    /// Objects that have been requested either by Lookup or Subscribe.
    std::unordered_set<ObjectID> requested_objects;
    /// The number of required objects.
    uint64_t num_required_objects;
  };

  /// Creates a wait request and adds it to active_wait_requests_.
  ray::Status AddWaitRequest(const UniqueID &wait_id,
                             const std::vector<ObjectID> &object_ids, int64_t timeout_ms,
                             uint64_t num_required_objects, bool wait_local,
                             const WaitCallback &callback);

  /// Lookup any remaining objects that are not local. This is invoked after
  /// the wait request is created and local objects are identified.
  ray::Status LookupRemainingWaitObjects(const UniqueID &wait_id);

  /// Invoked when lookup for remaining objects has been invoked. This method subscribes
  /// to any remaining objects if wait conditions have not yet been satisfied.
  void SubscribeRemainingWaitObjects(const UniqueID &wait_id);

  /// Completion handler for Wait.
  void WaitComplete(const UniqueID &wait_id);

  /// Spread the Free request to all objects managers.
  ///
  /// \param object_ids the The list of ObjectIDs to be deleted.
  /// \param rpc_clients Clients for the remote object managers to notify.
  void SpreadFreeObjectsRequest(
      const std::vector<ObjectID> &object_ids,
      const std::vector<std::shared_ptr<rpc::ObjectManagerClient>> &rpc_clients);

  /// Handle starting, running, and stopping asio rpc_service.
  void StartRpcService();
  void RunRpcService();
  void StopRpcService();

  /// Handle an object being added to this node. This adds the object to the
  /// directory, pushes the object to other nodes if necessary, and cancels any
  /// outstanding Pull requests for the object.
  void HandleObjectAdded(const object_manager::protocol::ObjectInfoT &object_info);

  /// Register object remove with directory.
  void NotifyDirectoryObjectDeleted(const ObjectID &object_id);

  /// This is used to notify the main thread that the sending of a chunk has
  /// completed.
  ///
  /// \param object_id The ID of the object that was sent.
  /// \param client_id The ID of the client that the chunk was sent to.
  /// \param chunk_index The index of the chunk.
  /// \param start_time_us The time when the object manager began sending the
  /// chunk.
  /// \param end_time_us The time when the object manager finished sending the
  /// chunk.
  /// \param status The status of the send (e.g., did it succeed or fail).
  /// \return Void.
  void HandleSendFinished(const ObjectID &object_id, const ClientID &client_id,
                          uint64_t chunk_index, double start_time_us, double end_time_us,
                          ray::Status status);

  /// This is used to notify the main thread that the receiving of a chunk has
  /// completed.
  ///
  /// \param object_id The ID of the object that was received.
  /// \param client_id The ID of the client that the chunk was received from.
  /// \param chunk_index The index of the chunk.
  /// \param start_time_us The time when the object manager began receiving the
  /// chunk.
  /// \param end_time_us The time when the object manager finished receiving the
  /// chunk.
  /// \param status The status of the receive (e.g., did it succeed or fail).
  /// \return Void.
  void HandleReceiveFinished(const ObjectID &object_id, const ClientID &client_id,
                             uint64_t chunk_index, double start_time_us,
                             double end_time_us, ray::Status status);

  /// Handle Push task timeout.
  void HandlePushTaskTimeout(const ObjectID &object_id, const ClientID &client_id);

  /// The id of the node this object manager runs on.
  ClientID self_node_id_;
  const ObjectManagerConfig config_;
  std::shared_ptr<ObjectDirectoryInterface> object_directory_;
  ObjectStoreNotificationManager store_notification_;
  ObjectBufferPool buffer_pool_;

  /// Weak reference to main service. We ensure this object is destroyed before
  /// main_service_ is stopped.
  boost::asio::io_service *main_service_;

  /// Multi-thread asio service, deal with all outgoing and incoming RPC request.
  boost::asio::io_service rpc_service_;

  /// Keep rpc service running when no task in rpc service.
  boost::asio::io_service::work rpc_work_;

  /// The thread pool used for running `rpc_service`.
  /// Data copy operations during request are done in this thread pool.
  std::vector<std::thread> rpc_threads_;

  /// Mapping from locally available objects to information about those objects
  /// including when the object was last pushed to other object managers.
  std::unordered_map<ObjectID, LocalObjectInfo> local_objects_;

  /// This is used as the callback identifier in Pull for
  /// SubscribeObjectLocations. We only need one identifier because we never need to
  /// subscribe multiple times to the same object during Pull.
  UniqueID object_directory_pull_callback_id_ = UniqueID::FromRandom();

  /// A set of active wait requests.
  std::unordered_map<UniqueID, WaitState> active_wait_requests_;

  /// Maintains a map of push requests that have not been fulfilled due to an object not
  /// being local. Objects are removed from this map after push_timeout_ms have elapsed.
  std::unordered_map<
      ObjectID,
      std::unordered_map<ClientID, std::unique_ptr<boost::asio::deadline_timer>>>
      unfulfilled_push_requests_;

  /// The objects that this object manager is currently trying to fetch from
  /// remote object managers.
  std::unordered_map<ObjectID, PullRequest> pull_requests_;

  /// Profiling events that are to be batched together and added to the profile
  /// table in the GCS.
  std::vector<rpc::ProfileTableData::ProfileEvent> profile_events_;

  /// mutex lock used to protect profile_events_, profile_events_ is used in main thread
  /// and rpc thread.
  std::mutex profile_mutex_;

  /// Internally maintained random number generator.
  std::mt19937_64 gen_;

  /// The gRPC server.
  rpc::GrpcServer object_manager_server_;

  /// The gRPC service.
  rpc::ObjectManagerGrpcService object_manager_service_;

  /// The client call manager used to deal with reply.
  rpc::ClientCallManager client_call_manager_;

  /// Client id - object manager gRPC client.
  std::unordered_map<ClientID, std::shared_ptr<rpc::ObjectManagerClient>>
      remote_object_manager_clients_;
};
} // namespace ray
#endif // RAY_OBJECT_MANAGER_OBJECT_MANAGER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/object_store_notification_manager.cc
|
C++
|
#include <future>
#include <iostream>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include "ray/common/status.h"
#include "ray/common/common_protocol.h"
#include "ray/object_manager/object_store_notification_manager.h"
#include "ray/util/util.h"
namespace ray {
/// Connect to the plasma store, subscribe to its notification channel, and
/// adopt the returned raw socket into a boost::asio stream so notifications
/// can be read asynchronously on `io_service`. Starts the read loop.
ObjectStoreNotificationManager::ObjectStoreNotificationManager(
    boost::asio::io_service &io_service, const std::string &store_socket_name)
    : store_client_(),
      length_(0),
      num_adds_processed_(0),
      num_removes_processed_(0),
      socket_(io_service) {
  // presumably 300 is the connection retry count — TODO confirm against the
  // plasma client Connect() signature.
  RAY_ARROW_CHECK_OK(store_client_.Connect(store_socket_name.c_str(), "", 0, 300));
  // Subscribe() hands back a raw fd (c_socket_) carrying notifications.
  RAY_ARROW_CHECK_OK(store_client_.Subscribe(&c_socket_));
  boost::system::error_code ec;
#if defined(BOOST_ASIO_HAS_LOCAL_SOCKETS)
  local_stream_protocol sp;
#else  // TODO(mehrdadn): HACK: FIXME: This is just to get things compiling!
  local_stream_protocol sp(AF_UNIX, 0);
#endif
  // Wrap the raw fd in the asio socket so async_read can be used on it.
  socket_.assign(sp, c_socket_, ec);
  assert(!ec.value());
  // Kick off the first asynchronous notification read.
  NotificationWait();
}
/// Tear down the plasma store connection; aborts if the disconnect fails.
ObjectStoreNotificationManager::~ObjectStoreNotificationManager() {
  RAY_ARROW_CHECK_OK(store_client_.Disconnect());
}
void ObjectStoreNotificationManager::NotificationWait() {
boost::asio::async_read(socket_, boost::asio::buffer(&length_, sizeof(length_)),
boost::bind(&ObjectStoreNotificationManager::ProcessStoreLength,
this, boost::asio::placeholders::error));
}
/// Handler for the length-prefix read: size the notification buffer and start
/// the asynchronous read of the notification body.
///
/// \param error Result of the preceding async_read of the length prefix.
void ObjectStoreNotificationManager::ProcessStoreLength(
    const boost::system::error_code &error) {
  if (error) {
    // When shutting down a cluster, it's possible that the plasma store is killed
    // earlier than raylet, in this case we don't want raylet to crash, we instead
    // log an error message and exit.
    RAY_LOG(ERROR) << "Failed to process store length: "
                   << boost_to_ray_status(error).ToString()
                   << ", most likely plasma store is down, raylet will exit";
    // Exit raylet process.
    _exit(kRayletStoreErrorExitCode);
  }
  // Only resize once the read is known to have succeeded: on failure length_
  // may hold garbage, and resizing to it could attempt a huge allocation
  // before the error was even examined. (The original resized first.)
  notification_.resize(length_);
  boost::asio::async_read(
      socket_, boost::asio::buffer(notification_),
      boost::bind(&ObjectStoreNotificationManager::ProcessStoreNotification, this,
                  boost::asio::placeholders::error));
}
/// Handler for the notification-body read: decode the flatbuffer payload,
/// dispatch each add/remove entry to the registered handlers, and rearm the
/// read loop for the next notification.
void ObjectStoreNotificationManager::ProcessStoreNotification(
    const boost::system::error_code &error) {
  if (error) {
    RAY_LOG(FATAL)
        << "Problem communicating with the object store from raylet, check logs or "
        << "dmesg for previous errors: " << boost_to_ray_status(error).ToString();
  }
  const auto *batch = flatbuffers::GetRoot<object_manager::protocol::PlasmaNotification>(
      notification_.data());
  const auto *entries = batch->object_info();
  for (size_t idx = 0; idx < entries->size(); ++idx) {
    const auto *entry = entries->Get(idx);
    const ObjectID object_id = ObjectID::FromPlasmaIdBinary(entry->object_id()->str());
    if (entry->is_deletion()) {
      ProcessStoreRemove(object_id);
    } else {
      object_manager::protocol::ObjectInfoT unpacked;
      entry->UnPackTo(&unpacked);
      ProcessStoreAdd(unpacked);
    }
  }
  NotificationWait();
}
// Re-broadcast an object-added event to every registered subscriber and
// bump the counter reported by DebugString().
void ObjectStoreNotificationManager::ProcessStoreAdd(
    const object_manager::protocol::ObjectInfoT &object_info) {
  for (const auto &add_callback : add_handlers_) {
    add_callback(object_info);
  }
  ++num_adds_processed_;
}
// Re-broadcast an object-removed event to every registered subscriber and
// bump the counter reported by DebugString().
void ObjectStoreNotificationManager::ProcessStoreRemove(const ObjectID &object_id) {
  for (const auto &remove_callback : rem_handlers_) {
    remove_callback(object_id);
  }
  ++num_removes_processed_;
}
// Register a callback to be invoked for every future object-added event.
void ObjectStoreNotificationManager::SubscribeObjAdded(
    std::function<void(const object_manager::protocol::ObjectInfoT &)> callback) {
  add_handlers_.emplace_back(std::move(callback));
}
// Register a callback to be invoked for every future object-removed event.
void ObjectStoreNotificationManager::SubscribeObjDeleted(
    std::function<void(const ObjectID &)> callback) {
  rem_handlers_.emplace_back(std::move(callback));
}
// Human-readable summary of the notification counters, used by the raylet's
// periodic debug dump.
std::string ObjectStoreNotificationManager::DebugString() const {
  std::string result = "ObjectStoreNotificationManager:";
  result += "\n- num adds processed: " + std::to_string(num_adds_processed_);
  result += "\n- num removes processed: " + std::to_string(num_removes_processed_);
  return result;
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/object_store_notification_manager.h
|
C/C++ Header
|
#ifndef RAY_OBJECT_MANAGER_OBJECT_STORE_CLIENT_H
#define RAY_OBJECT_MANAGER_OBJECT_STORE_CLIENT_H
#include <list>
#include <memory>
#include <vector>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/bind.hpp>
#include "plasma/client.h"
#include "ray/common/client_connection.h"
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/object_manager/object_directory.h"
namespace ray {
/// \class ObjectStoreClientPool
///
/// Encapsulates notification handling from the object store.
class ObjectStoreNotificationManager {
 public:
  /// Constructor.
  ///
  /// \param io_service The asio service to be used.
  /// \param store_socket_name The store socket to connect to.
  ObjectStoreNotificationManager(boost::asio::io_service &io_service,
                                 const std::string &store_socket_name);
  ~ObjectStoreNotificationManager();
  /// Subscribe to notifications of objects added to local store.
  /// Upon subscribing, the callback will be invoked for all objects that
  /// already exist in the local store
  ///
  /// \param callback A callback expecting an ObjectID.
  void SubscribeObjAdded(
      std::function<void(const object_manager::protocol::ObjectInfoT &)> callback);
  /// Subscribe to notifications of objects deleted from local store.
  ///
  /// \param callback A callback expecting an ObjectID.
  void SubscribeObjDeleted(std::function<void(const ray::ObjectID &)> callback);
  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;
 private:
  /// Async loop for handling object store notifications.
  void NotificationWait();
  /// Completion handler for the length-prefix read of a notification.
  void ProcessStoreLength(const boost::system::error_code &error);
  /// Completion handler for the notification payload read.
  void ProcessStoreNotification(const boost::system::error_code &error);
  /// Support for rebroadcasting object add/rem events.
  void ProcessStoreAdd(const object_manager::protocol::ObjectInfoT &object_info);
  void ProcessStoreRemove(const ObjectID &object_id);
  /// Callbacks fired for every object-added notification.
  std::vector<std::function<void(const object_manager::protocol::ObjectInfoT &)>>
      add_handlers_;
  /// Callbacks fired for every object-removed notification.
  std::vector<std::function<void(const ray::ObjectID &)>> rem_handlers_;
  /// Connection to the plasma store used to subscribe to notifications.
  plasma::PlasmaClient store_client_;
  /// Raw notification socket fd obtained from the plasma subscription.
  int c_socket_;
  /// Length prefix of the notification currently being read.
  int64_t length_;
  /// Counter of add notifications processed (debug output only).
  int64_t num_adds_processed_;
  /// Counter of remove notifications processed (debug output only).
  int64_t num_removes_processed_;
  /// Buffer holding the payload of the notification currently being read.
  std::vector<uint8_t> notification_;
  /// Asio socket wrapping c_socket_ for async reads.
  local_stream_protocol::socket socket_;
};
} // namespace ray
#endif // RAY_OBJECT_MANAGER_OBJECT_STORE_CLIENT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/test/object_manager_stress_test.cc
|
C++
|
#include <unistd.h>
#include <chrono>
#include <iostream>
#include <random>
#include <thread>
#include "gtest/gtest.h"
#include "ray/common/status.h"
#include "ray/object_manager/object_manager.h"
namespace ray {
using rpc::GcsNodeInfo;
// Path to the plasma store executable, set from argv[1] in main().
std::string store_executable;
static inline void flushall_redis(void) {
redisContext *context = redisConnect("127.0.0.1", 6379);
freeReplyObject(redisCommand(context, "FLUSHALL"));
redisFree(context);
}
// Milliseconds since the steady-clock epoch. Monotonic, so it is suitable for
// measuring elapsed time, not wall-clock time.
int64_t current_time_ms() {
  using std::chrono::duration_cast;
  using std::chrono::milliseconds;
  using std::chrono::steady_clock;
  return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
}
// Minimal stand-in for a raylet: owns an ObjectManager and registers the node
// with the GCS so the other test server can discover it.
class MockServer {
 public:
  MockServer(boost::asio::io_service &main_service,
             const ObjectManagerConfig &object_manager_config,
             std::shared_ptr<gcs::GcsClient> gcs_client)
      : node_id_(ClientID::FromRandom()),
        config_(object_manager_config),
        gcs_client_(gcs_client),
        object_manager_(main_service, node_id_, object_manager_config,
                        std::make_shared<ObjectDirectory>(main_service, gcs_client_)) {
    RAY_CHECK_OK(RegisterGcs(main_service));
  }
  ~MockServer() { RAY_CHECK_OK(gcs_client_->Nodes().UnregisterSelf()); }
 private:
  // Register this node's addresses with the GCS.
  // NOTE(review): node_manager_port is set to the object manager's port,
  // presumably because no node manager runs in this test — confirm.
  ray::Status RegisterGcs(boost::asio::io_service &io_service) {
    auto object_manager_port = object_manager_.GetServerPort();
    GcsNodeInfo node_info;
    node_info.set_node_id(node_id_.Binary());
    node_info.set_node_manager_address("127.0.0.1");
    node_info.set_node_manager_port(object_manager_port);
    node_info.set_object_manager_port(object_manager_port);
    ray::Status status = gcs_client_->Nodes().RegisterSelf(node_info);
    return status;
  }
  friend class StressTestObjectManager;
  ClientID node_id_;
  ObjectManagerConfig config_;
  std::shared_ptr<gcs::GcsClient> gcs_client_;
  ObjectManager object_manager_;
};
// Fixture that spins up two plasma stores and two MockServer object managers
// backed by a shared (flushed) redis GCS, plus plasma clients for each store.
class TestObjectManagerBase : public ::testing::Test {
 public:
  TestObjectManagerBase() {}
  // Launch a plasma store as a background shell process; returns the socket
  // path and writes the child's pid to "<socket>.pid" for StopStore.
  std::string StartStore(const std::string &id) {
    std::string store_id = "/tmp/store";
    store_id = store_id + id;
    std::string store_pid = store_id + ".pid";
    std::string plasma_command = store_executable + " -m 1000000000 -s " + store_id +
                                 " 1> /dev/null 2> /dev/null &" + " echo $! > " +
                                 store_pid;
    RAY_LOG(DEBUG) << plasma_command;
    int ec = system(plasma_command.c_str());
    RAY_CHECK(ec == 0);
    // Give the store time to come up before clients connect.
    sleep(1);
    return store_id;
  }
  // Kill the plasma store process recorded by StartStore.
  void StopStore(const std::string &store_id) {
    std::string store_pid = store_id + ".pid";
    std::string kill_1 = "kill -9 `cat " + store_pid + "`";
    int s = system(kill_1.c_str());
    ASSERT_TRUE(!s);
  }
  void SetUp() {
    flushall_redis();
    // start store
    store_id_1 = StartStore(UniqueID::FromRandom().Hex());
    store_id_2 = StartStore(UniqueID::FromRandom().Hex());
    unsigned int pull_timeout_ms = 1000;
    uint64_t object_chunk_size = static_cast<uint64_t>(std::pow(10, 3));
    int push_timeout_ms = 10000;
    // start first server
    gcs::GcsClientOptions client_options("127.0.0.1", 6379, /*password*/ "",
                                         /*is_test_client=*/true);
    gcs_client_1 = std::make_shared<gcs::RedisGcsClient>(client_options);
    RAY_CHECK_OK(gcs_client_1->Connect(main_service));
    ObjectManagerConfig om_config_1;
    om_config_1.store_socket_name = store_id_1;
    om_config_1.pull_timeout_ms = pull_timeout_ms;
    om_config_1.object_chunk_size = object_chunk_size;
    om_config_1.push_timeout_ms = push_timeout_ms;
    om_config_1.object_manager_port = 0;
    om_config_1.rpc_service_threads_number = 3;
    server1.reset(new MockServer(main_service, om_config_1, gcs_client_1));
    // start second server
    gcs_client_2 = std::make_shared<gcs::RedisGcsClient>(client_options);
    RAY_CHECK_OK(gcs_client_2->Connect(main_service));
    ObjectManagerConfig om_config_2;
    om_config_2.store_socket_name = store_id_2;
    om_config_2.pull_timeout_ms = pull_timeout_ms;
    om_config_2.object_chunk_size = object_chunk_size;
    om_config_2.push_timeout_ms = push_timeout_ms;
    om_config_2.object_manager_port = 0;
    om_config_2.rpc_service_threads_number = 3;
    server2.reset(new MockServer(main_service, om_config_2, gcs_client_2));
    // connect to stores.
    RAY_ARROW_CHECK_OK(client1.Connect(store_id_1));
    RAY_ARROW_CHECK_OK(client2.Connect(store_id_2));
  }
  void TearDown() {
    arrow::Status client1_status = client1.Disconnect();
    arrow::Status client2_status = client2.Disconnect();
    ASSERT_TRUE(client1_status.ok() && client2_status.ok());
    gcs_client_1->Disconnect();
    gcs_client_2->Disconnect();
    this->server1.reset();
    this->server2.reset();
    StopStore(store_id_1);
    StopStore(store_id_2);
  }
  // Create and seal a random object of data_size bytes in the given store.
  ObjectID WriteDataToClient(plasma::PlasmaClient &client, int64_t data_size) {
    ObjectID object_id = ObjectID::FromRandom();
    RAY_LOG(DEBUG) << "ObjectID Created: " << object_id;
    uint8_t metadata[] = {5};
    int64_t metadata_size = sizeof(metadata);
    std::shared_ptr<Buffer> data;
    RAY_ARROW_CHECK_OK(
        client.Create(object_id.ToPlasmaId(), data_size, metadata, metadata_size, &data));
    RAY_ARROW_CHECK_OK(client.Seal(object_id.ToPlasmaId()));
    return object_id;
  }
  // Record object-added events observed on each server.
  void object_added_handler_1(ObjectID object_id) { v1.push_back(object_id); };
  void object_added_handler_2(ObjectID object_id) { v2.push_back(object_id); };
 protected:
  std::thread p;
  boost::asio::io_service main_service;
  std::shared_ptr<gcs::GcsClient> gcs_client_1;
  std::shared_ptr<gcs::GcsClient> gcs_client_2;
  std::unique_ptr<MockServer> server1;
  std::unique_ptr<MockServer> server2;
  plasma::PlasmaClient client1;
  plasma::PlasmaClient client2;
  // Objects seen added on server1 / server2 respectively.
  std::vector<ObjectID> v1;
  std::vector<ObjectID> v2;
  std::string store_id_1;
  std::string store_id_2;
};
// Stress test: runs each TransferPattern in sequence, pushing/pulling many
// objects between the two servers and verifying object contents and hashes.
class StressTestObjectManager : public TestObjectManagerBase {
 public:
  // Transfer patterns exercised, one after another, by the async loop.
  enum class TransferPattern {
    PUSH_A_B,
    PUSH_B_A,
    BIDIRECTIONAL_PUSH,
    PULL_A_B,
    PULL_B_A,
    BIDIRECTIONAL_PULL,
    BIDIRECTIONAL_PULL_VARIABLE_DATA_SIZE,
  };
  // Index into async_loop_patterns of the pattern currently running.
  int async_loop_index = -1;
  // Number of add events expected before the current pattern completes.
  size_t num_expected_objects;
  std::vector<TransferPattern> async_loop_patterns = {
      TransferPattern::PUSH_A_B,
      TransferPattern::PUSH_B_A,
      TransferPattern::BIDIRECTIONAL_PUSH,
      TransferPattern::PULL_A_B,
      TransferPattern::PULL_B_A,
      TransferPattern::BIDIRECTIONAL_PULL,
      TransferPattern::BIDIRECTIONAL_PULL_VARIABLE_DATA_SIZE};
  int num_connected_clients = 0;
  ClientID node_id_1;
  ClientID node_id_2;
  int64_t start_time;
  // Wait until both GCS subscriptions have seen both nodes (2 x 2 = 4 events)
  // before starting the test chain.
  void WaitConnections() {
    node_id_1 = gcs_client_1->Nodes().GetSelfId();
    node_id_2 = gcs_client_2->Nodes().GetSelfId();
    RAY_CHECK_OK(gcs_client_1->Nodes().AsyncSubscribeToNodeChange(
        [this](const ClientID &node_id, const GcsNodeInfo &data) {
          if (node_id == node_id_1 || node_id == node_id_2) {
            num_connected_clients += 1;
          }
          if (num_connected_clients == 4) {
            StartTests();
          }
        },
        nullptr));
    RAY_CHECK_OK(gcs_client_2->Nodes().AsyncSubscribeToNodeChange(
        [this](const ClientID &node_id, const GcsNodeInfo &data) {
          if (node_id == node_id_1 || node_id == node_id_2) {
            num_connected_clients += 1;
          }
          if (num_connected_clients == 4) {
            StartTests();
          }
        },
        nullptr));
  }
  void StartTests() {
    TestConnections();
    AddTransferTestHandlers();
    TransferTestNext();
  }
  // Complete the current pattern once both sides have seen all expected adds.
  void AddTransferTestHandlers() {
    ray::Status status = ray::Status::OK();
    status = server1->object_manager_.SubscribeObjAdded(
        [this](const object_manager::protocol::ObjectInfoT &object_info) {
          object_added_handler_1(ObjectID::FromBinary(object_info.object_id));
          if (v1.size() == num_expected_objects && v1.size() == v2.size()) {
            TransferTestComplete();
          }
        });
    RAY_CHECK_OK(status);
    status = server2->object_manager_.SubscribeObjAdded(
        [this](const object_manager::protocol::ObjectInfoT &object_info) {
          object_added_handler_2(ObjectID::FromBinary(object_info.object_id));
          if (v2.size() == num_expected_objects && v1.size() == v2.size()) {
            TransferTestComplete();
          }
        });
    RAY_CHECK_OK(status);
  }
  // Advance to the next pattern, or stop the event loop when all are done.
  void TransferTestNext() {
    async_loop_index += 1;
    if ((size_t)async_loop_index < async_loop_patterns.size()) {
      TransferPattern pattern = async_loop_patterns[async_loop_index];
      TransferTestExecute(100, 3 * std::pow(10, 3) - 1, pattern);
    } else {
      main_service.stop();
    }
  }
  plasma::ObjectBuffer GetObject(plasma::PlasmaClient &client, ObjectID &object_id) {
    plasma::ObjectBuffer object_buffer;
    plasma::ObjectID plasma_id = object_id.ToPlasmaId();
    RAY_ARROW_CHECK_OK(client.Get(&plasma_id, 1, 0, &object_buffer));
    return object_buffer;
  }
  // Compute the plasma hash of an object.
  // NOTE: returns a pointer into a single shared static buffer that every call
  // overwrites; callers must copy the bytes out before calling again.
  static unsigned char *GetDigest(plasma::PlasmaClient &client, ObjectID &object_id) {
    const int64_t size = sizeof(uint64_t);
    static unsigned char digest_1[size];
    RAY_ARROW_CHECK_OK(client.Hash(object_id.ToPlasmaId(), &digest_1[0]));
    return digest_1;
  }
  // Assert the two objects have byte-identical data and metadata.
  void CompareObjects(ObjectID &object_id_1, ObjectID &object_id_2) {
    plasma::ObjectBuffer object_buffer_1 = GetObject(client1, object_id_1);
    plasma::ObjectBuffer object_buffer_2 = GetObject(client2, object_id_2);
    uint8_t *data_1 = const_cast<uint8_t *>(object_buffer_1.data->data());
    uint8_t *data_2 = const_cast<uint8_t *>(object_buffer_2.data->data());
    ASSERT_EQ(object_buffer_1.data->size(), object_buffer_2.data->size());
    ASSERT_EQ(object_buffer_1.metadata->size(), object_buffer_2.metadata->size());
    int64_t total_size = object_buffer_1.data->size() + object_buffer_1.metadata->size();
    RAY_LOG(DEBUG) << "total_size " << total_size;
    for (int64_t i = 0; i < total_size; ++i) {
      ASSERT_TRUE(data_1[i] == data_2[i]);
    }
  }
  // Assert the two objects hash to the same digest.
  void CompareHashes(ObjectID &object_id_1, ObjectID &object_id_2) {
    const int64_t size = sizeof(uint64_t);
    // Bug fix: the old code stored both GetDigest results in `static` pointers,
    // so (a) the digests were computed only on the first invocation and (b)
    // both pointers aliased GetDigest's single static buffer, making the
    // comparison compare the buffer with itself — i.e. it always passed.
    // Copy the first digest out before computing the second.
    unsigned char digest_1[size];
    unsigned char *shared_buffer = GetDigest(client1, object_id_1);
    for (int i = 0; i < size; ++i) {
      digest_1[i] = shared_buffer[i];
    }
    unsigned char *digest_2 = GetDigest(client2, object_id_2);
    for (int i = 0; i < size; ++i) {
      ASSERT_TRUE(digest_1[i] == digest_2[i]);
    }
  }
  // Verify the completed pattern: both sides saw the same object set, and each
  // pair of objects matches bytewise and by hash. Then run the next pattern.
  void TransferTestComplete() {
    int64_t elapsed = current_time_ms() - start_time;
    RAY_LOG(INFO) << "TransferTestComplete: "
                  << static_cast<int>(async_loop_patterns[async_loop_index]) << " "
                  << v1.size() << " " << elapsed;
    ASSERT_TRUE(v1.size() == v2.size());
    for (size_t i = 0; i < v1.size(); ++i) {
      ASSERT_TRUE(std::find(v1.begin(), v1.end(), v2[i]) != v1.end());
    }
    // Compare objects and their hashes.
    for (size_t i = 0; i < v1.size(); ++i) {
      ObjectID object_id_2 = v2[i];
      ObjectID object_id_1 =
          v1[std::distance(v1.begin(), std::find(v1.begin(), v1.end(), v2[i]))];
      CompareHashes(object_id_1, object_id_2);
      CompareObjects(object_id_1, object_id_2);
    }
    v1.clear();
    v2.clear();
    TransferTestNext();
  }
  // Create num_trials objects and transfer them according to transfer_pattern.
  void TransferTestExecute(int num_trials, int64_t data_size,
                           TransferPattern transfer_pattern) {
    ClientID node_id_1 = gcs_client_1->Nodes().GetSelfId();
    ClientID node_id_2 = gcs_client_2->Nodes().GetSelfId();
    ray::Status status = ray::Status::OK();
    if (transfer_pattern == TransferPattern::BIDIRECTIONAL_PULL ||
        transfer_pattern == TransferPattern::BIDIRECTIONAL_PUSH ||
        transfer_pattern == TransferPattern::BIDIRECTIONAL_PULL_VARIABLE_DATA_SIZE) {
      num_expected_objects = (size_t)2 * num_trials;
    } else {
      num_expected_objects = (size_t)num_trials;
    }
    start_time = current_time_ms();
    switch (transfer_pattern) {
    case TransferPattern::PUSH_A_B: {
      for (int i = 0; i < num_trials; ++i) {
        ObjectID oid1 = WriteDataToClient(client1, data_size);
        server1->object_manager_.Push(oid1, node_id_2);
      }
    } break;
    case TransferPattern::PUSH_B_A: {
      for (int i = 0; i < num_trials; ++i) {
        ObjectID oid2 = WriteDataToClient(client2, data_size);
        server2->object_manager_.Push(oid2, node_id_1);
      }
    } break;
    case TransferPattern::BIDIRECTIONAL_PUSH: {
      for (int i = 0; i < num_trials; ++i) {
        ObjectID oid1 = WriteDataToClient(client1, data_size);
        server1->object_manager_.Push(oid1, node_id_2);
        ObjectID oid2 = WriteDataToClient(client2, data_size);
        server2->object_manager_.Push(oid2, node_id_1);
      }
    } break;
    case TransferPattern::PULL_A_B: {
      for (int i = 0; i < num_trials; ++i) {
        ObjectID oid1 = WriteDataToClient(client1, data_size);
        status = server2->object_manager_.Pull(oid1);
      }
    } break;
    case TransferPattern::PULL_B_A: {
      for (int i = 0; i < num_trials; ++i) {
        ObjectID oid2 = WriteDataToClient(client2, data_size);
        status = server1->object_manager_.Pull(oid2);
      }
    } break;
    case TransferPattern::BIDIRECTIONAL_PULL: {
      for (int i = 0; i < num_trials; ++i) {
        ObjectID oid1 = WriteDataToClient(client1, data_size);
        status = server2->object_manager_.Pull(oid1);
        ObjectID oid2 = WriteDataToClient(client2, data_size);
        status = server1->object_manager_.Pull(oid2);
      }
    } break;
    case TransferPattern::BIDIRECTIONAL_PULL_VARIABLE_DATA_SIZE: {
      std::random_device rd;
      std::mt19937 gen(rd());
      std::uniform_int_distribution<> dis(1, 50);
      for (int i = 0; i < num_trials; ++i) {
        ObjectID oid1 = WriteDataToClient(client1, data_size + dis(gen));
        status = server2->object_manager_.Pull(oid1);
        ObjectID oid2 = WriteDataToClient(client2, data_size + dis(gen));
        status = server1->object_manager_.Pull(oid2);
      }
    } break;
    default: {
      RAY_LOG(FATAL) << "No case for transfer_pattern "
                     << static_cast<int>(transfer_pattern);
    } break;
    }
  }
  // Log the node info both servers registered with the GCS.
  void TestConnections() {
    RAY_LOG(DEBUG) << "\n"
                   << "Server node ids:"
                   << "\n";
    ClientID node_id_1 = gcs_client_1->Nodes().GetSelfId();
    ClientID node_id_2 = gcs_client_2->Nodes().GetSelfId();
    RAY_LOG(DEBUG) << "Server 1: " << node_id_1 << "\n"
                   << "Server 2: " << node_id_2;
    RAY_LOG(DEBUG) << "\n"
                   << "All connected nodes:"
                   << "\n";
    auto data = gcs_client_1->Nodes().Get(node_id_1);
    RAY_LOG(DEBUG) << "NodeID=" << ClientID::FromBinary(data->node_id()) << "\n"
                   << "NodeIp=" << data->node_manager_address() << "\n"
                   << "NodePort=" << data->node_manager_port();
    auto data2 = gcs_client_1->Nodes().Get(node_id_2);
    RAY_LOG(DEBUG) << "NodeID=" << ClientID::FromBinary(data2->node_id()) << "\n"
                   << "NodeIp=" << data2->node_manager_address() << "\n"
                   << "NodePort=" << data2->node_manager_port();
  }
};
TEST_F(StressTestObjectManager, StartStressTestObjectManager) {
  // Kick off WaitConnections() from inside the event loop; the async test
  // chain calls main_service.stop() when every pattern has completed.
  auto AsyncStartTests = main_service.wrap([this]() { WaitConnections(); });
  AsyncStartTests();
  main_service.run();
}
} // namespace ray
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
ray::store_executable = std::string(argv[1]);
return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/object_manager/test/object_manager_test.cc
|
C++
|
#include <unistd.h>
#include <iostream>
#include <thread>
#include "gtest/gtest.h"
#include "ray/common/status.h"
#include "ray/object_manager/object_manager.h"
namespace {
// Path to the plasma store executable, set from argv[1] in main().
std::string store_executable;
// Timeout (ms) used by the Wait tests, set from argv[2] in main().
int64_t wait_timeout_ms;
}  // namespace
namespace ray {
using rpc::GcsNodeInfo;
static inline void flushall_redis(void) {
redisContext *context = redisConnect("127.0.0.1", 6379);
freeReplyObject(redisCommand(context, "FLUSHALL"));
redisFree(context);
}
// Minimal stand-in for a raylet: owns an ObjectManager and registers the node
// with the GCS so the other test server can discover it.
class MockServer {
 public:
  MockServer(boost::asio::io_service &main_service,
             const ObjectManagerConfig &object_manager_config,
             std::shared_ptr<gcs::GcsClient> gcs_client)
      : node_id_(ClientID::FromRandom()),
        config_(object_manager_config),
        gcs_client_(gcs_client),
        object_manager_(main_service, node_id_, object_manager_config,
                        std::make_shared<ObjectDirectory>(main_service, gcs_client_)) {
    RAY_CHECK_OK(RegisterGcs(main_service));
  }
  ~MockServer() { RAY_CHECK_OK(gcs_client_->Nodes().UnregisterSelf()); }
 private:
  // Register this node's addresses with the GCS.
  // NOTE(review): node_manager_port is set to the object manager's port,
  // presumably because no node manager runs in this test — confirm.
  ray::Status RegisterGcs(boost::asio::io_service &io_service) {
    auto object_manager_port = object_manager_.GetServerPort();
    GcsNodeInfo node_info;
    node_info.set_node_id(node_id_.Binary());
    node_info.set_node_manager_address("127.0.0.1");
    node_info.set_node_manager_port(object_manager_port);
    node_info.set_object_manager_port(object_manager_port);
    ray::Status status = gcs_client_->Nodes().RegisterSelf(node_info);
    return status;
  }
  friend class TestObjectManager;
  ClientID node_id_;
  ObjectManagerConfig config_;
  std::shared_ptr<gcs::GcsClient> gcs_client_;
  ObjectManager object_manager_;
};
// Fixture that spins up two plasma stores and two MockServer object managers
// backed by a shared (flushed) redis GCS, plus plasma clients for each store.
class TestObjectManagerBase : public ::testing::Test {
 public:
  TestObjectManagerBase() {}
  // Launch a plasma store as a background shell process; returns the socket
  // path and writes the child's pid to "<socket>.pid" for StopStore.
  std::string StartStore(const std::string &id) {
    std::string store_id = "/tmp/store";
    store_id = store_id + id;
    std::string store_pid = store_id + ".pid";
    std::string plasma_command = store_executable + " -m 1000000000 -s " + store_id +
                                 " 1> /dev/null 2> /dev/null &" + " echo $! > " +
                                 store_pid;
    RAY_LOG(DEBUG) << plasma_command;
    int ec = system(plasma_command.c_str());
    RAY_CHECK(ec == 0);
    // Give the store time to come up before clients connect.
    sleep(1);
    return store_id;
  }
  // Kill the plasma store process recorded by StartStore.
  void StopStore(std::string store_id) {
    std::string store_pid = store_id + ".pid";
    std::string kill_1 = "kill -9 `cat " + store_pid + "`";
    ASSERT_TRUE(!system(kill_1.c_str()));
  }
  void SetUp() {
    flushall_redis();
    // start store
    store_id_1 = StartStore(UniqueID::FromRandom().Hex());
    store_id_2 = StartStore(UniqueID::FromRandom().Hex());
    unsigned int pull_timeout_ms = 1;
    push_timeout_ms = 1000;
    // start first server
    gcs::GcsClientOptions client_options("127.0.0.1", 6379, /*password*/ "",
                                         /*is_test_client=*/true);
    gcs_client_1 = std::make_shared<gcs::RedisGcsClient>(client_options);
    RAY_CHECK_OK(gcs_client_1->Connect(main_service));
    ObjectManagerConfig om_config_1;
    om_config_1.store_socket_name = store_id_1;
    om_config_1.pull_timeout_ms = pull_timeout_ms;
    om_config_1.object_chunk_size = object_chunk_size;
    om_config_1.push_timeout_ms = push_timeout_ms;
    om_config_1.object_manager_port = 0;
    om_config_1.rpc_service_threads_number = 3;
    server1.reset(new MockServer(main_service, om_config_1, gcs_client_1));
    // start second server
    gcs_client_2 = std::make_shared<gcs::RedisGcsClient>(client_options);
    RAY_CHECK_OK(gcs_client_2->Connect(main_service));
    ObjectManagerConfig om_config_2;
    om_config_2.store_socket_name = store_id_2;
    om_config_2.pull_timeout_ms = pull_timeout_ms;
    om_config_2.object_chunk_size = object_chunk_size;
    om_config_2.push_timeout_ms = push_timeout_ms;
    om_config_2.object_manager_port = 0;
    om_config_2.rpc_service_threads_number = 3;
    server2.reset(new MockServer(main_service, om_config_2, gcs_client_2));
    // connect to stores.
    RAY_ARROW_CHECK_OK(client1.Connect(store_id_1));
    RAY_ARROW_CHECK_OK(client2.Connect(store_id_2));
  }
  void TearDown() {
    arrow::Status client1_status = client1.Disconnect();
    arrow::Status client2_status = client2.Disconnect();
    ASSERT_TRUE(client1_status.ok() && client2_status.ok());
    gcs_client_1->Disconnect();
    gcs_client_2->Disconnect();
    this->server1.reset();
    this->server2.reset();
    StopStore(store_id_1);
    StopStore(store_id_2);
  }
  // Create and seal a random object of data_size bytes in the given store.
  ObjectID WriteDataToClient(plasma::PlasmaClient &client, int64_t data_size) {
    return WriteDataToClient(client, data_size, ObjectID::FromRandom());
  }
  // Create and seal an object with a specific id in the given store.
  ObjectID WriteDataToClient(plasma::PlasmaClient &client, int64_t data_size,
                             ObjectID object_id) {
    RAY_LOG(DEBUG) << "ObjectID Created: " << object_id;
    uint8_t metadata[] = {5};
    int64_t metadata_size = sizeof(metadata);
    std::shared_ptr<Buffer> data;
    RAY_ARROW_CHECK_OK(
        client.Create(object_id.ToPlasmaId(), data_size, metadata, metadata_size, &data));
    RAY_ARROW_CHECK_OK(client.Seal(object_id.ToPlasmaId()));
    return object_id;
  }
  // Record object-added events observed on each server.
  void object_added_handler_1(ObjectID object_id) { v1.push_back(object_id); };
  void object_added_handler_2(ObjectID object_id) { v2.push_back(object_id); };
 protected:
  std::thread p;
  boost::asio::io_service main_service;
  std::shared_ptr<gcs::GcsClient> gcs_client_1;
  std::shared_ptr<gcs::GcsClient> gcs_client_2;
  std::unique_ptr<MockServer> server1;
  std::unique_ptr<MockServer> server2;
  plasma::PlasmaClient client1;
  plasma::PlasmaClient client2;
  // Objects seen added on server1 / server2 respectively.
  std::vector<ObjectID> v1;
  std::vector<ObjectID> v2;
  std::string store_id_1;
  std::string store_id_2;
  unsigned int push_timeout_ms;
  uint64_t object_chunk_size = static_cast<uint64_t>(std::pow(10, 3));
};
// Drives an asynchronous chain of tests: notifications -> subscribe+wait ->
// a sequence of Wait() scenarios, then stops the event loop. The chain is
// advanced by callbacks, so statement order within each step matters.
class TestObjectManager : public TestObjectManagerBase {
 public:
  // Index of the Wait scenario currently running (see NextWaitTest).
  int current_wait_test = -1;
  int num_connected_clients = 0;
  ClientID node_id_1;
  ClientID node_id_2;
  ObjectID created_object_id1;
  ObjectID created_object_id2;
  std::unique_ptr<boost::asio::deadline_timer> timer;
  // Start the test chain once the GCS subscription has seen both nodes.
  void WaitConnections() {
    node_id_1 = gcs_client_1->Nodes().GetSelfId();
    node_id_2 = gcs_client_2->Nodes().GetSelfId();
    RAY_CHECK_OK(gcs_client_1->Nodes().AsyncSubscribeToNodeChange(
        [this](const ClientID &node_id, const GcsNodeInfo &data) {
          if (node_id == node_id_1 || node_id == node_id_2) {
            num_connected_clients += 1;
          }
          if (num_connected_clients == 2) {
            StartTests();
          }
        },
        nullptr));
  }
  void StartTests() {
    TestConnections();
    TestNotifications();
  }
  // Exercise push with a non-local object (expected to time out) and a local
  // object (expected to succeed), plus a delayed write on server2.
  void TestNotifications() {
    ray::Status status = ray::Status::OK();
    status = server1->object_manager_.SubscribeObjAdded(
        [this](const object_manager::protocol::ObjectInfoT &object_info) {
          object_added_handler_1(ObjectID::FromBinary(object_info.object_id));
          NotificationTestCompleteIfSatisfied();
        });
    RAY_CHECK_OK(status);
    status = server2->object_manager_.SubscribeObjAdded(
        [this](const object_manager::protocol::ObjectInfoT &object_info) {
          object_added_handler_2(ObjectID::FromBinary(object_info.object_id));
          NotificationTestCompleteIfSatisfied();
        });
    RAY_CHECK_OK(status);
    size_t data_size = 1000000;
    // dummy_id is not local. The push function will timeout.
    ObjectID dummy_id = ObjectID::FromRandom();
    server1->object_manager_.Push(dummy_id, gcs_client_2->Nodes().GetSelfId());
    created_object_id1 = ObjectID::FromRandom();
    WriteDataToClient(client1, data_size, created_object_id1);
    // Server1 holds Object1 so this Push call will success.
    server1->object_manager_.Push(created_object_id1, gcs_client_2->Nodes().GetSelfId());
    // This timer is used to guarantee that the Push function for dummy_id will timeout.
    timer.reset(new boost::asio::deadline_timer(main_service));
    auto period = boost::posix_time::milliseconds(push_timeout_ms + 10);
    timer->expires_from_now(period);
    created_object_id2 = ObjectID::FromRandom();
    timer->async_wait([this, data_size](const boost::system::error_code &error) {
      WriteDataToClient(client2, data_size, created_object_id2);
    });
  }
  // Advance once server1 saw 1 add (pushed object) and server2 saw 2 adds
  // (the pushed object plus the delayed local write).
  void NotificationTestCompleteIfSatisfied() {
    size_t num_expected_objects1 = 1;
    size_t num_expected_objects2 = 2;
    if (v1.size() == num_expected_objects1 && v2.size() == num_expected_objects2) {
      SubscribeObjectThenWait();
    }
  }
  void SubscribeObjectThenWait() {
    int data_size = 100;
    // Test to ensure Wait works properly during an active subscription to the same
    // object.
    ObjectID object_1 = WriteDataToClient(client2, data_size);
    ObjectID object_2 = WriteDataToClient(client2, data_size);
    UniqueID sub_id = ray::UniqueID::FromRandom();
    RAY_CHECK_OK(server1->object_manager_.object_directory_->SubscribeObjectLocations(
        sub_id, object_1,
        [this, sub_id, object_1, object_2](
            const ray::ObjectID &object_id,
            const std::unordered_set<ray::ClientID> &clients) {
          if (!clients.empty()) {
            TestWaitWhileSubscribed(sub_id, object_1, object_2);
          }
        }));
  }
  void TestWaitWhileSubscribed(UniqueID sub_id, ObjectID object_1, ObjectID object_2) {
    int required_objects = 1;
    int timeout_ms = 1000;
    std::vector<ObjectID> object_ids = {object_1, object_2};
    boost::posix_time::ptime start_time = boost::posix_time::second_clock::local_time();
    UniqueID wait_id = UniqueID::FromRandom();
    RAY_CHECK_OK(server1->object_manager_.AddWaitRequest(
        wait_id, object_ids, timeout_ms, required_objects, false,
        [this, sub_id, object_1, object_ids, start_time](
            const std::vector<ray::ObjectID> &found,
            const std::vector<ray::ObjectID> &remaining) {
          int64_t elapsed = (boost::posix_time::second_clock::local_time() - start_time)
                                .total_milliseconds();
          RAY_LOG(DEBUG) << "elapsed " << elapsed;
          RAY_LOG(DEBUG) << "found " << found.size();
          RAY_LOG(DEBUG) << "remaining " << remaining.size();
          RAY_CHECK(found.size() == 1);
          // There's nothing more to test. A check will fail if unexpected behavior is
          // triggered.
          RAY_CHECK_OK(
              server1->object_manager_.object_directory_->UnsubscribeObjectLocations(
                  sub_id, object_1));
          NextWaitTest();
        }));
    // Skip lookups and rely on Subscribe only to test subscribe interaction.
    server1->object_manager_.SubscribeRemainingWaitObjects(wait_id);
  }
  // Run the Wait scenarios one at a time; each scenario's callback (in
  // TestWait) calls back into NextWaitTest to advance.
  void NextWaitTest() {
    int data_size = 600;
    current_wait_test += 1;
    switch (current_wait_test) {
    case 0: {
      // Ensure timeout_ms = 0 is handled correctly.
      // Out of 5 objects, we expect 3 ready objects and 2 remaining objects.
      TestWait(data_size, 5, 3, /*timeout_ms=*/0, false, false);
    } break;
    case 1: {
      // Ensure timeout_ms = 1000 is handled correctly.
      // Out of 5 objects, we expect 3 ready objects and 2 remaining objects.
      TestWait(data_size, 5, 3, wait_timeout_ms, false, false);
    } break;
    case 2: {
      // Generate objects locally to ensure local object code-path works properly.
      // Out of 5 objects, we expect 3 ready objects and 2 remaining objects.
      TestWait(data_size, 5, 3, wait_timeout_ms, false, /*test_local=*/true);
    } break;
    case 3: {
      // Wait on an object that's never registered with GCS to ensure timeout works
      // properly.
      TestWait(data_size, /*num_objects=*/5, /*required_objects=*/6, wait_timeout_ms,
               /*include_nonexistent=*/true, false);
    } break;
    case 4: {
      // Ensure infinite time code-path works properly.
      TestWait(data_size, 5, 5, /*timeout_ms=*/-1, false, false);
    } break;
    }
  }
  // Create num_objects objects (remote or local), optionally append one id
  // that is never created, then Wait on them and assert per-scenario results.
  void TestWait(int data_size, int num_objects, uint64_t required_objects, int timeout_ms,
                bool include_nonexistent, bool test_local) {
    std::vector<ObjectID> object_ids;
    for (int i = -1; ++i < num_objects;) {
      ObjectID oid;
      if (test_local) {
        oid = WriteDataToClient(client1, data_size);
      } else {
        oid = WriteDataToClient(client2, data_size);
      }
      object_ids.push_back(oid);
    }
    if (include_nonexistent) {
      num_objects += 1;
      object_ids.push_back(ObjectID::FromRandom());
    }
    boost::posix_time::ptime start_time = boost::posix_time::second_clock::local_time();
    RAY_CHECK_OK(server1->object_manager_.Wait(
        object_ids, timeout_ms, required_objects, false,
        [this, object_ids, num_objects, timeout_ms, required_objects, start_time](
            const std::vector<ray::ObjectID> &found,
            const std::vector<ray::ObjectID> &remaining) {
          int64_t elapsed = (boost::posix_time::second_clock::local_time() - start_time)
                                .total_milliseconds();
          RAY_LOG(DEBUG) << "elapsed " << elapsed;
          RAY_LOG(DEBUG) << "found " << found.size();
          RAY_LOG(DEBUG) << "remaining " << remaining.size();
          // Ensure object order is preserved for all invocations.
          size_t j = 0;
          size_t k = 0;
          for (size_t i = 0; i < object_ids.size(); ++i) {
            ObjectID oid = object_ids[i];
            // Make sure the object is in either the found vector or the remaining vector.
            if (j < found.size() && found[j] == oid) {
              j += 1;
            }
            if (k < remaining.size() && remaining[k] == oid) {
              k += 1;
            }
          }
          if (!found.empty()) {
            ASSERT_EQ(j, found.size());
          }
          if (!remaining.empty()) {
            ASSERT_EQ(k, remaining.size());
          }
          switch (current_wait_test) {
          case 0: {
            // Ensure timeout_ms = 0 returns expected number of found and remaining
            // objects.
            ASSERT_TRUE(found.size() <= required_objects);
            ASSERT_TRUE(static_cast<int>(found.size() + remaining.size()) == num_objects);
            NextWaitTest();
          } break;
          case 1: {
            // Ensure lookup succeeds as expected when timeout_ms = 1000.
            ASSERT_TRUE(found.size() >= required_objects);
            ASSERT_TRUE(static_cast<int>(found.size() + remaining.size()) == num_objects);
            NextWaitTest();
          } break;
          case 2: {
            // Ensure lookup succeeds as expected when objects are local.
            ASSERT_TRUE(found.size() >= required_objects);
            ASSERT_TRUE(static_cast<int>(found.size() + remaining.size()) == num_objects);
            NextWaitTest();
          } break;
          case 3: {
            // Ensure lookup returns after timeout_ms elapses when one object doesn't
            // exist.
            ASSERT_TRUE(elapsed >= timeout_ms);
            ASSERT_TRUE(static_cast<int>(found.size() + remaining.size()) == num_objects);
            NextWaitTest();
          } break;
          case 4: {
            // Ensure timeout_ms = -1 works properly.
            ASSERT_TRUE(static_cast<int>(found.size()) == num_objects);
            ASSERT_TRUE(remaining.size() == 0);
            TestWaitComplete();
          } break;
          }
        }));
  }
  // Last scenario done: stop the event loop so the TEST_F body returns.
  void TestWaitComplete() { main_service.stop(); }
  // Log and sanity-check the node info both servers registered with the GCS.
  void TestConnections() {
    RAY_LOG(DEBUG) << "\n"
                   << "Server node ids:"
                   << "\n";
    auto data = gcs_client_1->Nodes().Get(node_id_1);
    RAY_LOG(DEBUG) << (ClientID::FromBinary(data->node_id()).IsNil());
    RAY_LOG(DEBUG) << "Server 1 NodeID=" << ClientID::FromBinary(data->node_id());
    RAY_LOG(DEBUG) << "Server 1 NodeIp=" << data->node_manager_address();
    RAY_LOG(DEBUG) << "Server 1 NodePort=" << data->node_manager_port();
    ASSERT_EQ(node_id_1, ClientID::FromBinary(data->node_id()));
    auto data2 = gcs_client_1->Nodes().Get(node_id_2);
    RAY_LOG(DEBUG) << "Server 2 NodeID=" << ClientID::FromBinary(data2->node_id());
    RAY_LOG(DEBUG) << "Server 2 NodeIp=" << data2->node_manager_address();
    RAY_LOG(DEBUG) << "Server 2 NodePort=" << data2->node_manager_port();
    ASSERT_EQ(node_id_2, ClientID::FromBinary(data2->node_id()));
  }
};
TEST_F(TestObjectManager, StartTestObjectManager) {
  // TODO: Break this test suite into unit tests.
  // Kick off WaitConnections() from inside the event loop; the async test
  // chain calls main_service.stop() (via TestWaitComplete) when done.
  auto AsyncStartTests = main_service.wrap([this]() { WaitConnections(); });
  AsyncStartTests();
  main_service.run();
}
} // namespace ray
/// Test entry point.
///
/// Expects two positional arguments after any gtest flags:
///   argv[1]: path to the plasma store executable.
///   argv[2]: wait timeout in milliseconds (parsed with std::stoi).
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Guard against missing arguments: the original code dereferenced
  // argv[1]/argv[2] unconditionally and crashed with an unhelpful error
  // when the binary was launched without them.
  if (argc < 3) {
    return 1;
  }
  store_executable = std::string(argv[1]);
  wait_timeout_ms = std::stoi(std::string(argv[2]));
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/protobuf/common.proto
|
Protocol Buffers
|
// Common message and enum definitions shared across Ray's protobuf files.
syntax = "proto3";
package ray.rpc;
option java_package = "org.ray.runtime.generated";
// Language of a task or worker.
enum Language {
PYTHON = 0;
JAVA = 1;
CPP = 2;
}
// Type of a worker.
enum WorkerType {
// A regular worker process that executes tasks.
WORKER = 0;
// The driver process of a job.
DRIVER = 1;
}
// Type of a task.
enum TaskType {
// Normal task.
NORMAL_TASK = 0;
// Actor creation task.
ACTOR_CREATION_TASK = 1;
// Actor task.
ACTOR_TASK = 2;
}
// Address of a worker or node manager.
message Address {
// ID of the raylet at this address.
bytes raylet_id = 1;
// IP address.
string ip_address = 2;
// Port number.
int32 port = 3;
// Optional unique id for the worker.
bytes worker_id = 4;
}
/// The task specification encapsulates all immutable information about the
/// task. These fields are determined at submission time, in contrast to the
/// `TaskExecutionSpec`, which may change at execution time.
message TaskSpec {
// Type of this task.
TaskType type = 1;
// Language of this task.
Language language = 2;
// Function descriptor of this task, which is a list of strings that can
// uniquely describe the function to execute.
// For a Python function, it should be: [module_name, class_name, function_name]
// For a Java function, it should be: [class_name, method_name, type_descriptor]
repeated bytes function_descriptor = 3;
// ID of the job that this task belongs to.
bytes job_id = 4;
// Task ID of the task.
bytes task_id = 5;
// Task ID of the parent task.
bytes parent_task_id = 6;
// A count of the number of tasks submitted by the parent task before this one.
uint64 parent_counter = 7;
// Task ID of the caller. This is the same as parent_task_id for non-actors.
// This is the actor ID (embedded in a nil task ID) for actors.
bytes caller_id = 8;
/// Address of the caller.
Address caller_address = 9;
// Task arguments.
repeated TaskArg args = 10;
// Number of return objects.
uint64 num_returns = 11;
// Quantities of the different resources required by this task.
map<string, double> required_resources = 12;
// The resources required for placing this task on a node. If this is empty,
// then the placement resources are equal to the required_resources.
map<string, double> required_placement_resources = 13;
// Task specification for an actor creation task.
// This field is only valid when `type == ACTOR_CREATION_TASK`.
ActorCreationTaskSpec actor_creation_task_spec = 14;
// Task specification for an actor task.
// This field is only valid when `type == ACTOR_TASK`.
ActorTaskSpec actor_task_spec = 15;
// Whether this task is a direct call task.
bool is_direct_call = 16;
// Number of times this task may be retried on worker failure.
int32 max_retries = 17;
}
// Argument in the task.
message TaskArg {
// Object IDs for pass-by-reference arguments. Normally there is only one
// object ID in this list which represents the object that is being passed.
// However to support reducers in a MapReduce workload, we also support
// passing multiple object IDs for each argument.
repeated bytes object_ids = 1;
// Data for pass-by-value arguments.
bytes data = 2;
// Metadata for pass-by-value arguments.
bytes metadata = 3;
}
// Task spec of an actor creation task.
message ActorCreationTaskSpec {
// ID of the actor that will be created by this task.
bytes actor_id = 2;
// The max number of times this actor should be reconstructed.
// If this number is 0 or negative, the actor won't be reconstructed on failure.
uint64 max_actor_reconstructions = 3;
// The dynamic options used in the worker command when starting a worker process for
// an actor creation task. If the list isn't empty, the options will be used to replace
// the placeholder strings (`RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_0`,
// `RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_1`, etc) in the worker command.
repeated string dynamic_worker_options = 4;
// Whether direct actor call is used.
bool is_direct_call = 5;
// The max number of concurrent calls for direct call actors.
int32 max_concurrency = 6;
// Whether the actor is persistent
bool is_detached = 7;
// Whether the actor use async actor calls
bool is_asyncio = 8;
}
// Task spec of an actor task.
message ActorTaskSpec {
// Actor ID of the actor that this task is executed on.
bytes actor_id = 2;
// The dummy object ID of the actor creation task.
bytes actor_creation_dummy_object_id = 4;
// Number of tasks that have been submitted to this actor so far.
uint64 actor_counter = 5;
// The dummy object ID of the previous actor task.
bytes previous_actor_task_dummy_object_id = 7;
}
// The task execution specification encapsulates all mutable information about
// the task. These fields may change at execution time, in contrast to the
// `TaskSpec`, which is determined at submission time.
message TaskExecutionSpec {
// The last time this task was received for scheduling.
double last_timestamp = 2;
// The number of times this task was spilled back by raylets.
uint64 num_forwards = 3;
}
// Represents a task, including task spec, and task execution spec.
message Task {
// Immutable task specification, fixed at submission time.
TaskSpec task_spec = 1;
// Mutable execution state of the task.
TaskExecutionSpec task_execution_spec = 2;
}
// Represents a resource id.
message ResourceId {
// The index of the resource (i.e., CPU #3).
int64 index = 1;
// The quantity of the resource assigned (i.e., 0.5 CPU).
double quantity = 2;
}
// Represents a set of resource ids.
message ResourceMapEntry {
// The name of the resource (i.e., "CPU").
string name = 1;
// The set of resource ids assigned.
repeated ResourceId resource_ids = 2;
}
// Metrics data recorded for a single Census view.
message ViewData {
message Measure {
// A short string that describes the tags for this measure, e.g.,
// "Tag1:Value1,Tag2:Value2,Tag3:Value3"
string tags = 1;
// Int64 type value (if present).
int64 int_value = 2;
// Double type value (if present).
double double_value = 3;
// Distribution type value (if present).
double distribution_min = 4;
double distribution_mean = 5;
double distribution_max = 6;
double distribution_count = 7;
repeated double distribution_bucket_boundaries = 8;
repeated double distribution_bucket_counts = 9;
}
// The name of this Census view.
string view_name = 1;
// The list of measures recorded under this view.
repeated Measure measures = 2;
}
// Debug info returned from the core worker.
message CoreWorkerStats {
// Debug string of the currently executing task.
string current_task_desc = 1;
// Number of pending normal and actor tasks.
int32 num_pending_tasks = 2;
// Number of object ids in local scope.
int32 num_object_ids_in_scope = 3;
// Function descriptor of the currently executing task.
repeated bytes current_task_func_desc = 4;
// NOTE: field number 5 is unused.
// IP address of the core worker.
string ip_address = 6;
// Port of the core worker.
int64 port = 7;
// Actor ID.
bytes actor_id = 8;
// A map from the resource name (e.g. "CPU") to the amount of resource used.
map<string, double> used_resources = 9;
// A string displayed on Dashboard.
string webui_display = 10;
// Number of objects stored in local memory.
int32 num_local_objects = 11;
// Used local object store memory.
int64 used_object_store_memory = 12;
// Length of the task queue.
int32 task_queue_length = 13;
// Number of executed tasks.
int32 num_executed_tasks = 14;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/protobuf/core_worker.proto
|
Protocol Buffers
|
// Messages and RPC service for communicating with a core worker process.
syntax = "proto3";
package ray.rpc;
import "src/ray/protobuf/common.proto";
// A set of object IDs that are currently active.
// NOTE(review): presumably reported so the objects stay pinned while in
// use — confirm against the callers of this message.
message ActiveObjectIDs {
repeated bytes object_ids = 1;
}
// Persistent state of an ActorHandle.
message ActorHandle {
// ID of the actor.
bytes actor_id = 1;
// ID of the job that created the actor (it is possible that the handle
// exists on a job with a different job ID).
bytes creation_job_id = 3;
// Language of the actor.
Language actor_language = 4;
// Function descriptor of actor creation task.
repeated string actor_creation_task_function_descriptor = 5;
// The unique id of the dummy object returned by the actor creation task.
// It's used as a dependency for the first task.
// TODO: Remove this once scheduling is done by task counter only.
bytes actor_cursor = 6;
// Whether direct actor call is used.
bool is_direct_call = 7;
}
// Request to assign a task to a worker, sent by the raylet.
message AssignTaskRequest {
// The ID of the worker this message is intended for. This is used to
// ensure that workers don't try to execute tasks assigned to workers
// that used to be bound to the same port.
bytes intended_worker_id = 1;
// The task to be pushed.
Task task = 2;
// A list of the resources reserved for this worker.
// TODO(zhijunfu): `resource_ids` is represented as
// flatbuffers-serialized bytes, will be moved to protobuf later.
bytes resource_ids = 3;
}
message AssignTaskReply {
}
// A single return value of a task.
message ReturnObject {
// Object ID.
bytes object_id = 1;
// If set, indicates the data is in plasma instead of inline. This
// means that data and metadata will be empty.
bool in_plasma = 2;
// Data of the object.
bytes data = 3;
// Metadata of the object.
bytes metadata = 4;
}
// Request to push a task directly from one worker to another.
message PushTaskRequest {
// The ID of the worker this message is intended for.
bytes intended_worker_id = 1;
// Address of the caller.
Address caller_address = 2;
// The task to be pushed.
TaskSpec task_spec = 3;
// The sequence number of the task for this client. This must increase
// sequentially starting from zero for each actor handle. The server
// will guarantee tasks execute in this sequence, waiting for any
// out-of-order request messages to arrive as necessary.
// If set to -1, ordering is disabled and the task executes immediately.
// This mode of behaviour is used for direct task submission only.
int64 sequence_number = 4;
// The max sequence number the client has processed responses for. This
// is a performance optimization that allows the client to tell the server
// to cancel any PushTaskRequests with seqno <= this value, rather than
// waiting for the server to time out waiting for missing messages.
int64 client_processed_up_to = 5;
// Resource mapping ids assigned to the worker executing the task.
repeated ResourceMapEntry resource_mapping = 6;
}
message PushTaskReply {
// The returned objects.
repeated ReturnObject return_objects = 1;
// Set to true if the worker will be exiting.
bool worker_exiting = 2;
}
message DirectActorCallArgWaitCompleteRequest {
// The ID of the worker this message is intended for.
bytes intended_worker_id = 1;
// Id used to uniquely identify this request. This is sent back to the core
// worker to notify the wait has completed.
int64 tag = 2;
}
message DirectActorCallArgWaitCompleteReply {
}
message GetObjectStatusRequest {
// The owner of the object. Note that we do not need to include
// intended_worker_id since the new worker can service this request too by
// inspecting the owner_id field.
bytes owner_id = 1;
// Wait for this object's status.
bytes object_id = 2;
}
message GetObjectStatusReply {
// Status of the queried object.
enum ObjectStatus {
CREATED = 0;
}
ObjectStatus status = 1;
}
message WaitForObjectEvictionRequest {
// The ID of the worker this message is intended for.
bytes intended_worker_id = 1;
// ObjectID of the pinned object.
bytes object_id = 2;
}
message WaitForObjectEvictionReply {
}
message KillActorRequest {
// ID of the actor that is intended to be killed.
bytes intended_actor_id = 1;
}
message KillActorReply {
}
message GetCoreWorkerStatsRequest {
// The ID of the worker this message is intended for.
bytes intended_worker_id = 1;
}
message GetCoreWorkerStatsReply {
// Debug information returned from the core worker.
CoreWorkerStats core_worker_stats = 1;
}
// RPC service exposed by each core worker process.
service CoreWorkerService {
// Push a task to a worker from the raylet.
rpc AssignTask(AssignTaskRequest) returns (AssignTaskReply);
// Push a task directly to this worker from another.
rpc PushTask(PushTaskRequest) returns (PushTaskReply);
// Reply from raylet that wait for direct actor call args has completed.
rpc DirectActorCallArgWaitComplete(DirectActorCallArgWaitCompleteRequest)
returns (DirectActorCallArgWaitCompleteReply);
// Ask the object's owner about the object's current status.
rpc GetObjectStatus(GetObjectStatusRequest) returns (GetObjectStatusReply);
// Notify the object's owner that it has been pinned by a raylet. Replying
// to this message indicates that the raylet should unpin the object.
rpc WaitForObjectEviction(WaitForObjectEvictionRequest)
returns (WaitForObjectEvictionReply);
// Request that the worker shut down without completing outstanding work.
rpc KillActor(KillActorRequest) returns (KillActorReply);
// Get metrics from core workers.
rpc GetCoreWorkerStats(GetCoreWorkerStatsRequest) returns (GetCoreWorkerStatsReply);
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/protobuf/gcs.proto
|
Protocol Buffers
|
// Table data definitions stored in the GCS (Global Control Store).
syntax = "proto3";
package ray.rpc;
import "src/ray/protobuf/common.proto";
option java_package = "org.ray.runtime.generated";
// These indexes are mapped to strings in ray_redis_module.cc.
enum TablePrefix {
TABLE_PREFIX_MIN = 0;
UNUSED = 1;
TASK = 2;
RAYLET_TASK = 3;
CLIENT = 4;
OBJECT = 5;
ACTOR = 6;
FUNCTION = 7;
TASK_RECONSTRUCTION = 8;
HEARTBEAT = 9;
HEARTBEAT_BATCH = 10;
ERROR_INFO = 11;
JOB = 12;
PROFILE = 13;
TASK_LEASE = 14;
ACTOR_CHECKPOINT = 15;
ACTOR_CHECKPOINT_ID = 16;
NODE_RESOURCE = 17;
DIRECT_ACTOR = 18;
WORKER_FAILURE = 19;
TABLE_PREFIX_MAX = 20;
}
// The channel that Add operations to the Table should be published on, if any.
enum TablePubsub {
TABLE_PUBSUB_MIN = 0;
NO_PUBLISH = 1;
TASK_PUBSUB = 2;
RAYLET_TASK_PUBSUB = 3;
CLIENT_PUBSUB = 4;
OBJECT_PUBSUB = 5;
ACTOR_PUBSUB = 6;
HEARTBEAT_PUBSUB = 7;
HEARTBEAT_BATCH_PUBSUB = 8;
ERROR_INFO_PUBSUB = 9;
TASK_LEASE_PUBSUB = 10;
JOB_PUBSUB = 11;
NODE_RESOURCE_PUBSUB = 12;
DIRECT_ACTOR_PUBSUB = 13;
WORKER_FAILURE_PUBSUB = 14;
TABLE_PUBSUB_MAX = 15;
}
// Whether a GCS change adds/updates entries or removes them.
enum GcsChangeMode {
APPEND_OR_ADD = 0;
REMOVE = 1;
}
// A change notification for a GCS table key.
message GcsEntry {
// Whether the entries are being added or removed.
GcsChangeMode change_mode = 1;
// The table key that changed.
bytes id = 2;
// The serialized entries associated with the key.
repeated bytes entries = 3;
}
message ObjectTableData {
// The size of the object.
uint64 object_size = 1;
// The node manager ID that this object appeared on or was evicted by.
bytes manager = 2;
}
message TaskReconstructionData {
// The ID of task.
bytes task_id = 1;
// The number of times this task has been reconstructed so far.
uint64 num_reconstructions = 2;
// The node manager that is trying to reconstruct the task.
bytes node_manager_id = 3;
}
// A task entry in the GCS task table.
message TaskTableData {
// The task (spec plus execution spec).
Task task = 1;
}
message ActorTableData {
// State of an actor.
enum ActorState {
// Actor is alive.
ALIVE = 0;
// Actor is dead, now being reconstructed.
// After reconstruction finishes, the state will become alive again.
RECONSTRUCTING = 1;
// Actor is already dead and won't be reconstructed.
DEAD = 2;
}
// The ID of the actor that was created.
bytes actor_id = 1;
// The ID of the caller of the actor creation task.
bytes parent_id = 2;
// The dummy object ID returned by the actor creation task. If the actor
// dies, then this is the object that should be reconstructed for the actor
// to be recreated.
bytes actor_creation_dummy_object_id = 3;
// The ID of the job that created the actor.
bytes job_id = 4;
// Current state of this actor.
ActorState state = 6;
// Max number of times this actor should be reconstructed.
uint64 max_reconstructions = 7;
// Remaining number of reconstructions.
uint64 remaining_reconstructions = 8;
// The address of the actor.
Address address = 9;
// The address of the actor's owner (parent).
Address owner_address = 10;
// Whether direct actor call is used.
bool is_direct_call = 11;
// Whether the actor is persistent.
bool is_detached = 12;
// Timestamp that the actor is created or reconstructed.
double timestamp = 13;
}
message ErrorTableData {
// The ID of the job that the error is for.
bytes job_id = 1;
// The type of the error.
string type = 2;
// The error message.
string error_message = 3;
// The timestamp of the error message.
double timestamp = 4;
}
message ProfileTableData {
// Represents a profile event.
message ProfileEvent {
// The type of the event.
string event_type = 1;
// The start time of the event.
double start_time = 2;
// The end time of the event. If the event is a point event, then this should
// be the same as the start time.
double end_time = 3;
// Additional data associated with the event. This data must be serialized
// using JSON.
string extra_data = 4;
}
// The type of the component that generated the event, e.g., worker or
// object_manager, or node_manager.
string component_type = 1;
// An identifier for the component that generated the event.
bytes component_id = 2;
// An identifier for the node that generated the event.
string node_ip_address = 3;
// This is a batch of profiling events. We batch these together for
// performance reasons because a single task may generate many events, and
// we don't want each event to require a GCS command.
repeated ProfileEvent profile_events = 4;
}
message ResourceTableData {
// The total capacity of this resource type.
double resource_capacity = 1;
}
message GcsNodeInfo {
// State of a node.
enum GcsNodeState {
// Node is alive.
ALIVE = 0;
// Node is dead.
DEAD = 1;
}
// The ID of node.
bytes node_id = 1;
// The IP address of the node manager.
string node_manager_address = 2;
// The IPC socket name of raylet.
string raylet_socket_name = 3;
// The IPC socket name of the node's plasma store.
string object_store_socket_name = 4;
// The port at which the node manager is listening for TCP
// connections from other node managers.
int32 node_manager_port = 5;
// The port at which the object manager is listening for TCP
// connections from other object managers.
int32 object_manager_port = 6;
// Current state of this node.
GcsNodeState state = 7;
// The Hostname address of the node manager.
string node_manager_hostname = 8;
}
message HeartbeatTableData {
// Node manager client id
bytes client_id = 1;
// TODO(hchen): Define the following resources in map format.
// Resource capacity currently available on this node manager.
repeated string resources_available_label = 2;
repeated double resources_available_capacity = 3;
// Total resource capacity configured for this node manager.
repeated string resources_total_label = 4;
repeated double resources_total_capacity = 5;
// Aggregate outstanding resource load on this node manager.
repeated string resource_load_label = 6;
repeated double resource_load_capacity = 7;
// Object IDs that are in use by workers on this node manager's node.
repeated bytes active_object_id = 8;
}
// A batch of heartbeats from multiple node managers.
message HeartbeatBatchTableData {
repeated HeartbeatTableData batch = 1;
}
// Data for a lease on task execution.
message TaskLeaseData {
// The task ID.
bytes task_id = 1;
// Node manager client ID.
bytes node_manager_id = 2;
// The time that the lease was last acquired at. NOTE(swang): This is the
// system clock time according to the node that added the entry and is not
// synchronized with other nodes.
uint64 acquired_at = 3;
// The period that the lease is active for.
uint64 timeout = 4;
}
message JobTableData {
// The job ID.
bytes job_id = 1;
// Whether it's dead.
bool is_dead = 2;
// The UNIX timestamp corresponding to this event (job added or removed).
int64 timestamp = 3;
// IP of the node this job was started on.
string node_manager_address = 4;
// Process ID of the driver running this job.
int64 driver_pid = 5;
}
// This table stores the actor checkpoint data. An actor checkpoint
// is the snapshot of an actor's state in the actor registration.
// See `actor_registration.h` for more detailed explanation of these fields.
message ActorCheckpointData {
// ID of this checkpoint.
bytes checkpoint_id = 1;
// ID of this actor.
bytes actor_id = 2;
// The dummy object ID of actor's most recently executed task.
bytes execution_dependency = 3;
// A list of IDs of this actor's handles.
repeated bytes handle_ids = 4;
// The task counters of the above handles.
repeated uint64 task_counters = 5;
// The frontier dependencies of the above handles.
repeated bytes frontier_dependencies = 6;
// A list of unreleased dummy objects from this actor.
repeated bytes unreleased_dummy_objects = 7;
// The numbers of dependencies for the above unreleased dummy objects.
repeated uint32 num_dummy_object_dependencies = 8;
}
// This table stores the actor-to-available-checkpoint-ids mapping.
message ActorCheckpointIdData {
// ID of this actor.
bytes actor_id = 1;
// IDs of this actor's available checkpoints.
repeated bytes checkpoint_ids = 2;
// A list of the timestamps for each of the above `checkpoint_ids`.
repeated uint64 timestamps = 3;
}
message WorkerFailureData {
// Address of the worker that failed.
Address worker_address = 1;
// The UNIX timestamp at which the worker failed.
int64 timestamp = 3;
}
// This enum type is used as object's metadata to indicate the object's creating
// task has failed because of a certain error.
// TODO(hchen): We may want to make these errors more specific. E.g., we may want
// to distinguish between intentional and expected actor failures, and between
// worker process failure and node failure.
enum ErrorType {
// Indicates that a task failed because the worker died unexpectedly while executing it.
WORKER_DIED = 0;
// Indicates that a task failed because the actor died unexpectedly before finishing it.
ACTOR_DIED = 1;
// Indicates that an object is lost and cannot be reconstructed.
// Note, this currently only happens to actor objects. When the actor's state is already
// after the object's creating task, the actor cannot re-run the task.
// TODO(hchen): we may want to reuse this error type for more cases. E.g.,
// 1) An object that was put by the driver.
// 2) The object's creating task is already cleaned up from GCS (this currently
// crashes raylet).
OBJECT_UNRECONSTRUCTABLE = 2;
// Indicates that a task failed due to user code failure.
TASK_EXECUTION_EXCEPTION = 3;
// Indicates that the object has been placed in plasma. This error shouldn't ever be
// exposed to user code; it is only used internally to indicate the result of a direct
// call has been placed in plasma.
OBJECT_IN_PLASMA = 4;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/protobuf/gcs_service.proto
|
Protocol Buffers
|
// RPC service and message definitions exposed by the GCS server.
syntax = "proto3";
package ray.rpc;
import "src/ray/protobuf/gcs.proto";
message AddJobRequest {
// The job entry to add.
JobTableData data = 1;
}
message AddJobReply {
// Whether the job was added successfully.
bool success = 1;
}
message MarkJobFinishedRequest {
// ID of the job to mark as finished.
bytes job_id = 1;
}
message MarkJobFinishedReply {
// Whether the job was marked finished successfully.
bool success = 1;
}
// Service for job info access.
service JobInfoGcsService {
// Add job to GCS Service.
rpc AddJob(AddJobRequest) returns (AddJobReply);
// Mark job as finished to GCS Service.
rpc MarkJobFinished(MarkJobFinishedRequest) returns (MarkJobFinishedReply);
}
message GetActorInfoRequest {
// ID of this actor.
bytes actor_id = 1;
}
message GetActorInfoReply {
// Data of actor.
ActorTableData actor_table_data = 1;
}
message RegisterActorInfoRequest {
// Data of actor.
ActorTableData actor_table_data = 1;
}
message RegisterActorInfoReply {
}
message UpdateActorInfoRequest {
// ID of this actor.
bytes actor_id = 1;
// Data of actor.
ActorTableData actor_table_data = 2;
}
message UpdateActorInfoReply {
}
message AddActorCheckpointRequest {
// The checkpoint data to store.
ActorCheckpointData checkpoint_data = 1;
}
message AddActorCheckpointReply {
}
message GetActorCheckpointRequest {
// ID of the checkpoint to fetch.
bytes checkpoint_id = 1;
}
message GetActorCheckpointReply {
// The requested checkpoint data.
ActorCheckpointData checkpoint_data = 1;
}
message GetActorCheckpointIDRequest {
// ID of the actor whose checkpoint ids are requested.
bytes actor_id = 1;
}
message GetActorCheckpointIDReply {
// The actor's available checkpoint ids.
ActorCheckpointIdData checkpoint_id_data = 1;
}
// Service for actor info access.
service ActorInfoGcsService {
// Get actor data from GCS Service.
rpc GetActorInfo(GetActorInfoRequest) returns (GetActorInfoReply);
// Register an actor to GCS Service.
rpc RegisterActorInfo(RegisterActorInfoRequest) returns (RegisterActorInfoReply);
// Update actor info in GCS Service.
rpc UpdateActorInfo(UpdateActorInfoRequest) returns (UpdateActorInfoReply);
// Add actor checkpoint data to GCS Service.
rpc AddActorCheckpoint(AddActorCheckpointRequest) returns (AddActorCheckpointReply);
// Get actor checkpoint data from GCS Service.
rpc GetActorCheckpoint(GetActorCheckpointRequest) returns (GetActorCheckpointReply);
// Get actor checkpoint id data from GCS Service.
rpc GetActorCheckpointID(GetActorCheckpointIDRequest)
returns (GetActorCheckpointIDReply);
}
message RegisterNodeRequest {
// Info of node.
GcsNodeInfo node_info = 1;
}
message RegisterNodeReply {
}
message UnregisterNodeRequest {
// The ID of node.
bytes node_id = 1;
}
message UnregisterNodeReply {
}
message GetAllNodeInfoRequest {
}
message GetAllNodeInfoReply {
// Info of all registered nodes.
repeated GcsNodeInfo node_info_list = 1;
}
message ReportHeartbeatRequest {
// Heartbeat from a single node manager.
HeartbeatTableData heartbeat = 1;
}
message ReportHeartbeatReply {
}
message ReportBatchHeartbeatRequest {
// A batch of heartbeats from multiple node managers.
HeartbeatBatchTableData heartbeat_batch = 1;
}
message ReportBatchHeartbeatReply {
}
message GetResourcesRequest {
// ID of the node whose resources are requested.
bytes node_id = 1;
}
message GetResourcesReply {
// Map from resource name to its capacity data.
map<string, ResourceTableData> resources = 1;
}
message UpdateResourcesRequest {
// ID of the node whose resources will be updated.
bytes node_id = 1;
// Map from resource name to the new capacity data.
map<string, ResourceTableData> resources = 2;
}
message UpdateResourcesReply {
}
message DeleteResourcesRequest {
// ID of the node whose resources will be deleted.
bytes node_id = 1;
// Names of the resources to delete.
repeated string resource_name_list = 2;
}
message DeleteResourcesReply {
}
// Service for node info access.
service NodeInfoGcsService {
// Register a node to GCS Service.
rpc RegisterNode(RegisterNodeRequest) returns (RegisterNodeReply);
// Unregister a node from GCS Service.
rpc UnregisterNode(UnregisterNodeRequest) returns (UnregisterNodeReply);
// Get information of all nodes from GCS Service.
rpc GetAllNodeInfo(GetAllNodeInfoRequest) returns (GetAllNodeInfoReply);
// Report heartbeat of a node to GCS Service.
rpc ReportHeartbeat(ReportHeartbeatRequest) returns (ReportHeartbeatReply);
// Report batch heartbeat to GCS Service.
rpc ReportBatchHeartbeat(ReportBatchHeartbeatRequest)
returns (ReportBatchHeartbeatReply);
// Get node's resources from GCS Service.
rpc GetResources(GetResourcesRequest) returns (GetResourcesReply);
// Update resources of a node in GCS Service.
rpc UpdateResources(UpdateResourcesRequest) returns (UpdateResourcesReply);
// Delete resources of a node in GCS Service.
rpc DeleteResources(DeleteResourcesRequest) returns (DeleteResourcesReply);
}
message GetObjectLocationsRequest {
// The ID of object to lookup in GCS Service.
bytes object_id = 1;
}
message GetObjectLocationsReply {
// Data of object
repeated ObjectTableData object_table_data_list = 1;
}
message AddObjectLocationRequest {
// The ID of object which location will be added to GCS Service.
bytes object_id = 1;
// The location that will be added to GCS Service.
bytes node_id = 2;
}
message AddObjectLocationReply {
}
message RemoveObjectLocationRequest {
// The ID of object which location will be removed from GCS Service.
bytes object_id = 1;
// The location that will be removed from GCS Service.
bytes node_id = 2;
}
message RemoveObjectLocationReply {
}
// Service for object info access.
service ObjectInfoGcsService {
// Get object's locations from GCS Service.
rpc GetObjectLocations(GetObjectLocationsRequest) returns (GetObjectLocationsReply);
// Add location of object to GCS Service.
rpc AddObjectLocation(AddObjectLocationRequest) returns (AddObjectLocationReply);
// Remove location of object from GCS Service.
rpc RemoveObjectLocation(RemoveObjectLocationRequest)
returns (RemoveObjectLocationReply);
}
message AddTaskRequest {
// The task entry to store.
TaskTableData task_data = 1;
}
message AddTaskReply {
}
message GetTaskRequest {
// ID of the task to fetch.
bytes task_id = 1;
}
message GetTaskReply {
// The requested task entry.
TaskTableData task_data = 1;
}
message DeleteTasksRequest {
// IDs of the tasks to delete.
repeated bytes task_id_list = 1;
}
message DeleteTasksReply {
}
message AddTaskLeaseRequest {
// The task lease entry to store.
TaskLeaseData task_lease_data = 1;
}
message AddTaskLeaseReply {
}
message AttemptTaskReconstructionRequest {
// The reconstruction attempt being recorded.
TaskReconstructionData task_reconstruction = 1;
}
message AttemptTaskReconstructionReply {
}
// Service for task info access.
service TaskInfoGcsService {
// Add a task to GCS Service.
rpc AddTask(AddTaskRequest) returns (AddTaskReply);
// Get task information from GCS Service.
rpc GetTask(GetTaskRequest) returns (GetTaskReply);
// Delete tasks from GCS Service.
rpc DeleteTasks(DeleteTasksRequest) returns (DeleteTasksReply);
// Add a task lease to GCS Service.
rpc AddTaskLease(AddTaskLeaseRequest) returns (AddTaskLeaseReply);
// Attempt task reconstruction to GCS Service.
rpc AttemptTaskReconstruction(AttemptTaskReconstructionRequest)
returns (AttemptTaskReconstructionReply);
}
message AddProfileDataRequest {
// The profile events to store.
ProfileTableData profile_data = 1;
}
message AddProfileDataReply {
}
// Service for stats access.
service StatsGcsService {
// Add profile data to GCS Service.
rpc AddProfileData(AddProfileDataRequest) returns (AddProfileDataReply);
}
message ReportJobErrorRequest {
// The error entry to report.
ErrorTableData error_data = 1;
}
message ReportJobErrorReply {
}
// Service for error info access.
service ErrorInfoGcsService {
// Report a job error to GCS Service.
rpc ReportJobError(ReportJobErrorRequest) returns (ReportJobErrorReply);
}
message ReportWorkerFailureRequest {
// Information about the failed worker.
WorkerFailureData worker_failure = 1;
}
message ReportWorkerFailureReply {
}
// Service for worker info access.
service WorkerInfoGcsService {
// Report a worker failure to GCS Service.
rpc ReportWorkerFailure(ReportWorkerFailureRequest) returns (ReportWorkerFailureReply);
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/protobuf/node_manager.proto
|
Protocol Buffers
|
// Messages and RPC service for communicating with a node manager (raylet).
syntax = "proto3";
package ray.rpc;
import "src/ray/protobuf/common.proto";
// Request a worker from the raylet with the specified resources.
message RequestWorkerLeaseRequest {
// TaskSpec containing the requested resources.
TaskSpec resource_spec = 1;
}
message RequestWorkerLeaseReply {
// Address of the leased worker. If this is empty, then the request should be
// retried at the provided raylet address.
Address worker_address = 1;
// Address of the raylet to spill back to, if any.
Address retry_at_raylet_address = 2;
// Resource mapping ids acquired by the leased worker.
repeated ResourceMapEntry resource_mapping = 3;
}
// Release a worker back to its raylet.
message ReturnWorkerRequest {
// Port of the leased worker that we are now returning.
int32 worker_port = 1;
// Unique id of the leased worker we are now returning.
bytes worker_id = 2;
// If true, there was some unrecoverable error and the raylet should
// disconnect the worker.
bool disconnect_worker = 3;
}
message ReturnWorkerReply {
}
message ForwardTaskRequest {
// The ID of the task to be forwarded.
bytes task_id = 1;
// The tasks in the uncommitted lineage of the forwarded task. This
// should include task_id.
repeated Task uncommitted_tasks = 2;
}
message ForwardTaskReply {
}
message PinObjectIDsRequest {
// Address of the owner to ask when to unpin the objects.
Address owner_address = 1;
// ObjectIDs to pin.
repeated bytes object_ids = 2;
}
message PinObjectIDsReply {
}
message GetNodeStatsRequest {
}
// Debug/stats information about a single worker on the node.
message WorkerStats {
// PID of the worker process.
uint32 pid = 1;
// Whether this is a driver.
bool is_driver = 2;
// Debug information returned from the core worker.
CoreWorkerStats core_worker_stats = 3;
}
message GetNodeStatsReply {
// Stats for every worker on this node.
repeated WorkerStats workers_stats = 1;
// Metrics view data recorded on this node.
repeated ViewData view_data = 2;
// Total number of workers on this node.
uint32 num_workers = 3;
// Tasks that cannot currently be scheduled on any node.
repeated TaskSpec infeasible_tasks = 4;
}
// Service for inter-node-manager communication.
service NodeManagerService {
// Request a worker from the raylet.
rpc RequestWorkerLease(RequestWorkerLeaseRequest) returns (RequestWorkerLeaseReply);
// Release a worker back to its raylet.
rpc ReturnWorker(ReturnWorkerRequest) returns (ReturnWorkerReply);
// Forward a task and its uncommitted lineage to the remote node manager.
rpc ForwardTask(ForwardTaskRequest) returns (ForwardTaskReply);
// Pin the provided object IDs.
rpc PinObjectIDs(PinObjectIDsRequest) returns (PinObjectIDsReply);
// Get the current node stats.
rpc GetNodeStats(GetNodeStatsRequest) returns (GetNodeStatsReply);
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/protobuf/object_manager.proto
|
Protocol Buffers
|
syntax = "proto3";
package ray.rpc;
// One chunk of an object being pushed to a remote object manager. A single
// object transfer is split into multiple PushRequests, one per chunk.
message PushRequest {
// The push ID to allow the receiver to differentiate different push attempts
// from the same sender.
bytes push_id = 1;
// The object ID being transferred.
bytes object_id = 2;
// The client ID of client sending this object
bytes client_id = 3;
// The index of the chunk being transferred.
uint32 chunk_index = 4;
// The data_size include object_size and metadata_size
uint64 data_size = 5;
// The metadata size.
uint64 metadata_size = 6;
// The chunk data
bytes data = 7;
}
// Ask a remote object manager to push an object back to the requester.
message PullRequest {
// ID of the requesting client.
bytes client_id = 1;
// Requested ObjectID.
bytes object_id = 2;
}
// Ask a remote object manager to delete the listed objects.
message FreeObjectsRequest {
repeated bytes object_ids = 1;
}
// Replies below are intentionally empty; success is conveyed by RPC status.
message PushReply {
}
message PullReply {
}
message FreeObjectsReply {
}
// Service for inter-node object transfer and cleanup.
service ObjectManagerService {
// Push service used to send object chunks
rpc Push(PushRequest) returns (PushReply);
// Try to pull object from remote object manager
rpc Pull(PullRequest) returns (PullReply);
// Tell remote object manager to free some objects
rpc FreeObjects(FreeObjectsRequest) returns (FreeObjectsReply);
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/protobuf/serialization.proto
|
Protocol Buffers
|
syntax = "proto3";
package ray.serialization;
// This is the protocol for python object serialization with pickle5.
//
// ## About Pickle 5 Protocol
// Pickle5 will create two things during serialization:
// 1. Inband data. This is the framed pickle data for most objects.
// 2. Buffers. They are python buffers referring internal data of objects.
// They contain metadata of the buffer and a native pointer.
// Thus they provide interface for zero-copy serialization.
//
// ## Protobuf object
// A PythonObject protobuf object will be created for each python object.
// Unfortunately, protobuf object has a 2GB memory limit and cannot support zero-copy,
// so we have to put inband data and raw buffer contents outside. Thus PythonObject
// will only store buffer metadata, the offset and size of inband data, and the
// offset and length of raw buffers object.
//
// ## Python object serialization memory layout
// This section describes the memory layout in the Plasma store buffer.
// Unfortunately, no frame info is included in protobuf data, so we have to specify
// the length and offset of PythonObject.
// ---------------------
// i64 offset(PythonObject):
// Offset of the PythonObject relative to the start of this buffer.
// i64 len(PythonObject):
// Length of the PythonObject.
// inband_data | pad(64)
// Inband data, padded with 64 bytes for the alignment of buffers.
// buffers | pad(8)
// Raw data of buffers, padded with 8 bytes for the alignment of PythonObject.
// PythonObject
// PythonObject is stored at the end because its size will be variable.
// ---------------------
// The message for metadata of python buffer objects.
// Mirrors the fields of the Python buffer protocol (Py_buffer), except for
// the raw pointer: 'address' is an offset into the raw-buffers section.
message PythonBuffer {
// The offset of the buffer relative to the beginning of the raw buffer section,
// which is stored in 'PythonObject'.
uint64 address = 1;
// The length of the buffer.
// It should be equal to 'product(*shape) * itemsize'.
// 'int64' represents 'Py_ssize_t' of the corresponding python interface.
int64 length = 2;
// The size of every element in the buffer.
// 'int64' represents 'Py_ssize_t' of the corresponding python interface.
int64 itemsize = 3;
// The dimensions of the object (for example, number of tensor axes).
int32 ndim = 4;
// Readonly flag for this object.
bool readonly = 5;
// The format string for every item. This is optional.
// If this is NULL, "B" (unsigned bytes) is assumed.
string format = 6;
// The shape of the object per dimension. This is NULL when ndim == 0
// The length of the shape should be equal to 'ndim'.
// 'int64' represents 'Py_ssize_t' of the corresponding python interface.
repeated int64 shape = 7;
// The stride of the object per dimension. This is NULL when ndim == 0
// The length of the strides should be equal to 'ndim'.
// 'int64' represents 'Py_ssize_t' of the corresponding python interface.
repeated int64 strides = 8;
// 'suboffsets' is ignored since it is required to be NULL by the pickle5 protocol.
}
// The message for pickle5 serialized python object.
// All offsets below are relative to the start of the Plasma buffer described
// in the layout comment at the top of this file.
message PythonObject {
// The offset of the inband data section relative to the beginning of the Plasma buffer.
uint64 inband_data_offset = 1;
// The size of the inband data section.
uint64 inband_data_size = 2;
// The offset of the raw buffers section relative to the beginning of the Plasma buffer.
uint64 raw_buffers_offset = 3;
// The size of the buffers section. It is not used in deserialization
// because we already have the length and address of every buffer. However, it could
// be useful for debugging or future adjustment, so we just keep it.
uint64 raw_buffers_size = 4;
// The metadata of python buffer objects.
repeated PythonBuffer buffer = 5;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/actor_registration.cc
|
C++
|
#include "ray/raylet/actor_registration.h"
#include <sstream>
#include "ray/util/logging.h"
namespace ray {
namespace raylet {
// Construct a fresh registration from the global actor table entry.
ActorRegistration::ActorRegistration(const ActorTableData &actor_table_data)
    : actor_table_data_(actor_table_data) {
  // The first task submitted on each new actor handle will depend on the actor
  // creation object, so we always pin it (take one reference on it here).
  dummy_objects_[GetActorCreationDependency()]++;
}
// Restore a registration from a checkpoint. The checkpoint stores the
// frontier and dummy-object refcounts as parallel arrays, indexed together.
ActorRegistration::ActorRegistration(const ActorTableData &actor_table_data,
                                     const ActorCheckpointData &checkpoint_data)
    : actor_table_data_(actor_table_data),
      execution_dependency_(
          ObjectID::FromBinary(checkpoint_data.execution_dependency())) {
  // Restore `frontier_`: handle_ids / task_counters / frontier_dependencies
  // are parallel arrays of equal length.
  for (int64_t i = 0; i < checkpoint_data.handle_ids_size(); i++) {
    auto caller_id = TaskID::FromBinary(checkpoint_data.handle_ids(i));
    auto &frontier_entry = frontier_[caller_id];
    frontier_entry.task_counter = checkpoint_data.task_counters(i);
    frontier_entry.execution_dependency =
        ObjectID::FromBinary(checkpoint_data.frontier_dependencies(i));
  }
  // Restore `dummy_objects_`: unreleased dummy object IDs paired with their
  // reference counts.
  for (int64_t i = 0; i < checkpoint_data.unreleased_dummy_objects_size(); i++) {
    auto dummy = ObjectID::FromBinary(checkpoint_data.unreleased_dummy_objects(i));
    dummy_objects_[dummy] = checkpoint_data.num_dummy_object_dependencies(i);
  }
}
// Raylet (node manager) currently hosting this actor.
const ClientID ActorRegistration::GetNodeManagerId() const {
  return ClientID::FromBinary(actor_table_data_.address().raylet_id());
}

// Dummy object returned by the actor's creation task. Pinning/recreating it
// restores the actor's initial state.
const ObjectID ActorRegistration::GetActorCreationDependency() const {
  return ObjectID::FromBinary(actor_table_data_.actor_creation_dummy_object_id());
}

// Dummy object produced by the task that most recently executed on this actor.
const ObjectID ActorRegistration::GetExecutionDependency() const {
  return execution_dependency_;
}

// Job that created this actor.
const JobID ActorRegistration::GetJobId() const {
  return JobID::FromBinary(actor_table_data_.job_id());
}

// Maximum number of times this actor may be reconstructed.
const int64_t ActorRegistration::GetMaxReconstructions() const {
  return actor_table_data_.max_reconstructions();
}

// Remaining reconstruction attempts for this actor.
const int64_t ActorRegistration::GetRemainingReconstructions() const {
  return actor_table_data_.remaining_reconstructions();
}

// Per-handle execution state: task counter and latest execution dependency.
const std::unordered_map<TaskID, ActorRegistration::FrontierLeaf>
    &ActorRegistration::GetFrontier() const {
  return frontier_;
}
// Advance the frontier for `caller_id` by one executed task. Drops one
// reference on the handle's previous cursor object and takes one on the new
// cursor. Returns the previous cursor's ID if its refcount hit zero (it may
// then be released from the object store), or a nil ObjectID otherwise.
ObjectID ActorRegistration::ExtendFrontier(const TaskID &caller_id,
                                           const ObjectID &execution_dependency) {
  auto &leaf = frontier_[caller_id];
  ObjectID released;  // Stays nil unless the old cursor became unreferenced.
  if (!leaf.execution_dependency.IsNil()) {
    // Drop this handle's reference on its previous cursor.
    auto ref_it = dummy_objects_.find(leaf.execution_dependency);
    RAY_CHECK(ref_it != dummy_objects_.end());
    --ref_it->second;
    RAY_CHECK(ref_it->second >= 0);
    if (ref_it->second == 0) {
      released = leaf.execution_dependency;
      dummy_objects_.erase(ref_it);
    }
  }
  // Record the executed task and move both the per-handle and the actor-wide
  // cursors forward.
  ++leaf.task_counter;
  leaf.execution_dependency = execution_dependency;
  execution_dependency_ = execution_dependency;
  // Take a reference on the new cursor for this actor handle.
  ++dummy_objects_[execution_dependency];
  return released;
}
// Number of distinct handles (callers) that have submitted tasks to this actor.
int ActorRegistration::NumHandles() const { return frontier_.size(); }
// Build an ActorCheckpointData snapshot of this actor's execution state.
// Works on a copy of the registration so the live state is not mutated.
std::shared_ptr<ActorCheckpointData> ActorRegistration::GenerateCheckpointData(
    const ActorID &actor_id, const Task *task) {
  // Make a copy of the actor registration
  ActorRegistration copy = *this;
  if (task) {
    const auto actor_caller_id = task->GetTaskSpecification().CallerId();
    const auto dummy_object = task->GetTaskSpecification().ActorDummyObject();
    // Extend its frontier to include the most recent task.
    // NOTE(hchen): For non-direct-call actors, this is needed because this method is
    // called before `FinishAssignedTask`, which will be called when the worker tries to
    // fetch the next task. For direct-call actors, checkpoint data doesn't contain
    // frontier info, so we don't need to do `ExtendFrontier` here.
    copy.ExtendFrontier(actor_caller_id, dummy_object);
  }
  // Use actor's current state to generate checkpoint data.
  auto checkpoint_data = std::make_shared<ActorCheckpointData>();
  checkpoint_data->set_actor_id(actor_id.Binary());
  checkpoint_data->set_execution_dependency(copy.GetExecutionDependency().Binary());
  // Serialize the frontier as parallel arrays (handle ID, counter, dependency);
  // the restoring constructor reads them back by shared index.
  for (const auto &frontier : copy.GetFrontier()) {
    checkpoint_data->add_handle_ids(frontier.first.Binary());
    checkpoint_data->add_task_counters(frontier.second.task_counter);
    checkpoint_data->add_frontier_dependencies(
        frontier.second.execution_dependency.Binary());
  }
  // Serialize the unreleased dummy objects with their reference counts.
  for (const auto &entry : copy.GetDummyObjects()) {
    checkpoint_data->add_unreleased_dummy_objects(entry.first.Binary());
    checkpoint_data->add_num_dummy_object_dependencies(entry.second);
  }
  // Each checkpoint gets a fresh random ID.
  ActorCheckpointID checkpoint_id = ActorCheckpointID::FromRandom();
  checkpoint_data->set_checkpoint_id(checkpoint_id.Binary());
  return checkpoint_data;
}
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/actor_registration.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_ACTOR_REGISTRATION_H
#define RAY_RAYLET_ACTOR_REGISTRATION_H
#include <unordered_map>
#include "ray/common/id.h"
#include "ray/common/task/task.h"
#include "ray/protobuf/gcs.pb.h"
namespace ray {
namespace raylet {
using rpc::ActorTableData;
using ActorState = rpc::ActorTableData::ActorState;
using rpc::ActorCheckpointData;
/// \class ActorRegistration
///
/// Information about an actor registered in the system. This includes the
/// actor's current node manager location, and if local, information about its
/// current execution state, used for reconstruction purposes, and whether the
/// actor is currently alive or not.
class ActorRegistration {
 public:
  /// Create an actor registration.
  ///
  /// \param actor_table_data Information from the global actor table about
  /// this actor. This includes the actor's node manager location.
  explicit ActorRegistration(const ActorTableData &actor_table_data);

  /// Recreate an actor's registration from a checkpoint.
  ///
  /// \param actor_table_data Information from the global actor table about
  /// this actor.
  /// \param checkpoint_data The checkpoint used to restore the actor.
  ActorRegistration(const ActorTableData &actor_table_data,
                    const ActorCheckpointData &checkpoint_data);

  /// Each actor may have multiple callers, or "handles". A frontier leaf
  /// represents the execution state of the actor with respect to a single
  /// handle.
  struct FrontierLeaf {
    /// The number of tasks submitted by this handle that have executed on the
    /// actor so far.
    int64_t task_counter;
    /// The execution dependency returned by the task submitted by this handle
    /// that most recently executed on the actor.
    ObjectID execution_dependency;
  };

  /// Get the actor table data.
  ///
  /// \return The actor table data.
  const ActorTableData &GetTableData() const { return actor_table_data_; }

  /// Get the actor's current state (ALIVE or DEAD).
  ///
  /// \return The actor's current state.
  const ActorState GetState() const { return actor_table_data_.state(); }

  /// Update actor's state.
  void SetState(const ActorState &state) { actor_table_data_.set_state(state); }

  /// Get the actor's node manager location.
  ///
  /// \return The actor's node manager location. All tasks for the actor should
  /// be forwarded to this node.
  const ClientID GetNodeManagerId() const;

  /// Get the object that represents the actor's initial state. This is the
  /// execution dependency returned by this actor's creation task. If
  /// reconstructed, this will recreate the actor.
  ///
  /// \return The execution dependency returned by the actor's creation task.
  const ObjectID GetActorCreationDependency() const;

  /// Get actor's job ID.
  const JobID GetJobId() const;

  /// Get the max number of times this actor should be reconstructed.
  const int64_t GetMaxReconstructions() const;

  /// Get the remaining number of times this actor should be reconstructed.
  const int64_t GetRemainingReconstructions() const;

  /// Get the object that represents the actor's current state. This is the
  /// execution dependency returned by the task most recently executed on the
  /// actor. The next task to execute on the actor should be marked as
  /// execution-dependent on this object.
  ///
  /// \return The execution dependency returned by the most recently executed
  /// task.
  const ObjectID GetExecutionDependency() const;

  /// Get the execution frontier of the actor, indexed by handle. This captures
  /// the execution state of the actor, a summary of which tasks have executed
  /// so far.
  ///
  /// \return The actor frontier, a map from handle ID to execution state for
  /// that handle.
  const std::unordered_map<TaskID, FrontierLeaf> &GetFrontier() const;

  /// Get all the dummy objects of this actor's tasks.
  const std::unordered_map<ObjectID, int64_t> &GetDummyObjects() const {
    return dummy_objects_;
  }

  /// Extend the frontier of the actor by a single task. This should be called
  /// whenever the actor executes a task.
  ///
  /// \param caller_id The ID of the caller (handle) that submitted the task.
  /// \param execution_dependency The object representing the actor's new
  /// state. This is the execution dependency returned by the task.
  /// \return The dummy object that can be released as a result of the executed
  /// task. If no dummy object can be released, then this is nil.
  ObjectID ExtendFrontier(const TaskID &caller_id, const ObjectID &execution_dependency);

  /// Returns num handles to this actor entry.
  ///
  /// \return int.
  int NumHandles() const;

  /// Generate checkpoint data based on actor's current state.
  ///
  /// \param actor_id ID of this actor.
  /// \param task The task that just finished on the actor. (nullptr when it's direct
  /// call.)
  /// \return A shared pointer to the generated checkpoint data.
  std::shared_ptr<ActorCheckpointData> GenerateCheckpointData(const ActorID &actor_id,
                                                              const Task *task);

 private:
  /// Information from the global actor table about this actor, including the
  /// node manager location.
  ActorTableData actor_table_data_;
  /// The object representing the state following the actor's most recently
  /// executed task. The next task to execute on the actor should be marked as
  /// execution-dependent on this object.
  ObjectID execution_dependency_;
  /// The execution frontier of the actor, which represents which tasks have
  /// executed so far and which tasks may execute next, based on execution
  /// dependencies. This is indexed by handle.
  std::unordered_map<TaskID, FrontierLeaf> frontier_;
  /// This map is used to track all the unreleased dummy objects for this
  /// actor. The map key is the dummy object ID, and the map value is the
  /// number of actor handles that depend on that dummy object. When the map
  /// value decreases to 0, the dummy object is safe to release from the object
  /// manager, since this means that no actor handle will depend on that dummy
  /// object again.
  ///
  /// An actor handle depends on a dummy object when its next unfinished task
  /// depends on the dummy object. For a given dummy object (say D) created by
  /// task (say T) that was submitted by an actor handle (say H), there could
  /// be 2 types of such actor handles:
  /// 1. T is the last task submitted by H that was executed. If the next task
  ///    submitted by H hasn't finished yet, then H still depends on D since D
  ///    will be in the next task's execution dependencies.
  /// 2. Any handles that were forked from H after T finished, and before T's
  ///    next task finishes. Such handles depend on D until their first tasks
  ///    finish since D will be their first tasks' execution dependencies.
  std::unordered_map<ObjectID, int64_t> dummy_objects_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_ACTOR_REGISTRATION_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/client_connection_test.cc
|
C++
|
#include <list>
#include <memory>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ray/common/client_connection.h"
namespace ray {
namespace raylet {
// Fixture that wires up a connected pair of local (Unix-domain) stream
// sockets so each test can build ClientConnections over `in_`/`out_`.
class ClientConnectionTest : public ::testing::Test {
 public:
  ClientConnectionTest()
      : io_service_(), in_(io_service_), out_(io_service_), error_message_type_(1) {
    boost::asio::local::connect_pair(in_, out_);
  }

  // Write a framed message whose version cookie is deliberately wrong, to
  // exercise the receiver's bad-message handling path.
  ray::Status WriteBadMessage(std::shared_ptr<ray::LocalClientConnection> conn,
                              int64_t type, int64_t length, const uint8_t *message) {
    std::vector<boost::asio::const_buffer> message_buffers;
    auto write_cookie = 123456;  // incorrect version.
    message_buffers.push_back(boost::asio::buffer(&write_cookie, sizeof(write_cookie)));
    message_buffers.push_back(boost::asio::buffer(&type, sizeof(type)));
    message_buffers.push_back(boost::asio::buffer(&length, sizeof(length)));
    message_buffers.push_back(boost::asio::buffer(message, length));
    return conn->WriteBuffer(message_buffers);
  }

 protected:
  boost::asio::io_service io_service_;
  // Connected socket pair; tests move these into ClientConnection instances.
  local_stream_protocol::socket in_;
  local_stream_protocol::socket out_;
  // Message type used to signal protocol errors to handlers.
  int64_t error_message_type_;
};
// Each side writes one message synchronously; both sides must receive the
// other's payload exactly once.
TEST_F(ClientConnectionTest, SimpleSyncWrite) {
  const uint8_t payload[5] = {1, 2, 3, 4, 5};
  int received = 0;
  ClientHandler<local_stream_protocol> on_connect =
      [](LocalClientConnection &client) {};
  MessageHandler<local_stream_protocol> on_message =
      [&payload, &received](std::shared_ptr<LocalClientConnection> client,
                            int64_t message_type, const uint8_t *message) {
        // The delivered bytes must match the written payload.
        ASSERT_TRUE(std::memcmp(payload, message, 5) == 0);
        received += 1;
      };
  auto conn1 = LocalClientConnection::Create(
      on_connect, on_message, std::move(in_), "conn1", {}, error_message_type_);
  auto conn2 = LocalClientConnection::Create(
      on_connect, on_message, std::move(out_), "conn2", {}, error_message_type_);
  RAY_CHECK_OK(conn1->WriteMessage(0, 5, payload));
  RAY_CHECK_OK(conn2->WriteMessage(0, 5, payload));
  conn1->ProcessMessages();
  conn2->ProcessMessages();
  io_service_.run();
  ASSERT_EQ(received, 2);
}
// Three asynchronous writes must be delivered in order. The reader re-arms
// ProcessMessages from inside its own handler, so the statement order here
// is load-bearing — do not reorder.
TEST_F(ClientConnectionTest, SimpleAsyncWrite) {
  const uint8_t msg1[5] = {1, 2, 3, 4, 5};
  const uint8_t msg2[5] = {4, 4, 4, 4, 4};
  const uint8_t msg3[5] = {8, 8, 8, 8, 8};
  int num_messages = 0;
  ClientHandler<local_stream_protocol> client_handler =
      [](LocalClientConnection &client) {};
  MessageHandler<local_stream_protocol> noop_handler =
      [](std::shared_ptr<LocalClientConnection> client, int64_t message_type,
         const uint8_t *message) {};
  // Declared before the handler so the handler can capture it by reference;
  // assigned after Create() below.
  std::shared_ptr<LocalClientConnection> reader = NULL;
  MessageHandler<local_stream_protocol> message_handler =
      [&msg1, &msg2, &msg3, &num_messages, &reader](
          std::shared_ptr<LocalClientConnection> client, int64_t message_type,
          const uint8_t *message) {
        // Messages must arrive in write order: msg1, msg2, msg3.
        if (num_messages == 0) {
          ASSERT_TRUE(!std::memcmp(msg1, message, 5));
        } else if (num_messages == 1) {
          ASSERT_TRUE(!std::memcmp(msg2, message, 5));
        } else {
          ASSERT_TRUE(!std::memcmp(msg3, message, 5));
        }
        num_messages += 1;
        // Re-arm the read loop until all three messages have arrived.
        if (num_messages < 3) {
          reader->ProcessMessages();
        }
      };
  auto writer = LocalClientConnection::Create(
      client_handler, noop_handler, std::move(in_), "writer", {}, error_message_type_);
  reader = LocalClientConnection::Create(client_handler, message_handler, std::move(out_),
                                         "reader", {}, error_message_type_);
  // Every async write must complete successfully.
  std::function<void(const ray::Status &)> callback = [](const ray::Status &status) {
    RAY_CHECK_OK(status);
  };
  writer->WriteMessageAsync(0, 5, msg1, callback);
  writer->WriteMessageAsync(0, 5, msg2, callback);
  writer->WriteMessageAsync(0, 5, msg3, callback);
  reader->ProcessMessages();
  io_service_.run();
  ASSERT_EQ(num_messages, 3);
}
// An async write on a connection that was closed first must report a
// non-OK status to its completion callback.
TEST_F(ClientConnectionTest, SimpleAsyncError) {
  const uint8_t payload[5] = {1, 2, 3, 4, 5};
  ClientHandler<local_stream_protocol> on_connect =
      [](LocalClientConnection &client) {};
  MessageHandler<local_stream_protocol> ignore_messages =
      [](std::shared_ptr<LocalClientConnection> client, int64_t message_type,
         const uint8_t *message) {};
  auto writer = LocalClientConnection::Create(
      on_connect, ignore_messages, std::move(in_), "writer", {}, error_message_type_);
  std::function<void(const ray::Status &)> expect_failure =
      [](const ray::Status &status) { ASSERT_TRUE(!status.ok()); };
  // Close before writing so the asynchronous write is guaranteed to fail.
  writer->Close();
  writer->WriteMessageAsync(0, 5, payload, expect_failure);
  io_service_.run();
}
// The completion callback captures a shared_ptr to the connection itself.
// The write must still complete successfully, and holding the self-reference
// in the callback must not leak the connection.
TEST_F(ClientConnectionTest, CallbackWithSharedRefDoesNotLeakConnection) {
  const uint8_t msg1[5] = {1, 2, 3, 4, 5};
  ClientHandler<local_stream_protocol> client_handler =
      [](LocalClientConnection &client) {};
  MessageHandler<local_stream_protocol> noop_handler =
      [](std::shared_ptr<LocalClientConnection> client, int64_t message_type,
         const uint8_t *message) {};
  auto writer = LocalClientConnection::Create(
      client_handler, noop_handler, std::move(in_), "writer", {}, error_message_type_);
  std::function<void(const ray::Status &)> callback =
      [writer](const ray::Status &status) {
        // Keep `writer` captured by value; the cast silences the unused-capture
        // warning while still extending the connection's lifetime.
        static_cast<void>(writer);
        ASSERT_TRUE(status.ok());
      };
  writer->WriteMessageAsync(0, 5, msg1, callback);
  io_service_.run();
}
// A message written with an incorrect version cookie must be dropped by the
// reader (handler never invoked) without crashing, as long as no client ID
// has been set on the connection.
TEST_F(ClientConnectionTest, ProcessBadMessage) {
  const uint8_t arr[5] = {1, 2, 3, 4, 5};
  int num_messages = 0;
  ClientHandler<local_stream_protocol> client_handler =
      [](LocalClientConnection &client) {};
  MessageHandler<local_stream_protocol> message_handler =
      [&arr, &num_messages](std::shared_ptr<LocalClientConnection> client,
                            int64_t message_type, const uint8_t *message) {
        ASSERT_TRUE(!std::memcmp(arr, message, 5));
        num_messages += 1;
      };
  auto writer = LocalClientConnection::Create(
      client_handler, message_handler, std::move(in_), "writer", {}, error_message_type_);
  auto reader =
      LocalClientConnection::Create(client_handler, message_handler, std::move(out_),
                                    "reader", {}, error_message_type_);
  // If client ID is set, bad message would crash the test.
  // reader->SetClientID(UniqueID::FromRandom());
  // Intentionally write a message with incorrect cookie.
  // Verify it won't crash as long as client ID is not set.
  RAY_CHECK_OK(WriteBadMessage(writer, 0, 5, arr));
  reader->ProcessMessages();
  io_service_.run();
  // The bad message must have been silently dropped.
  ASSERT_EQ(num_messages, 0);
}
} // namespace raylet
} // namespace ray
// Test entry point: initializes gtest and runs all registered cases.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/lineage_cache.cc
|
C++
|
#include "lineage_cache.h"
#include <sstream>
#include "ray/gcs/redis_gcs_client.h"
#include "ray/stats/stats.h"
namespace ray {
namespace raylet {
LineageEntry::LineageEntry(const Task &task, GcsStatus status)
    : status_(status), task_(task) {
  // Cache the IDs of the tasks that produced this task's arguments.
  ComputeParentTaskIds();
}

// Current GCS commit state of this entry.
GcsStatus LineageEntry::GetStatus() const { return status_; }

// Advance the status monotonically. Returns true if new_status is strictly
// greater than the current status (and was applied); otherwise leaves the
// entry untouched and returns false.
bool LineageEntry::SetStatus(GcsStatus new_status) {
  if (status_ < new_status) {
    status_ = new_status;
    return true;
  } else {
    return false;
  }
}

// Force the status backwards; only valid when new_status is strictly lower.
void LineageEntry::ResetStatus(GcsStatus new_status) {
  RAY_CHECK(new_status < status_);
  status_ = new_status;
}

// Record that this task was explicitly forwarded to the given node.
void LineageEntry::MarkExplicitlyForwarded(const ClientID &node_id) {
  forwarded_to_.insert(node_id);
}

// Whether this task was already explicitly forwarded to the given node.
bool LineageEntry::WasExplicitlyForwarded(const ClientID &node_id) const {
  return forwarded_to_.find(node_id) != forwarded_to_.end();
}

// ID of the task stored in this entry.
const TaskID LineageEntry::GetEntryId() const {
  return task_.GetTaskSpecification().TaskId();
}

// IDs of the tasks that created this task's arguments (its lineage parents).
const std::unordered_set<TaskID> &LineageEntry::GetParentTaskIds() const {
  return parent_task_ids_;
}

// Recompute parent_task_ids_ from the task's current dependency list.
void LineageEntry::ComputeParentTaskIds() {
  parent_task_ids_.clear();
  // A task's parents are the tasks that created its arguments.
  for (const auto &dependency : task_.GetDependencies()) {
    parent_task_ids_.insert(dependency.TaskId());
  }
}

const Task &LineageEntry::TaskData() const { return task_; }

Task &LineageEntry::TaskDataMutable() { return task_; }

// Replace the task's execution spec and refresh the cached parent IDs.
void LineageEntry::UpdateTaskData(const Task &task) {
  task_.CopyTaskExecutionSpec(task);
  ComputeParentTaskIds();
}
Lineage::Lineage() {}

// Look up an entry by task ID; empty optional if not present.
boost::optional<const LineageEntry &> Lineage::GetEntry(const TaskID &task_id) const {
  auto entry = entries_.find(task_id);
  if (entry != entries_.end()) {
    return entry->second;
  } else {
    return boost::optional<const LineageEntry &>();
  }
}

// Mutable variant of GetEntry; empty optional if not present.
boost::optional<LineageEntry &> Lineage::GetEntryMutable(const TaskID &task_id) {
  auto entry = entries_.find(task_id);
  if (entry != entries_.end()) {
    return entry->second;
  } else {
    return boost::optional<LineageEntry &>();
  }
}

// Remove child_id from parent_id's child set; drops the parent's set entirely
// when it becomes empty. Fails a check if the edge was not recorded.
void Lineage::RemoveChild(const TaskID &parent_id, const TaskID &child_id) {
  auto parent_it = children_.find(parent_id);
  RAY_CHECK(parent_it->second.erase(child_id) == 1);
  if (parent_it->second.empty()) {
    children_.erase(parent_it);
  }
}

// Record a parent -> child dependency edge; fails a check on duplicates.
void Lineage::AddChild(const TaskID &parent_id, const TaskID &child_id) {
  auto inserted = children_[parent_id].insert(child_id);
  RAY_CHECK(inserted.second);
}
// Insert or upgrade an entry for `task`. Returns true if a new entry was
// created or an existing entry's status strictly advanced; false otherwise.
bool Lineage::SetEntry(const Task &task, GcsStatus status) {
  // Get the status of the current entry at the key.
  auto task_id = task.GetTaskSpecification().TaskId();
  auto it = entries_.find(task_id);
  bool updated = false;
  if (it != entries_.end()) {
    if (it->second.SetStatus(status)) {
      // We assume here that the new `task` has the same fields as the task
      // already in the lineage cache. If this is not true, then it is
      // necessary to update the task data of the existing lineage cache entry
      // with LineageEntry::UpdateTaskData.
      updated = true;
    }
  } else {
    LineageEntry new_entry(task, status);
    it = entries_.emplace(std::make_pair(task_id, std::move(new_entry))).first;
    updated = true;
    // New task data was added to the local cache, so record which tasks it
    // depends on. Add all new tasks that it depends on.
    for (const auto &parent_id : it->second.GetParentTaskIds()) {
      AddChild(parent_id, task_id);
    }
  }
  return updated;
}
// Remove and return the entry for `task_id`, unlinking it from its parents'
// child sets. Returns an empty optional if no entry exists.
//
// Fix: the previous version shadowed the `entries_.find()` iterator with a
// second local also named `entry` and then re-looked the task up via
// `entries_.at(task_id)` — a redundant hash lookup. Reuse the iterator.
boost::optional<LineageEntry> Lineage::PopEntry(const TaskID &task_id) {
  auto it = entries_.find(task_id);
  if (it == entries_.end()) {
    return boost::optional<LineageEntry>();
  }
  // Move the entry out before erasing its slot.
  LineageEntry entry = std::move(it->second);
  // Remove the task's dependencies.
  for (const auto &parent_id : entry.GetParentTaskIds()) {
    RemoveChild(parent_id, task_id);
  }
  entries_.erase(it);
  return entry;
}
// All entries currently in this lineage, keyed by task ID.
// NOTE(review): the key type is `const TaskID`, which is unconventional for
// unordered_map — confirm it matches the member declaration in the header.
const std::unordered_map<const TaskID, LineageEntry> &Lineage::GetEntries() const {
  return entries_;
}

// Children (dependents) recorded for the given task; returns a reference to
// a function-local static empty set when the task has none.
const std::unordered_set<TaskID> &Lineage::GetChildren(const TaskID &task_id) const {
  static const std::unordered_set<TaskID> empty_children;
  const auto it = children_.find(task_id);
  if (it != children_.end()) {
    return it->second;
  } else {
    return empty_children;
  }
}
// Construct the cache for this node.
// NOTE(review): `max_lineage_size` is accepted but not stored or used in this
// constructor — confirm whether eviction elsewhere still honors it or the
// parameter is vestigial.
LineageCache::LineageCache(const ClientID &self_node_id,
                           std::shared_ptr<gcs::GcsClient> gcs_client,
                           uint64_t max_lineage_size)
    : self_node_id_(self_node_id), gcs_client_(gcs_client) {}
/// A helper function to add some uncommitted lineage to the local cache.
/// Performs a DFS from `task_id` through its parents, copying each
/// UNCOMMITTED entry into the local cache and subscribing to its commit.
void LineageCache::AddUncommittedLineage(const TaskID &task_id,
                                         const Lineage &uncommitted_lineage) {
  RAY_LOG(DEBUG) << "Adding uncommitted task " << task_id << " on " << self_node_id_;
  // If the entry is not found in the lineage to merge, then we stop since
  // there is nothing to copy into the merged lineage.
  auto entry = uncommitted_lineage.GetEntry(task_id);
  if (!entry) {
    return;
  } else if (entry->TaskData().GetTaskSpecification().IsDirectCall()) {
    // Disable lineage logging for direct tasks.
    return;
  }
  RAY_CHECK(entry->GetStatus() == GcsStatus::UNCOMMITTED);
  // Insert a copy of the entry into our cache.
  const auto &parent_ids = entry->GetParentTaskIds();
  // If the insert is successful, then continue the DFS. The insert will fail
  // if the new entry has an equal or lower GCS status than the current entry
  // in our cache. This also prevents us from traversing the same node twice.
  if (lineage_.SetEntry(entry->TaskData(), entry->GetStatus())) {
    RAY_CHECK(SubscribeTask(task_id));
    for (const auto &parent_id : parent_ids) {
      AddUncommittedLineage(parent_id, uncommitted_lineage);
    }
  }
}
// Attempt to flush `task` to the GCS. Returns true if the task was flushed
// (or is a direct-call task, for which lineage logging is disabled); false
// if a commit was already in flight.
bool LineageCache::CommitTask(const Task &task) {
  if (task.GetTaskSpecification().IsDirectCall()) {
    // Disable lineage logging for direct tasks.
    return true;
  }
  const TaskID task_id = task.GetTaskSpecification().TaskId();
  RAY_LOG(DEBUG) << "Committing task " << task_id << " on " << self_node_id_;
  // Either the entry was newly inserted/upgraded, or it already exists and is
  // still UNCOMMITTED; both cases mean it has not been written yet.
  if (lineage_.SetEntry(task, GcsStatus::UNCOMMITTED) ||
      lineage_.GetEntry(task_id)->GetStatus() == GcsStatus::UNCOMMITTED) {
    // Attempt to flush the task if the task is uncommitted.
    FlushTask(task_id);
    return true;
  } else {
    // The task was already committing (COMMITTING).
    return false;
  }
}
// Flush every entry still in the UNCOMMITTED state to the GCS, dropping its
// commit subscription first (the flush's own callback handles the commit).
void LineageCache::FlushAllUncommittedTasks() {
  size_t flushed_count = 0;
  for (const auto &item : lineage_.GetEntries()) {
    // Only tasks that have not yet been written need flushing.
    if (item.second.GetStatus() != GcsStatus::UNCOMMITTED) {
      continue;
    }
    RAY_CHECK(UnsubscribeTask(item.first));
    FlushTask(item.first);
    ++flushed_count;
  }
  RAY_LOG(DEBUG) << "Flushed " << flushed_count << " uncommitted tasks";
}
// Record that `task_id` was explicitly forwarded to `node_id`, so future
// uncommitted-lineage requests for that node can skip it. A missing entry
// (already evicted) is silently ignored.
void LineageCache::MarkTaskAsForwarded(const TaskID &task_id, const ClientID &node_id) {
  RAY_CHECK(!node_id.IsNil());
  if (auto entry = lineage_.GetEntryMutable(task_id)) {
    entry->MarkExplicitlyForwarded(node_id);
  }
}
/// A helper function to get the uncommitted lineage of a task.
/// DFS from `task_id` through parents in `lineage_from`, copying entries into
/// `lineage_to`, skipping anything already explicitly forwarded to `node_id`.
void GetUncommittedLineageHelper(const TaskID &task_id, const Lineage &lineage_from,
                                 Lineage &lineage_to, const ClientID &node_id) {
  // If the entry is not found in the lineage to merge, then we stop since
  // there is nothing to copy into the merged lineage.
  auto entry = lineage_from.GetEntry(task_id);
  if (!entry) {
    return;
  }
  // If this task has already been forwarded to this node, then we can stop.
  if (entry->WasExplicitlyForwarded(node_id)) {
    return;
  }
  // Insert a copy of the entry into lineage_to. If the insert is successful,
  // then continue the DFS. The insert will fail if the new entry has an equal
  // or lower GCS status than the current entry in lineage_to. This also
  // prevents us from traversing the same node twice.
  if (lineage_to.SetEntry(entry->TaskData(), entry->GetStatus())) {
    for (const auto &parent_id : entry->GetParentTaskIds()) {
      GetUncommittedLineageHelper(parent_id, lineage_from, lineage_to, node_id);
    }
  }
}
// Collect the uncommitted lineage to send to `node_id` along with `task_id`.
// Always includes the requested task itself (if cached), even when all of its
// lineage was already forwarded to that node.
Lineage LineageCache::GetUncommittedLineage(const TaskID &task_id,
                                            const ClientID &node_id) const {
  Lineage uncommitted_lineage;
  // Add all uncommitted ancestors from the lineage cache to the uncommitted
  // lineage of the requested task.
  GetUncommittedLineageHelper(task_id, lineage_, uncommitted_lineage, node_id);
  // The lineage always includes the requested task id, so add the task if it
  // wasn't already added. The requested task may not have been added if it was
  // already explicitly forwarded to this node before.
  if (uncommitted_lineage.GetEntries().empty()) {
    auto entry = lineage_.GetEntry(task_id);
    if (entry) {
      RAY_CHECK(uncommitted_lineage.SetEntry(entry->TaskData(), entry->GetStatus()));
    }
  }
  return uncommitted_lineage;
}
// Write the task's spec and execution spec to the GCS task table and mark the
// entry COMMITTING. The entry must exist and not yet be committing/committed.
void LineageCache::FlushTask(const TaskID &task_id) {
  auto entry = lineage_.GetEntryMutable(task_id);
  RAY_CHECK(entry);
  RAY_CHECK(entry->GetStatus() < GcsStatus::COMMITTING);

  // Once the GCS write completes, mark the entry as committed.
  auto task_callback = [this, task_id](Status status) {
    RAY_CHECK(status.ok());
    HandleEntryCommitted(task_id);
  };
  auto task = lineage_.GetEntry(task_id);
  auto task_data = std::make_shared<TaskTableData>();
  task_data->mutable_task()->mutable_task_spec()->CopyFrom(
      task->TaskData().GetTaskSpecification().GetMessage());
  task_data->mutable_task()->mutable_task_execution_spec()->CopyFrom(
      task->TaskData().GetTaskExecutionSpec().GetMessage());
  RAY_CHECK_OK(gcs_client_->Tasks().AsyncAdd(task_data, task_callback));

  // We successfully wrote the task, so mark it as committing.
  // TODO(swang): Use a batched interface and write with all object entries.
  RAY_CHECK(entry->SetStatus(GcsStatus::COMMITTING));
}
/// Subscribe to commit notifications for a task, if not already subscribed.
///
/// \param task_id The ID of the task to subscribe to.
/// \return True if we were previously unsubscribed and are now subscribed.
bool LineageCache::SubscribeTask(const TaskID &task_id) {
  bool newly_subscribed = subscribed_tasks_.insert(task_id).second;
  if (newly_subscribed) {
    // Take the notification payload by const reference; the previous
    // signature took the protobuf by value, copying it on every callback.
    auto subscribe = [this](const TaskID &task_id, const TaskTableData &) {
      HandleEntryCommitted(task_id);
    };
    // Subscribe to the task.
    RAY_CHECK_OK(gcs_client_->Tasks().AsyncSubscribe(task_id, subscribe,
                                                     /*done*/ nullptr));
  }
  return newly_subscribed;
}
/// Unsubscribe from commit notifications for a task, if subscribed.
///
/// \param task_id The ID of the task to unsubscribe from.
/// \return True if we were previously subscribed and are now unsubscribed.
bool LineageCache::UnsubscribeTask(const TaskID &task_id) {
  auto found = subscribed_tasks_.find(task_id);
  if (found == subscribed_tasks_.end()) {
    // Not subscribed; nothing to cancel.
    return false;
  }
  // Cancel the subscription and forget the task.
  RAY_CHECK_OK(gcs_client_->Tasks().AsyncUnsubscribe(task_id, /*done*/ nullptr));
  subscribed_tasks_.erase(found);
  return true;
}
/// Evict a task from the local lineage cache if it is safe to do so, then
/// recursively attempt to evict its children.
void LineageCache::EvictTask(const TaskID &task_id) {
  // A task may be evicted only if it is still present and has been
  // acknowledged as committed by the GCS.
  auto entry = lineage_.GetEntry(task_id);
  if (!entry || entry->GetStatus() != GcsStatus::COMMITTED) {
    return;
  }
  // Entries cannot be safely evicted until every parent is already gone.
  for (const auto &parent_id : entry->GetParentTaskIds()) {
    if (ContainsTask(parent_id)) {
      return;
    }
  }
  // Evict the task.
  RAY_LOG(DEBUG) << "Evicting task " << task_id << " on " << self_node_id_;
  lineage_.PopEntry(task_id);
  // Copy the child set before recursing: the recursive calls mutate the
  // lineage's child map, which would invalidate a live reference.
  const auto children = lineage_.GetChildren(task_id);
  for (const auto &child_id : children) {
    EvictTask(child_id);
  }
}
/// React to a GCS commit notification: mark the entry COMMITTED, try to
/// evict it, and drop the now-unneeded subscription.
void LineageCache::HandleEntryCommitted(const TaskID &task_id) {
  RAY_LOG(DEBUG) << "Task committed: " << task_id;
  auto entry = lineage_.GetEntryMutable(task_id);
  if (entry) {
    // Record the commit acknowledgement and attempt to evict the task.
    entry->SetStatus(GcsStatus::COMMITTED);
    EvictTask(task_id);
    // The commit has been observed, so no further notifications are needed.
    UnsubscribeTask(task_id);
  }
  // Otherwise the task was already evicted by an earlier commit notification.
}
/// Look up a task that must be present in the cache; aborts if it is not.
const Task &LineageCache::GetTaskOrDie(const TaskID &task_id) const {
  const auto &entries = lineage_.GetEntries();
  const auto found = entries.find(task_id);
  // The caller guarantees the task is cached; a miss is a programmer error.
  RAY_CHECK(found != entries.end());
  return found->second.TaskData();
}
/// Whether the lineage cache currently holds an entry for the task.
bool LineageCache::ContainsTask(const TaskID &task_id) const {
  return lineage_.GetEntries().count(task_id) > 0;
}
/// Read-only accessor for the entire locally cached lineage.
const Lineage &LineageCache::GetLineage() const { return lineage_; }
/// Render the cache's size counters as a human-readable debug string.
std::string LineageCache::DebugString() const {
  std::stringstream out;
  out << "LineageCache:"
      << "\n- child map size: " << lineage_.GetChildrenSize()
      << "\n- num subscribed tasks: " << subscribed_tasks_.size()
      << "\n- lineage size: " << lineage_.GetEntries().size();
  return out.str();
}
/// Export the cache's size counters as stats gauges. All three values are
/// recorded under the same stat, distinguished by the ValueTypeKey tag.
void LineageCache::RecordMetrics() const {
  stats::LineageCacheStats().Record(lineage_.GetChildrenSize(),
                                    {{stats::ValueTypeKey, "num_children"}});
  stats::LineageCacheStats().Record(subscribed_tasks_.size(),
                                    {{stats::ValueTypeKey, "num_subscribed_tasks"}});
  stats::LineageCacheStats().Record(lineage_.GetEntries().size(),
                                    {{stats::ValueTypeKey, "num_lineages"}});
}
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/lineage_cache.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_LINEAGE_CACHE_H
#define RAY_RAYLET_LINEAGE_CACHE_H
#include <gtest/gtest_prod.h>
#if defined(__clang__) && defined(_MSC_VER)
// TODO(mehrdadn): Remove this Windows (clang-cl) workaround once we upgrade to
// Boost > 1.68: https://lists.boost.org/Archives/boost/2018/09/243420.php
#include <boost/type_traits.hpp>
#endif
#include <boost/optional.hpp>
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/common/task/task.h"
#include "ray/gcs/redis_gcs_client.h"
namespace ray {
namespace raylet {
using rpc::TaskTableData;
/// The status of a lineage cache entry according to its status in the GCS.
/// Tasks can only transition to a higher GcsStatus (e.g., an UNCOMMITTED state
/// can become COMMITTING but not vice versa). If a task is evicted from the
/// local cache, it implicitly goes back to state `NONE`, after which it may be
/// added to the local cache again (e.g., if it is forwarded to us again).
/// The ordering of the enumerators is significant: SetEntry/SetStatus compare
/// statuses numerically, so new states must be inserted in transition order.
enum class GcsStatus {
  /// The task is not in the lineage cache.
  NONE = 0,
  /// The task is uncommitted. Unless there is a failure, we will expect a
  /// different node to commit this task.
  UNCOMMITTED,
  /// We flushed this task and are waiting for the commit acknowledgement.
  COMMITTING,
  // Tasks for which we received a commit acknowledgement, but which we cannot
  // evict yet (due to an ancestor that has not been evicted). This is to allow
  // a performance optimization that avoids unnecessary subscribes when we
  // receive tasks that were already COMMITTED at the sender.
  COMMITTED,
};
/// \class LineageEntry
///
/// A task entry in the data lineage. Each entry's parents are the tasks that
/// created the entry's arguments.
class LineageEntry {
 public:
  /// Create an entry for a task.
  ///
  /// \param task The task data to eventually be written back to the GCS.
  /// \param status The status of this entry, according to its write status in
  /// the GCS.
  LineageEntry(const Task &task, GcsStatus status);
  /// Get this entry's GCS status.
  ///
  /// \return The entry's status in the GCS.
  GcsStatus GetStatus() const;
  /// Set this entry's GCS status. The status is only set if the new status
  /// is strictly greater than the entry's previous status, according to the
  /// GcsStatus enum.
  ///
  /// \param new_status Set the entry's status to this value if it is greater
  /// than the current status.
  /// \return Whether the entry was set to the new status.
  bool SetStatus(GcsStatus new_status);
  /// Reset this entry's GCS status to a lower status. The new status must
  /// be lower than the current status.
  ///
  /// \param new_status This must be lower than the current status.
  void ResetStatus(GcsStatus new_status);
  /// Mark this entry as having been explicitly forwarded to a remote node manager.
  ///
  /// \param node_id The ID of the remote node manager.
  void MarkExplicitlyForwarded(const ClientID &node_id);
  /// Gets whether this entry was explicitly forwarded to a remote node.
  ///
  /// \param node_id The ID of the remote node manager.
  /// \return Whether this entry was explicitly forwarded to the remote node.
  bool WasExplicitlyForwarded(const ClientID &node_id) const;
  /// Get this entry's ID.
  ///
  /// \return The entry's ID.
  const TaskID GetEntryId() const;
  /// Get the IDs of this entry's parent tasks. These are the IDs of the tasks
  /// that created its arguments.
  ///
  /// \return The IDs of the parent entries.
  const std::unordered_set<TaskID> &GetParentTaskIds() const;
  /// Get the task data.
  ///
  /// \return The task data.
  const Task &TaskData() const;
  /// Get a mutable version of the task data.
  ///
  /// \return The task data.
  /// TODO(swang): This is pretty ugly.
  Task &TaskDataMutable();
  /// Update the task data with a new task.
  ///
  /// \return Void.
  void UpdateTaskData(const Task &task);
 private:
  /// Compute cached parent task IDs. This task is dependent on values returned
  /// by these tasks.
  void ComputeParentTaskIds();
  /// The current state of this entry according to its status in the GCS.
  GcsStatus status_;
  /// The task data to be written to the GCS. This is nullptr if the entry is
  /// an object.
  /// NOTE(review): task_ is mutable (see the commented-out const below) so
  /// that UpdateTaskData/TaskDataMutable can replace it in place.
  // const Task task_;
  Task task_;
  /// A cached copy of the parent task IDs. This task is dependent on values
  /// returned by these tasks. Kept in sync by ComputeParentTaskIds().
  std::unordered_set<TaskID> parent_task_ids_;
  /// IDs of node managers that this task has been explicitly forwarded to.
  std::unordered_set<ClientID> forwarded_to_;
};
/// \class Lineage
///
/// A lineage DAG, according to the data dependency graph. Each node is a task,
/// with an outgoing edge to each of its parent tasks. For a given task, the
/// parents are the tasks that created its arguments. Each entry also records
/// the current status in the GCS for that task or object.
class Lineage {
 public:
  /// Construct an empty Lineage.
  Lineage();
  /// Get an entry from the lineage.
  ///
  /// \param entry_id The ID of the entry to get.
  /// \return An optional reference to the entry. If this is empty, then the
  /// entry ID is not in the lineage.
  boost::optional<const LineageEntry &> GetEntry(const TaskID &entry_id) const;
  /// Get a mutable reference to an entry, or empty if the ID is absent.
  boost::optional<LineageEntry &> GetEntryMutable(const TaskID &task_id);
  /// Set an entry in the lineage. If an entry with this ID already exists,
  /// then the entry is overwritten if and only if the new entry has a higher
  /// GCS status than the current. The current entry's object or task data will
  /// also be overwritten.
  ///
  /// \param task The task data to set, if status is greater than the current entry.
  /// \param status The GCS status.
  /// \return Whether the entry was set.
  bool SetEntry(const Task &task, GcsStatus status);
  /// Delete and return an entry from the lineage.
  ///
  /// \param entry_id The ID of the entry to pop.
  /// \return An optional reference to the popped entry. If this is empty, then
  /// the entry ID is not in the lineage.
  boost::optional<LineageEntry> PopEntry(const TaskID &entry_id);
  /// Get all entries in the lineage.
  ///
  /// \return A const reference to the lineage entries.
  /// NOTE(review): the map key is `const TaskID`, which is unusual (map keys
  /// are already const internally) — confirm this is intentional before
  /// changing, as the .cc iterates this exact type.
  const std::unordered_map<const TaskID, LineageEntry> &GetEntries() const;
  /// Return the IDs of tasks in the lineage that are dependent on the given
  /// task.
  ///
  /// \param The ID of the task whose children to get.
  /// \return The list of IDs for tasks that are in the lineage and dependent
  /// on the given task.
  const std::unordered_set<TaskID> &GetChildren(const TaskID &task_id) const;
  /// Return the size of the children_ map. This is used for debugging purposes
  /// only.
  size_t GetChildrenSize() const { return children_.size(); }
 private:
  /// The lineage entries.
  std::unordered_map<const TaskID, LineageEntry> entries_;
  /// A mapping from each task in the lineage to its children.
  std::unordered_map<TaskID, std::unordered_set<TaskID>> children_;
  /// Record the fact that the child task depends on the parent task.
  void AddChild(const TaskID &parent_id, const TaskID &child_id);
  /// Erase the fact that the child task depends on the parent task.
  void RemoveChild(const TaskID &parent_id, const TaskID &child_id);
};
/// \class LineageCache
///
/// A cache of the task table. This consists of all tasks that this node owns,
/// as well as their lineage, that have not yet been added durably
/// ("committed") to the GCS.
///
/// The current policy is to flush each task as soon as it enters the
/// UNCOMMITTED_READY state. For safety, we only evict tasks if they have been
/// committed and if their parents have been all evicted. Thus, the invariant
/// is that if g depends on f, and g has been evicted, then f must have been
/// committed.
class LineageCache {
 public:
  /// Create a lineage cache for the given task storage system.
  /// TODO(swang): Pass in the policy (interface?).
  ///
  /// \param self_node_id The ID of this node, used for log messages.
  /// \param gcs_client Client used for task-table writes and subscriptions.
  /// \param max_lineage_size Bound on lineage size (policy parameter).
  LineageCache(const ClientID &self_node_id, std::shared_ptr<gcs::GcsClient> gcs_client,
               uint64_t max_lineage_size);
  /// Asynchronously commit a task to the GCS.
  ///
  /// \param task The task to commit. It will be moved to the COMMITTING state.
  /// \return Whether the task was successfully committed. This can fail if the
  /// task was already in the COMMITTING state.
  bool CommitTask(const Task &task);
  /// Flush all tasks in the local cache that are not already being
  /// committed. This is equivalent to all tasks in the UNCOMMITTED
  /// state.
  ///
  /// \return Void.
  void FlushAllUncommittedTasks();
  /// Add a task and its (estimated) uncommitted lineage to the local cache. We
  /// will subscribe to commit notifications for all uncommitted tasks to
  /// determine when it is safe to evict the lineage from the local cache.
  ///
  /// \param task_id The ID of the uncommitted task to add.
  /// \param uncommitted_lineage The task's uncommitted lineage. These are the
  /// tasks that the given task is data-dependent on, but that have not
  /// been committed to the GCS. This must contain the given task ID.
  /// \return Void.
  void AddUncommittedLineage(const TaskID &task_id, const Lineage &uncommitted_lineage);
  /// Mark a task as having been explicitly forwarded to a node.
  /// The lineage of the task is implicitly assumed to have also been forwarded.
  ///
  /// \param task_id The ID of the task to get the uncommitted lineage for.
  /// \param node_id The ID of the node to get the uncommitted lineage for.
  void MarkTaskAsForwarded(const TaskID &task_id, const ClientID &node_id);
  /// Get the uncommitted lineage of a task that hasn't been forwarded to a node yet.
  /// The uncommitted lineage consists of all tasks in the given task's lineage
  /// that have not been committed in the GCS, as far as we know.
  ///
  /// \param task_id The ID of the task to get the uncommitted lineage for. If
  /// the task is not found, then the returned lineage will be empty.
  /// \param node_id The ID of the receiving node.
  /// \return The uncommitted, unforwarded lineage of the task. The returned lineage
  /// includes the entry for the requested entry_id.
  Lineage GetUncommittedLineage(const TaskID &task_id, const ClientID &node_id) const;
  /// Handle the commit of a task entry in the GCS. This attempts to evict the
  /// task if possible.
  ///
  /// \param task_id The ID of the task entry that was committed.
  void HandleEntryCommitted(const TaskID &task_id);
  /// Get a task. The task must be in the lineage cache.
  ///
  /// \param task_id The ID of the task to get.
  /// \return A const reference to the task data.
  const Task &GetTaskOrDie(const TaskID &task_id) const;
  /// Get whether the lineage cache contains the task.
  ///
  /// \param task_id The ID of the task to get.
  /// \return Whether the task is in the lineage cache.
  bool ContainsTask(const TaskID &task_id) const;
  /// Get all lineage in the lineage cache.
  ///
  /// \return A const reference to the lineage.
  const Lineage &GetLineage() const;
  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;
  /// Record metrics.
  void RecordMetrics() const;
 private:
  /// NOTE(review): the test name here looks like a leftover placeholder — no
  /// such test appears in lineage_cache_test.cc; confirm whether it is needed.
  FRIEND_TEST(LineageCacheTest, BarReturnsZeroOnNull);
  /// Flush a task that is in UNCOMMITTED_READY state.
  void FlushTask(const TaskID &task_id);
  /// Evict a single task. This should only be called if we are sure that the
  /// task has been committed. The task will only be evicted if all of its
  /// parents have also been evicted. If successful, then we will also attempt
  /// to evict the task's children.
  void EvictTask(const TaskID &task_id);
  /// Subscribe to notifications for a task. Returns whether the operation
  /// was successful (whether we were not already subscribed).
  bool SubscribeTask(const TaskID &task_id);
  /// Unsubscribe from notifications for a task. Returns whether the operation
  /// was successful (whether we were subscribed).
  bool UnsubscribeTask(const TaskID &task_id);
  /// ID of this node.
  ClientID self_node_id_;
  /// A client connection to the GCS.
  std::shared_ptr<gcs::GcsClient> gcs_client_;
  /// All tasks and objects that we are responsible for writing back to the
  /// GCS, and the tasks and objects in their lineage.
  Lineage lineage_;
  /// The tasks that we've subscribed to.
  /// We will receive a notification for these tasks on commit.
  std::unordered_set<TaskID> subscribed_tasks_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_LINEAGE_CACHE_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/lineage_cache_test.cc
|
C++
|
#include <list>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ray/common/task/task.h"
#include "ray/common/task/task_execution_spec.h"
#include "ray/common/task/task_spec.h"
#include "ray/common/task/task_util.h"
#include "ray/gcs/callback.h"
#include "ray/gcs/redis_accessor.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/raylet/format/node_manager_generated.h"
#include "ray/raylet/lineage_cache.h"
#include "ray/util/test_util.h"
namespace ray {
namespace raylet {
// Fixed job/driver IDs available to the tests in this file.
const static JobID kDefaultJobId = JobID::FromInt(1);
const static TaskID kDefaultDriverTaskId = TaskID::ForDriverTask(kDefaultJobId);
class MockGcsClient;
/// In-memory fake of the GCS task accessor. Adds and subscription
/// notifications are buffered and delivered deterministically via Flush(),
/// so tests can step the lineage cache's commit protocol by hand.
class MockTaskInfoAccessor : public gcs::RedisTaskInfoAccessor {
 public:
  MockTaskInfoAccessor(gcs::RedisGcsClient *gcs_client)
      : RedisTaskInfoAccessor(gcs_client) {}
  virtual ~MockTaskInfoAccessor() {}
  /// Register the single callback through which all subscription
  /// notifications are delivered.
  void RegisterSubscribeCallback(
      const gcs::SubscribeCallback<TaskID, rpc::TaskTableData> &notification_callback) {
    notification_callback_ = notification_callback;
  }
  /// Record the task in the fake table and buffer the done callback for the
  /// next Flush(). If the task is subscribed, the flush will also deliver a
  /// commit notification right after the done callback.
  Status AsyncAdd(const std::shared_ptr<TaskTableData> &task_data,
                  const gcs::StatusCallback &done) {
    TaskID task_id = TaskID::FromBinary(task_data->task().task_spec().task_id());
    task_table_[task_id] = task_data;
    auto callback = done;
    // If we requested notifications for this task ID, send the notification as
    // part of the callback.
    if (subscribed_tasks_.count(task_id) == 1) {
      callback = [this, done, task_id, task_data](Status status) {
        done(status);
        // If we're subscribed to the task to be added, also send a
        // subscription notification.
        notification_callback_(task_id, *task_data);
      };
    }
    callbacks_.push_back({callback, task_id});
    num_task_adds_++;
    return ray::Status::OK();
  }
  /// Simulate another node committing the task: add it to the table and, if
  /// this task is subscribed, deliver a notification on the next Flush().
  Status RemoteAdd(std::shared_ptr<TaskTableData> task_data) {
    TaskID task_id = TaskID::FromBinary(task_data->task().task_spec().task_id());
    task_table_[task_id] = task_data;
    // Send a notification after the add if the lineage cache requested
    // notifications for this key.
    bool send_notification = (subscribed_tasks_.count(task_id) == 1);
    auto callback = [this, send_notification, task_id, task_data](Status status) {
      if (send_notification) {
        notification_callback_(task_id, *task_data);
      }
    };
    return AsyncAdd(task_data, callback);
  }
  /// Record the subscription; if the task already exists, queue an immediate
  /// notification for the next Flush().
  Status AsyncSubscribe(
      const TaskID &task_id,
      const gcs::SubscribeCallback<TaskID, rpc::TaskTableData> &notification_callback,
      const gcs::StatusCallback &done) {
    subscribed_tasks_.insert(task_id);
    if (task_table_.count(task_id) == 1) {
      // NOTE(review): this queues the globally registered member callback,
      // not the `notification_callback` parameter — appears intentional since
      // the fixture registers one callback for all tasks; confirm.
      notification_callbacks_.push_back({notification_callback_, task_id});
    }
    num_requested_notifications_ += 1;
    return ray::Status::OK();
  }
  /// Drop the subscription; any already-buffered notifications still fire.
  Status AsyncUnsubscribe(const TaskID &task_id, const gcs::StatusCallback &done) {
    subscribed_tasks_.erase(task_id);
    return ray::Status::OK();
  }
  /// Deliver all buffered done callbacks (with OK status), then all buffered
  /// subscription notifications. Add callbacks are consumed; notification
  /// entries are replayed against the current table contents.
  void Flush() {
    auto callbacks = std::move(callbacks_);
    callbacks_.clear();
    for (const auto &callback : callbacks) {
      callback.first(Status::OK());
    }
    for (const auto &callback : notification_callbacks_) {
      callback.first(callback.second, *task_table_[callback.second]);
    }
  }
  /// Read-only view of the fake task table.
  const std::unordered_map<TaskID, std::shared_ptr<TaskTableData>> &TaskTable() const {
    return task_table_;
  }
  const std::unordered_set<TaskID> &SubscribedTasks() const { return subscribed_tasks_; }
  const int NumRequestedNotifications() const { return num_requested_notifications_; }
  const int NumTaskAdds() const { return num_task_adds_; }
 private:
  /// Fake GCS task table contents.
  std::unordered_map<TaskID, std::shared_ptr<TaskTableData>> task_table_;
  /// Buffered (done-callback, task) pairs awaiting Flush().
  std::vector<std::pair<gcs::StatusCallback, TaskID>> callbacks_;
  typedef gcs::SubscribeCallback<TaskID, rpc::TaskTableData> TaskSubscribeCallback;
  /// The single registered notification callback (see RegisterSubscribeCallback).
  TaskSubscribeCallback notification_callback_;
  /// Buffered (notification-callback, task) pairs awaiting Flush().
  std::vector<std::pair<TaskSubscribeCallback, TaskID>> notification_callbacks_;
  /// Tasks currently subscribed to.
  std::unordered_set<TaskID> subscribed_tasks_;
  /// Counters exposed to tests for asserting call volume.
  int num_requested_notifications_ = 0;
  int num_task_adds_ = 0;
};
/// Fake node accessor that reports a fixed node ID as "self".
class MockNodeInfoAccessor : public gcs::RedisNodeInfoAccessor {
 public:
  MockNodeInfoAccessor(gcs::RedisGcsClient *gcs_client, const ClientID &node_id)
      : RedisNodeInfoAccessor(gcs_client), node_id_(node_id) {}
  const ClientID &GetSelfId() const override { return node_id_; }
 private:
  ClientID node_id_;
};
/// GCS client whose task and node accessors are replaced with the in-memory
/// mocks above, so no Redis connection is needed.
class MockGcsClient : public gcs::RedisGcsClient {
 public:
  MockGcsClient(const gcs::GcsClientOptions &options, const ClientID &node_id)
      : RedisGcsClient(options) {
    // Replace the real accessors with the test fakes.
    task_table_fake_.reset(new gcs::raylet::TaskTable({nullptr}, this));
    task_accessor_.reset(new MockTaskInfoAccessor(this));
    node_accessor_.reset(new MockNodeInfoAccessor(this, node_id));
  }
  gcs::raylet::TaskTable &raylet_task_table() { return *task_table_fake_; }
  /// Convenience downcast to reach the mock-specific helpers (Flush, counters).
  MockTaskInfoAccessor &MockTasks() {
    return *dynamic_cast<MockTaskInfoAccessor *>(task_accessor_.get());
  }
 private:
  std::unique_ptr<gcs::raylet::TaskTable> task_table_fake_;
};
/// Test fixture: wires a LineageCache to the mock GCS client and routes all
/// commit notifications back into the cache, counting them.
class LineageCacheTest : public ::testing::Test {
 public:
  LineageCacheTest() : max_lineage_size_(10), num_notifications_(0) {
    // The address/port are never contacted; the mock accessors intercept
    // every call.
    gcs::GcsClientOptions options("10.10.10.10", 12100, "");
    mock_gcs_ = std::make_shared<MockGcsClient>(options, node_id_);
    lineage_cache_.reset(new LineageCache(node_id_, mock_gcs_, max_lineage_size_));
    mock_gcs_->MockTasks().RegisterSubscribeCallback(
        [this](const TaskID &task_id, const TaskTableData &data) {
          lineage_cache_->HandleEntryCommitted(task_id);
          num_notifications_++;
        });
  }
 protected:
  uint64_t max_lineage_size_;
  // Count of commit notifications delivered to the cache.
  uint64_t num_notifications_;
  ClientID node_id_{ClientID::FromRandom()};
  std::shared_ptr<MockGcsClient> mock_gcs_;
  std::unique_ptr<LineageCache> lineage_cache_;
};
/// Build a test task that takes `arguments` by reference and produces
/// `num_returns` return values.
static inline Task ExampleTask(const std::vector<ObjectID> &arguments,
                               uint64_t num_returns) {
  // Give the task a fresh random ID and random parent/caller IDs so each
  // call yields a distinct task.
  rpc::Address caller_address;
  TaskSpecBuilder spec_builder;
  spec_builder.SetCommonTaskSpec(RandomTaskId(), Language::PYTHON, {"", "", ""},
                                 JobID::Nil(), RandomTaskId(), 0, RandomTaskId(),
                                 caller_address, num_returns, false, {}, {});
  for (const auto &arg : arguments) {
    spec_builder.AddByRefArg(arg);
  }
  // Mark the task as having been forwarded once.
  rpc::TaskExecutionSpec exec_spec;
  exec_spec.set_num_forwards(1);
  return Task(spec_builder.Build(), TaskExecutionSpecification(exec_spec));
}
/// Helper method to create a Lineage object containing just `task`, marked
/// UNCOMMITTED.
Lineage CreateSingletonLineage(const Task &task) {
  Lineage lineage;
  lineage.SetEntry(task, GcsStatus::UNCOMMITTED);
  return lineage;
}
/// Insert a chain of `chain_size` dependent tasks into the cache, each task
/// consuming the previous task's return values (the first consumes
/// `initial_arguments`). Inserted tasks are appended to `inserted_tasks`.
///
/// \return The return object IDs of the final task in the chain.
std::vector<ObjectID> InsertTaskChain(LineageCache &lineage_cache,
                                      std::vector<Task> &inserted_tasks, int chain_size,
                                      const std::vector<ObjectID> &initial_arguments,
                                      int64_t num_returns) {
  std::vector<ObjectID> current_args = initial_arguments;
  for (int link = 0; link < chain_size; link++) {
    auto task = ExampleTask(current_args, num_returns);
    lineage_cache.AddUncommittedLineage(task.GetTaskSpecification().TaskId(),
                                        CreateSingletonLineage(task));
    inserted_tasks.push_back(task);
    // The next link in the chain consumes this task's return objects.
    current_args.clear();
    for (size_t ret = 0; ret < task.GetTaskSpecification().NumReturns(); ret++) {
      current_args.push_back(task.GetTaskSpecification().ReturnIdForPlasma(ret));
    }
  }
  return current_args;
}
/// GetUncommittedLineage of a leaf task must return exactly that task's
/// ancestor chain, and after a join task is added, the whole DAG.
TEST_F(LineageCacheTest, TestGetUncommittedLineage) {
  // Insert two independent chains of tasks.
  std::vector<Task> tasks1;
  auto return_values1 =
      InsertTaskChain(*lineage_cache_, tasks1, 3, std::vector<ObjectID>(), 1);
  std::vector<TaskID> task_ids1;
  for (const auto &task : tasks1) {
    task_ids1.push_back(task.GetTaskSpecification().TaskId());
  }
  std::vector<Task> tasks2;
  auto return_values2 =
      InsertTaskChain(*lineage_cache_, tasks2, 2, std::vector<ObjectID>(), 2);
  std::vector<TaskID> task_ids2;
  for (const auto &task : tasks2) {
    task_ids2.push_back(task.GetTaskSpecification().TaskId());
  }
  // Get the uncommitted lineage for the last task (the leaf) of one of the chains.
  auto uncommitted_lineage =
      lineage_cache_->GetUncommittedLineage(task_ids1.back(), ClientID::Nil());
  // Check that the uncommitted lineage is exactly equal to the first chain of tasks.
  ASSERT_EQ(task_ids1.size(), uncommitted_lineage.GetEntries().size());
  for (auto &task_id : task_ids1) {
    ASSERT_TRUE(uncommitted_lineage.GetEntry(task_id));
  }
  // Insert one task that is dependent on the previous chains of tasks.
  std::vector<Task> combined_tasks = tasks1;
  combined_tasks.insert(combined_tasks.end(), tasks2.begin(), tasks2.end());
  std::vector<ObjectID> combined_arguments = return_values1;
  combined_arguments.insert(combined_arguments.end(), return_values2.begin(),
                            return_values2.end());
  InsertTaskChain(*lineage_cache_, combined_tasks, 1, combined_arguments, 1);
  std::vector<TaskID> combined_task_ids;
  for (const auto &task : combined_tasks) {
    combined_task_ids.push_back(task.GetTaskSpecification().TaskId());
  }
  // Get the uncommitted lineage for the inserted task.
  uncommitted_lineage =
      lineage_cache_->GetUncommittedLineage(combined_task_ids.back(), ClientID::Nil());
  // Check that the uncommitted lineage is exactly equal to the entire set of
  // tasks inserted so far.
  ASSERT_EQ(combined_task_ids.size(), uncommitted_lineage.GetEntries().size());
  for (auto &task_id : combined_task_ids) {
    ASSERT_TRUE(uncommitted_lineage.GetEntry(task_id));
  }
}
/// Re-adding already-known UNCOMMITTED lineage must not issue duplicate
/// subscribe requests to the GCS.
TEST_F(LineageCacheTest, TestDuplicateUncommittedLineage) {
  // Insert a chain of tasks.
  std::vector<Task> tasks;
  auto return_values =
      InsertTaskChain(*lineage_cache_, tasks, 3, std::vector<ObjectID>(), 1);
  std::vector<TaskID> task_ids;
  for (const auto &task : tasks) {
    task_ids.push_back(task.GetTaskSpecification().TaskId());
  }
  // Check that we subscribed to each of the uncommitted tasks.
  ASSERT_EQ(mock_gcs_->MockTasks().NumRequestedNotifications(), task_ids.size());
  // Check that if we add the same tasks as UNCOMMITTED again, we do not issue
  // duplicate subscribe requests.
  Lineage duplicate_lineage;
  for (const auto &task : tasks) {
    duplicate_lineage.SetEntry(task, GcsStatus::UNCOMMITTED);
  }
  lineage_cache_->AddUncommittedLineage(task_ids.back(), duplicate_lineage);
  ASSERT_EQ(mock_gcs_->MockTasks().NumRequestedNotifications(), task_ids.size());
  // Check that if we commit one of the tasks, we still do not issue any
  // duplicate subscribe requests.
  lineage_cache_->CommitTask(tasks.front());
  lineage_cache_->AddUncommittedLineage(task_ids.back(), duplicate_lineage);
  ASSERT_EQ(mock_gcs_->MockTasks().NumRequestedNotifications(), task_ids.size());
}
/// Marking a task as forwarded to a node must exclude it (and its ancestors)
/// from subsequent GetUncommittedLineage calls for that node only, while the
/// requested task itself is always included.
TEST_F(LineageCacheTest, TestMarkTaskAsForwarded) {
  // Insert chain of tasks.
  std::vector<Task> tasks;
  auto return_values =
      InsertTaskChain(*lineage_cache_, tasks, 4, std::vector<ObjectID>(), 1);
  std::vector<TaskID> task_ids;
  for (const auto &task : tasks) {
    task_ids.push_back(task.GetTaskSpecification().TaskId());
  }
  auto node_id = ClientID::FromRandom();
  auto node_id2 = ClientID::FromRandom();
  // Forward the second-to-last task in the chain to node_id.
  auto forwarded_task_id = task_ids[task_ids.size() - 2];
  auto remaining_task_id = task_ids[task_ids.size() - 1];
  lineage_cache_->MarkTaskAsForwarded(forwarded_task_id, node_id);
  auto uncommitted_lineage =
      lineage_cache_->GetUncommittedLineage(remaining_task_id, node_id);
  auto uncommitted_lineage_all =
      lineage_cache_->GetUncommittedLineage(remaining_task_id, node_id2);
  // node_id already has everything up to the forwarded task, so only the
  // leaf remains; node_id2 still needs the full chain of 4.
  ASSERT_EQ(1, uncommitted_lineage.GetEntries().size());
  ASSERT_EQ(4, uncommitted_lineage_all.GetEntries().size());
  ASSERT_TRUE(uncommitted_lineage.GetEntry(remaining_task_id));
  // Check that lineage of requested task includes itself, regardless of whether
  // it has been forwarded before.
  auto uncommitted_lineage_forwarded =
      lineage_cache_->GetUncommittedLineage(forwarded_task_id, node_id);
  ASSERT_EQ(1, uncommitted_lineage_forwarded.GetEntries().size());
}
/// Only tasks explicitly committed are flushed to the GCS; merely adding
/// uncommitted lineage must not write anything.
TEST_F(LineageCacheTest, TestWritebackReady) {
  // Insert a chain of dependent tasks.
  size_t num_tasks_flushed = 0;
  std::vector<Task> tasks;
  InsertTaskChain(*lineage_cache_, tasks, 3, std::vector<ObjectID>(), 1);
  // Check that when no tasks have been marked as ready, we do not flush any
  // entries.
  ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
  // Check that after marking the first task as ready, we flush only that task.
  ASSERT_TRUE(lineage_cache_->CommitTask(tasks.front()));
  num_tasks_flushed++;
  ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
}
/// Committing every task in a dependency chain must flush every task to the
/// GCS, regardless of commit order constraints.
TEST_F(LineageCacheTest, TestWritebackOrder) {
  // Insert a chain of dependent tasks.
  std::vector<Task> tasks;
  InsertTaskChain(*lineage_cache_, tasks, 3, std::vector<ObjectID>(), 1);
  size_t num_tasks_flushed = tasks.size();
  // Mark all tasks as ready. All tasks should be flushed.
  for (const auto &task : tasks) {
    ASSERT_TRUE(lineage_cache_->CommitTask(task));
  }
  ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
}
/// Eviction invariant: a committed task is not evicted until every ancestor
/// has been evicted; once the root commits, the whole chain is released.
TEST_F(LineageCacheTest, TestEvictChain) {
  // Create a chain of 3 tasks.
  size_t num_tasks_flushed = 0;
  std::vector<Task> tasks;
  std::vector<ObjectID> arguments;
  for (int i = 0; i < 3; i++) {
    auto task = ExampleTask(arguments, 1);
    tasks.push_back(task);
    arguments = {task.GetTaskSpecification().ReturnIdForPlasma(0)};
  }
  Lineage uncommitted_lineage;
  for (const auto &task : tasks) {
    uncommitted_lineage.SetEntry(task, GcsStatus::UNCOMMITTED);
  }
  // Mark the last task as ready to flush.
  lineage_cache_->AddUncommittedLineage(tasks.back().GetTaskSpecification().TaskId(),
                                        uncommitted_lineage);
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), tasks.size());
  ASSERT_TRUE(lineage_cache_->CommitTask(tasks.back()));
  num_tasks_flushed++;
  ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
  // Flush acknowledgements. The lineage cache should receive the commit for
  // the flushed task, but its lineage should not be evicted yet.
  mock_gcs_->MockTasks().Flush();
  ASSERT_EQ(lineage_cache_
                ->GetUncommittedLineage(tasks.back().GetTaskSpecification().TaskId(),
                                        ClientID::Nil())
                .GetEntries()
                .size(),
            tasks.size());
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), tasks.size());
  // Simulate executing the task on a remote node and adding it to the GCS.
  // Committing the middle task still leaves the uncommitted root, so nothing
  // is evicted yet.
  auto task_id = tasks.at(1).GetTaskSpecification().TaskId();
  auto task_data = std::make_shared<TaskTableData>();
  task_data->mutable_task()->mutable_task_spec()->set_task_id(task_id.Binary());
  RAY_CHECK_OK(mock_gcs_->MockTasks().RemoteAdd(task_data));
  mock_gcs_->MockTasks().Flush();
  ASSERT_EQ(lineage_cache_
                ->GetUncommittedLineage(tasks.back().GetTaskSpecification().TaskId(),
                                        ClientID::Nil())
                .GetEntries()
                .size(),
            tasks.size());
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), tasks.size());
  // Simulate executing the task on a remote node and adding it to the GCS.
  // Once the root task commits, the eviction cascades through the chain.
  task_id = tasks.at(0).GetTaskSpecification().TaskId();
  auto task_data_2 = std::make_shared<TaskTableData>();
  task_data_2->mutable_task()->mutable_task_spec()->set_task_id(task_id.Binary());
  RAY_CHECK_OK(mock_gcs_->MockTasks().RemoteAdd(task_data_2));
  mock_gcs_->MockTasks().Flush();
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), 0);
  ASSERT_EQ(lineage_cache_->GetLineage().GetChildrenSize(), 0);
}
/// A child with many independent parents is only evicted after the last
/// parent commits; each parent is evicted as soon as it commits.
TEST_F(LineageCacheTest, TestEvictManyParents) {
  // Create some independent tasks.
  std::vector<Task> parent_tasks;
  std::vector<ObjectID> arguments;
  for (int i = 0; i < 10; i++) {
    auto task = ExampleTask({}, 1);
    parent_tasks.push_back(task);
    arguments.push_back(task.GetTaskSpecification().ReturnIdForPlasma(0));
    auto lineage = CreateSingletonLineage(task);
    lineage_cache_->AddUncommittedLineage(task.GetTaskSpecification().TaskId(), lineage);
  }
  // Create a child task that is dependent on all of the previous tasks.
  auto child_task = ExampleTask(arguments, 1);
  auto lineage = CreateSingletonLineage(child_task);
  lineage_cache_->AddUncommittedLineage(child_task.GetTaskSpecification().TaskId(),
                                        lineage);
  // Flush the child task. Make sure that it remains in the cache, since none
  // of its parents have been committed yet, and that the uncommitted lineage
  // still includes all of the parent tasks.
  size_t total_tasks = parent_tasks.size() + 1;
  lineage_cache_->CommitTask(child_task);
  mock_gcs_->MockTasks().Flush();
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), total_tasks);
  ASSERT_EQ(lineage_cache_
                ->GetUncommittedLineage(child_task.GetTaskSpecification().TaskId(),
                                        ClientID::Nil())
                .GetEntries()
                .size(),
            total_tasks);
  // Flush each parent task and check for eviction safety.
  for (const auto &parent_task : parent_tasks) {
    lineage_cache_->CommitTask(parent_task);
    mock_gcs_->MockTasks().Flush();
    total_tasks--;
    if (total_tasks > 1) {
      // Each task should be evicted as soon as its commit is acknowledged,
      // since the parent tasks have no dependencies.
      ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), total_tasks);
      ASSERT_EQ(lineage_cache_
                    ->GetUncommittedLineage(child_task.GetTaskSpecification().TaskId(),
                                            ClientID::Nil())
                    .GetEntries()
                    .size(),
                total_tasks);
    } else {
      // After the last task has been committed, then the child task should
      // also be evicted. The lineage cache should now be empty.
      ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), 0);
    }
  }
  ASSERT_EQ(lineage_cache_->GetLineage().GetChildrenSize(), 0);
}
// Inserts a dependency chain longer than the maximum lineage size and checks
// that committing tasks front-to-back evicts entries one by one as each
// commit is acknowledged.
TEST_F(LineageCacheTest, TestEviction) {
  // Insert a chain of dependent tasks.
  uint64_t lineage_size = max_lineage_size_ + 1;
  size_t num_tasks_flushed = 0;
  std::vector<Task> tasks;
  InsertTaskChain(*lineage_cache_, tasks, lineage_size, std::vector<ObjectID>(), 1);
  // Check that the last task in the chain still has all tasks in its
  // uncommitted lineage.
  const auto last_task_id = tasks.back().GetTaskSpecification().TaskId();
  auto uncommitted_lineage =
      lineage_cache_->GetUncommittedLineage(last_task_id, ClientID::Nil());
  ASSERT_EQ(uncommitted_lineage.GetEntries().size(), lineage_size);
  // Simulate executing the first task on a remote node and adding it to the
  // GCS.
  auto it = tasks.begin();
  auto task_id = it->GetTaskSpecification().TaskId();
  auto task_data = std::make_shared<TaskTableData>();
  task_data->mutable_task()->mutable_task_spec()->set_task_id(task_id.Binary());
  RAY_CHECK_OK(mock_gcs_->MockTasks().RemoteAdd(task_data));
  it++;
  // Check that the remote task is flushed.
  num_tasks_flushed++;
  mock_gcs_->MockTasks().Flush();
  ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
  // Check that the last task in the chain still has all tasks in its
  // uncommitted lineage. (This checks the snapshot taken above; it is not
  // refreshed by the eviction.)
  ASSERT_EQ(uncommitted_lineage.GetEntries().size(), lineage_size);
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(),
            lineage_size - num_tasks_flushed);
  // Simulate executing all the rest of the tasks except the last one on a
  // remote node and adding them to the GCS.
  tasks.pop_back();
  for (; it != tasks.end(); it++) {
    auto task_id = it->GetTaskSpecification().TaskId();
    auto task_data = std::make_shared<TaskTableData>();
    task_data->mutable_task()->mutable_task_spec()->set_task_id(task_id.Binary());
    RAY_CHECK_OK(mock_gcs_->MockTasks().RemoteAdd(task_data));
    // Check that the remote task is flushed.
    num_tasks_flushed++;
    mock_gcs_->MockTasks().Flush();
    ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
    ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(),
              lineage_size - num_tasks_flushed);
  }
  // All tasks have now been flushed. Check that enough lineage has been
  // evicted that the uncommitted lineage is now less than the maximum size.
  uncommitted_lineage =
      lineage_cache_->GetUncommittedLineage(last_task_id, ClientID::Nil());
  ASSERT_TRUE(uncommitted_lineage.GetEntries().size() < max_lineage_size_);
  // The remaining task should have no uncommitted lineage.
  ASSERT_EQ(uncommitted_lineage.GetEntries().size(), 1);
  ASSERT_EQ(lineage_cache_->GetLineage().GetChildrenSize(), 1);
}
// Verifies that no lineage is evicted until the commit of the *root* task
// arrives, even when commit notifications arrive in reverse execution order.
TEST_F(LineageCacheTest, TestOutOfOrderEviction) {
  // Insert a chain of dependent tasks that is more than twice as long as the
  // maximum lineage size. This will ensure that we request notifications for
  // at most 2 remote tasks.
  uint64_t lineage_size = (2 * max_lineage_size_) + 2;
  size_t num_tasks_flushed = 0;
  std::vector<Task> tasks;
  InsertTaskChain(*lineage_cache_, tasks, lineage_size, std::vector<ObjectID>(), 1);
  // Check that the last task in the chain still has all tasks in its
  // uncommitted lineage.
  const auto last_task_id = tasks.back().GetTaskSpecification().TaskId();
  auto uncommitted_lineage =
      lineage_cache_->GetUncommittedLineage(last_task_id, ClientID::Nil());
  ASSERT_EQ(uncommitted_lineage.GetEntries().size(), lineage_size);
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), lineage_size);
  // Simulate executing the rest of the tasks on a remote node and receiving
  // the notifications from the GCS in reverse order of execution.
  auto last_task = tasks.front();
  tasks.erase(tasks.begin());
  for (auto it = tasks.rbegin(); it != tasks.rend(); it++) {
    auto task_id = it->GetTaskSpecification().TaskId();
    auto task_data = std::make_shared<TaskTableData>();
    task_data->mutable_task()->mutable_task_spec()->set_task_id(task_id.Binary());
    RAY_CHECK_OK(mock_gcs_->MockTasks().RemoteAdd(task_data));
    // Check that the remote task is flushed.
    num_tasks_flushed++;
    mock_gcs_->MockTasks().Flush();
    ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
    // Nothing is evicted yet: the first task's commit has not arrived.
    ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), lineage_size);
  }
  // Flush the last task. The lineage should not get evicted until this task's
  // commit is received.
  auto task_id = last_task.GetTaskSpecification().TaskId();
  auto task_data = std::make_shared<TaskTableData>();
  task_data->mutable_task()->mutable_task_spec()->set_task_id(task_id.Binary());
  RAY_CHECK_OK(mock_gcs_->MockTasks().RemoteAdd(task_data));
  num_tasks_flushed++;
  mock_gcs_->MockTasks().Flush();
  ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), 0);
  ASSERT_EQ(lineage_cache_->GetLineage().GetChildrenSize(), 0);
}
// Verifies that locally-committed child tasks remain cached until *all* of
// their remote ancestors have been committed to the GCS.
TEST_F(LineageCacheTest, TestEvictionUncommittedChildren) {
  // Insert a chain of dependent tasks.
  size_t num_tasks_flushed = 0;
  uint64_t lineage_size = max_lineage_size_ + 1;
  std::vector<Task> tasks;
  InsertTaskChain(*lineage_cache_, tasks, lineage_size, std::vector<ObjectID>(), 1);
  // Add more tasks to the lineage cache that will remain local. Each of these
  // tasks is dependent on one of the tasks that was forwarded above.
  for (const auto &task : tasks) {
    auto return_id = task.GetTaskSpecification().ReturnIdForPlasma(0);
    auto dependent_task = ExampleTask({return_id}, 1);
    auto lineage = CreateSingletonLineage(dependent_task);
    lineage_cache_->AddUncommittedLineage(dependent_task.GetTaskSpecification().TaskId(),
                                          lineage);
    ASSERT_TRUE(lineage_cache_->CommitTask(dependent_task));
    // Once the forwarded tasks are evicted from the lineage cache, we expect
    // each of these dependent tasks to be flushed, since all of their
    // dependencies have been committed.
    num_tasks_flushed++;
  }
  // Simulate executing the tasks on the remote node in reverse order and
  // adding them to the GCS. Lineage at the local node should not get evicted
  // until after the final remote task is executed, since a task can only be
  // evicted once all of its ancestors have been committed.
  for (auto it = tasks.rbegin(); it != tasks.rend(); it++) {
    auto task_id = it->GetTaskSpecification().TaskId();
    auto task_data = std::make_shared<TaskTableData>();
    task_data->mutable_task()->mutable_task_spec()->set_task_id(task_id.Binary());
    // Both the chain and the dependent tasks are still cached on every
    // iteration, since the root's commit has not yet arrived.
    ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), lineage_size * 2);
    RAY_CHECK_OK(mock_gcs_->MockTasks().RemoteAdd(task_data));
    num_tasks_flushed++;
    mock_gcs_->MockTasks().Flush();
    ASSERT_EQ(mock_gcs_->MockTasks().TaskTable().size(), num_tasks_flushed);
  }
  // Check that after the final remote task is executed, all local lineage is
  // now evicted.
  ASSERT_EQ(lineage_cache_->GetLineage().GetEntries().size(), 0);
  ASSERT_EQ(lineage_cache_->GetLineage().GetChildrenSize(), 0);
}
// Verifies that FlushAllUncommittedTasks() writes each uncommitted task to
// the GCS exactly once (idempotent on repeat calls) and unsubscribes from
// the corresponding notifications before flushing.
TEST_F(LineageCacheTest, TestFlushAllUncommittedTasks) {
  // Insert a chain of tasks.
  std::vector<Task> tasks;
  auto return_values =
      InsertTaskChain(*lineage_cache_, tasks, 3, std::vector<ObjectID>(), 1);
  std::vector<TaskID> task_ids;
  for (const auto &task : tasks) {
    task_ids.push_back(task.GetTaskSpecification().TaskId());
  }
  // Check that we subscribed to each of the uncommitted tasks.
  ASSERT_EQ(mock_gcs_->MockTasks().NumRequestedNotifications(), task_ids.size());
  // Flush all uncommitted tasks and make sure we add all tasks to
  // the task table.
  lineage_cache_->FlushAllUncommittedTasks();
  ASSERT_EQ(mock_gcs_->MockTasks().NumTaskAdds(), tasks.size());
  // Flush again and make sure there are no new tasks added to the
  // task table.
  lineage_cache_->FlushAllUncommittedTasks();
  ASSERT_EQ(mock_gcs_->MockTasks().NumTaskAdds(), tasks.size());
  // Flush all GCS notifications.
  mock_gcs_->MockTasks().Flush();
  // Make sure that we unsubscribed to the uncommitted tasks before
  // we flushed them.
  ASSERT_EQ(num_notifications_, 0);
  // Flush again and make sure there are no new tasks added to the
  // task table.
  lineage_cache_->FlushAllUncommittedTasks();
  ASSERT_EQ(mock_gcs_->MockTasks().NumTaskAdds(), tasks.size());
}
} // namespace raylet
} // namespace ray
// Standard GoogleTest entry point: initialize the framework with the
// command-line arguments, then run every registered test case.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/main.cc
|
C++
|
#include <iostream>
#include "gflags/gflags.h"
#include "ray/common/id.h"
#include "ray/common/ray_config.h"
#include "ray/common/status.h"
#include "ray/common/task/task_common.h"
#include "ray/raylet/raylet.h"
#include "ray/stats/stats.h"
// Command-line flags for the raylet process. All values are copied into
// locals by main() before gflags is shut down.
DEFINE_string(raylet_socket_name, "", "The socket name of raylet.");
DEFINE_string(store_socket_name, "", "The socket name of object store.");
DEFINE_int32(object_manager_port, -1, "The port of object manager.");
DEFINE_int32(node_manager_port, -1, "The port of node manager.");
DEFINE_string(node_ip_address, "", "The ip address of this node.");
DEFINE_string(redis_address, "", "The ip address of redis server.");
DEFINE_int32(redis_port, -1, "The port of redis server.");
DEFINE_int32(num_initial_workers, 0, "Number of initial workers.");
DEFINE_int32(maximum_startup_concurrency, 1, "Maximum startup concurrency");
DEFINE_string(static_resource_list, "", "The static resource list of this node.");
DEFINE_string(config_list, "", "The raylet config list of this node.");
DEFINE_string(python_worker_command, "", "Python worker command.");
DEFINE_string(java_worker_command, "", "Java worker command.");
DEFINE_string(redis_password, "", "The password of redis.");
DEFINE_string(temp_dir, "", "Temporary directory.");
DEFINE_string(session_dir, "", "The path of this ray session directory.");
DEFINE_bool(disable_stats, false, "Whether disable the stats.");
DEFINE_string(stat_address, "127.0.0.1:8888", "The address that we report metrics to.");
DEFINE_bool(enable_stdout_exporter, false,
            "Whether enable the stdout exporter for stats.");
#ifndef RAYLET_TEST
int main(int argc, char *argv[]) {
InitShutdownRAII ray_log_shutdown_raii(ray::RayLog::StartRayLog,
ray::RayLog::ShutDownRayLog, argv[0],
ray::RayLogLevel::INFO,
/*log_dir=*/"");
ray::RayLog::InstallFailureSignalHandler();
gflags::ParseCommandLineFlags(&argc, &argv, true);
const std::string raylet_socket_name = FLAGS_raylet_socket_name;
const std::string store_socket_name = FLAGS_store_socket_name;
const int object_manager_port = static_cast<int>(FLAGS_object_manager_port);
const int node_manager_port = static_cast<int>(FLAGS_node_manager_port);
const std::string node_ip_address = FLAGS_node_ip_address;
const std::string redis_address = FLAGS_redis_address;
const int redis_port = static_cast<int>(FLAGS_redis_port);
const int num_initial_workers = static_cast<int>(FLAGS_num_initial_workers);
const int maximum_startup_concurrency =
static_cast<int>(FLAGS_maximum_startup_concurrency);
const std::string static_resource_list = FLAGS_static_resource_list;
const std::string config_list = FLAGS_config_list;
const std::string python_worker_command = FLAGS_python_worker_command;
const std::string java_worker_command = FLAGS_java_worker_command;
const std::string redis_password = FLAGS_redis_password;
const std::string temp_dir = FLAGS_temp_dir;
const std::string session_dir = FLAGS_session_dir;
const bool disable_stats = FLAGS_disable_stats;
const std::string stat_address = FLAGS_stat_address;
const bool enable_stdout_exporter = FLAGS_enable_stdout_exporter;
gflags::ShutDownCommandLineFlags();
// Initialize stats.
const ray::stats::TagsType global_tags = {
{ray::stats::JobNameKey, "raylet"},
{ray::stats::VersionKey, "0.9.0.dev0"},
{ray::stats::NodeAddressKey, node_ip_address}};
ray::stats::Init(stat_address, global_tags, disable_stats, enable_stdout_exporter);
// Configuration for the node manager.
ray::raylet::NodeManagerConfig node_manager_config;
std::unordered_map<std::string, double> static_resource_conf;
std::unordered_map<std::string, std::string> raylet_config;
// Parse the configuration list.
std::istringstream config_string(config_list);
std::string config_name;
std::string config_value;
while (std::getline(config_string, config_name, ',')) {
RAY_CHECK(std::getline(config_string, config_value, ','));
// TODO(rkn): The line below could throw an exception. What should we do about this?
raylet_config[config_name] = config_value;
}
RayConfig::instance().initialize(raylet_config);
// Parse the resource list.
std::istringstream resource_string(static_resource_list);
std::string resource_name;
std::string resource_quantity;
while (std::getline(resource_string, resource_name, ',')) {
RAY_CHECK(std::getline(resource_string, resource_quantity, ','));
// TODO(rkn): The line below could throw an exception. What should we do about this?
static_resource_conf[resource_name] = std::stod(resource_quantity);
}
node_manager_config.resource_config = ray::ResourceSet(std::move(static_resource_conf));
RAY_LOG(DEBUG) << "Starting raylet with static resource configuration: "
<< node_manager_config.resource_config.ToString();
node_manager_config.node_manager_address = node_ip_address;
node_manager_config.node_manager_port = node_manager_port;
node_manager_config.num_initial_workers = num_initial_workers;
node_manager_config.maximum_startup_concurrency = maximum_startup_concurrency;
if (!python_worker_command.empty()) {
node_manager_config.worker_commands.emplace(
make_pair(ray::Language::PYTHON, SplitStrByWhitespaces(python_worker_command)));
}
if (!java_worker_command.empty()) {
node_manager_config.worker_commands.emplace(
make_pair(ray::Language::JAVA, SplitStrByWhitespaces(java_worker_command)));
}
if (python_worker_command.empty() && java_worker_command.empty()) {
RAY_CHECK(0)
<< "Either Python worker command or Java worker command should be provided.";
}
node_manager_config.heartbeat_period_ms =
RayConfig::instance().raylet_heartbeat_timeout_milliseconds();
node_manager_config.debug_dump_period_ms =
RayConfig::instance().debug_dump_period_milliseconds();
node_manager_config.fair_queueing_enabled =
RayConfig::instance().fair_queueing_enabled();
node_manager_config.object_pinning_enabled =
RayConfig::instance().object_pinning_enabled();
node_manager_config.max_lineage_size = RayConfig::instance().max_lineage_size();
node_manager_config.store_socket_name = store_socket_name;
node_manager_config.temp_dir = temp_dir;
node_manager_config.session_dir = session_dir;
// Configuration for the object manager.
ray::ObjectManagerConfig object_manager_config;
object_manager_config.object_manager_port = object_manager_port;
object_manager_config.store_socket_name = store_socket_name;
object_manager_config.pull_timeout_ms =
RayConfig::instance().object_manager_pull_timeout_ms();
object_manager_config.push_timeout_ms =
RayConfig::instance().object_manager_push_timeout_ms();
int num_cpus = static_cast<int>(static_resource_conf["CPU"]);
object_manager_config.rpc_service_threads_number =
std::min(std::max(2, num_cpus / 4), 8);
object_manager_config.object_chunk_size =
RayConfig::instance().object_manager_default_chunk_size();
RAY_LOG(DEBUG) << "Starting object manager with configuration: \n"
<< "rpc_service_threads_number = "
<< object_manager_config.rpc_service_threads_number
<< ", object_chunk_size = " << object_manager_config.object_chunk_size;
// Initialize the node manager.
boost::asio::io_service main_service;
// Initialize gcs client
ray::gcs::GcsClientOptions client_options(redis_address, redis_port, redis_password);
auto gcs_client = std::make_shared<ray::gcs::RedisGcsClient>(client_options);
RAY_CHECK_OK(gcs_client->Connect(main_service));
std::unique_ptr<ray::raylet::Raylet> server(new ray::raylet::Raylet(
main_service, raylet_socket_name, node_ip_address, redis_address, redis_port,
redis_password, node_manager_config, object_manager_config, gcs_client));
server->Start();
// Destroy the Raylet on a SIGTERM. The pointer to main_service is
// guaranteed to be valid since this function will run the event loop
// instead of returning immediately.
// We should stop the service and remove the local socket file.
auto handler = [&main_service, &raylet_socket_name, &server, &gcs_client](
const boost::system::error_code &error, int signal_number) {
RAY_LOG(INFO) << "Raylet received SIGTERM, shutting down...";
server->Stop();
gcs_client->Disconnect();
main_service.stop();
remove(raylet_socket_name.c_str());
};
boost::asio::signal_set signals(main_service, SIGTERM);
signals.async_wait(handler);
main_service.run();
}
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/mock_gcs_client.cc
|
C++
|
#include <functional>
#include <iostream>
#include "ray/raylet/mock_gcs_client.h"
namespace ray {
/// Look up the set of clients holding `object_id`.
///
/// Exactly one of the callbacks is invoked: `success` with the holders if the
/// object is known and has at least one holder, otherwise `fail` with a
/// KeyError. Always returns OK (errors are reported via the callbacks).
ray::Status ObjectTable::GetObjectClientIDs(const ray::ObjectID &object_id,
                                            const ClientIDsCallback &success,
                                            const FailCallback &fail) {
  RAY_LOG(DEBUG) << "GetObjectClientIDs " << object_id;
  // Single hash lookup instead of the original count() + repeated operator[].
  auto entry = client_lookup.find(object_id);
  if (entry == client_lookup.end()) {
    fail(Status::KeyError("ObjectID doesn't exist."));
    return Status::OK();
  }
  if (entry->second.empty()) {
    fail(Status::KeyError("ObjectID has no clients."));
    return Status::OK();
  }
  // Range-construct the result vector instead of pushing element by element.
  std::vector<ClientID> v(entry->second.begin(), entry->second.end());
  success(std::move(v));
  return Status::OK();
}
/// Record that `client_id` holds a copy of `object_id`.
///
/// Calls `done_callback` on success; returns KeyError (without invoking the
/// callback) if the client was already recorded for this object.
ray::Status ObjectTable::Add(const ObjectID &object_id, const ClientID &client_id,
                             const DoneCallback &done_callback) {
  // operator[] default-constructs the client set on first use, so the
  // original's explicit "create empty set" branch is unnecessary, and
  // insert().second replaces the separate membership check (one lookup each).
  auto &clients = client_lookup[object_id];
  if (!clients.insert(client_id).second) {
    return ray::Status::KeyError("ClientID already exists.");
  }
  RAY_LOG(DEBUG) << "Insert ClientID " << client_id;
  done_callback();
  return ray::Status::OK();
}
/// Remove `client_id` from the holders of `object_id`.
///
/// Calls `done_callback` on success; returns KeyError (without invoking the
/// callback) if the object or the client is unknown.
ray::Status ObjectTable::Remove(const ObjectID &object_id, const ClientID &client_id,
                                const DoneCallback &done_callback) {
  // find() + erase()'s return count replace the original triple lookup
  // (count, count, erase).
  auto entry = client_lookup.find(object_id);
  if (entry == client_lookup.end()) {
    return ray::Status::KeyError("ObjectID doesn't exist.");
  }
  if (entry->second.erase(client_id) == 0) {
    return ray::Status::KeyError("ClientID doesn't exist.");
  }
  done_callback();
  return ray::Status::OK();
}
/// Invoke `callback` with the ids of all registered clients. Always returns OK.
ray::Status ClientTable::GetClientIds(ClientIDsCallback callback) {
  std::vector<ClientID> keys;
  keys.reserve(info_lookup.size());
  // const reference: the original `for (auto kv : ...)` copied every
  // (ClientID, ClientInformation) pair just to read the key.
  for (const auto &kv : info_lookup) {
    keys.push_back(kv.first);
  }
  callback(keys);
  return Status::OK();
}
void ClientTable::GetClientInformationSet(const std::vector<ClientID> &client_ids,
ManyInfoCallback callback,
FailCallback failcb) {
std::vector<ClientInformation> info_vec;
for (const auto &client_id : client_ids) {
if (info_lookup.count(client_id) != 0) {
info_vec.push_back(info_lookup.at(client_id));
}
}
if (info_vec.empty()) {
failcb(Status::KeyError("ClientID not found."));
} else {
callback(info_vec);
}
}
/// Invoke `callback` with the information for `client_id`, or `failcb` with a
/// KeyError if the id is unknown.
void ClientTable::GetClientInformation(const ClientID &client_id,
                                       SingleInfoCallback callback, FailCallback failcb) {
  // One find() instead of count() + at(); also fixes the typo in the error
  // message ("CleintID" -> "ClientID").
  auto entry = info_lookup.find(client_id);
  if (entry == info_lookup.end()) {
    failcb(ray::Status::KeyError("ClientID not found."));
  } else {
    callback(entry->second);
  }
}
/// Register a client with its network location.
///
/// Calls `done_callback` on success; returns KeyError (without invoking the
/// callback) if the id is already registered.
ray::Status ClientTable::Add(const ClientID &client_id, const std::string &ip,
                             uint16_t port, DoneCallback done_callback) {
  // emplace() performs the existence check and insertion with a single hash
  // lookup; .second is false when the key was already present.
  auto inserted =
      info_lookup.emplace(client_id, ClientInformation(client_id, ip, port));
  if (!inserted.second) {
    return ray::Status::KeyError("ClientID already exists.");
  }
  done_callback();
  return ray::Status::OK();
}
/// Unregister a client.
///
/// Calls `done_callback` on success; returns KeyError (without invoking the
/// callback) if the id is unknown.
ray::Status ClientTable::Remove(const ClientID &client_id, DoneCallback done_callback) {
  // erase() returns the number of removed elements; 0 means the key was
  // absent. This replaces the original count() + erase() double lookup.
  if (info_lookup.erase(client_id) == 0) {
    return ray::Status::KeyError("ClientID doesn't exist.");
  }
  done_callback();
  return ray::Status::OK();
}
/// Register the ip and port of the connecting client.
/// \return The randomly generated ClientID assigned to the client.
ClientID GcsClient::Register(const std::string &ip, uint16_t port) {
  const ClientID client_id = ClientID::FromRandom();
  // NOTE: the original passed std::move(client_id) and then returned
  // client_id afterwards. Add() takes the id by const reference, so the move
  // was a no-op, but it read as a use-after-move; pass by reference instead.
  // TODO: handle client registration failure.
  ray::Status status = client_table().Add(client_id, ip, port, []() {});
  (void)status;  // Registration failure is currently ignored (see TODO).
  return client_id;
}
ClientTable &GcsClient::client_table() { return *client_table_; }
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/mock_gcs_client.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_MOCK_GCS_CLIENT_H
#define RAY_RAYLET_MOCK_GCS_CLIENT_H
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/bind.hpp>

#include "ray/common/id.h"
#include "ray/common/status.h"
namespace ray {
/// In-memory object directory mapping each ObjectID to the set of clients
/// (nodes) that hold a copy of it. Part of the mock GCS used in tests.
class ObjectTable {
 public:
  /// Invoked when a mutation completes successfully.
  using DoneCallback = std::function<void()>;
  /// Invoked with the set of clients holding an object.
  using ClientIDsCallback = std::function<void(const std::vector<ray::ClientID> &)>;
  /// Invoked with the error status when a lookup fails.
  using FailCallback = std::function<void(const ray::Status &)>;
  /// Record that `client_id` holds `object_id`. Calls `done` on success;
  /// returns KeyError if the client was already recorded.
  ray::Status Add(const ObjectID &object_id, const ClientID &client_id,
                  const DoneCallback &done);
  /// Remove `client_id` from the holders of `object_id`. Calls `done` on
  /// success; returns KeyError if either key is unknown.
  ray::Status Remove(const ObjectID &object_id, const ClientID &client_id,
                     const DoneCallback &done);
  /// Look up the holders of `object_id`; exactly one callback is invoked.
  ray::Status GetObjectClientIDs(const ObjectID &object_id, const ClientIDsCallback &,
                                 const FailCallback &);
 private:
  // NOTE(review): appears unused in the visible code — confirm before removing.
  std::vector<ClientID> empty_set_;
  // ObjectID -> set of clients currently holding that object.
  std::unordered_map<ObjectID, std::unordered_set<ClientID>> client_lookup;
};
/// Immutable record of a registered client's identity and network location.
class ClientInformation {
 public:
  /// \param client_id The unique id of the client.
  /// \param ip_address The client's IP address.
  /// \param port The client's port.
  ClientInformation(const ClientID &client_id, const std::string &ip_address,
                    uint16_t port)
      : client_id_(client_id), ip_address_(ip_address), port_(port) {}
  /// \return The unique id of the client.
  const ClientID &GetClientId() const { return client_id_; }
  /// \return The client's IP address.
  const std::string &GetIp() const { return ip_address_; }
  /// \return The client's port.
  const uint16_t &GetPort() const { return port_; }
 private:
  ClientID client_id_;
  std::string ip_address_;
  uint16_t port_;
};
/// In-memory table of registered clients, keyed by ClientID.
class ClientTable {
 public:
  typedef std::unordered_map<ClientID, ClientInformation> info_type;
  using ClientIDsCallback = std::function<void(std::vector<ray::ClientID>)>;
  using SingleInfoCallback = std::function<void(ClientInformation info)>;
  using ManyInfoCallback = std::function<void(std::vector<ClientInformation> info_vec)>;
  using DoneCallback = std::function<void()>;
  using FailCallback = std::function<void(ray::Status)>;
  /// Invoke `cb` with the ids of all registered clients.
  ray::Status GetClientIds(ClientIDsCallback cb);
  /// Invoke `cb` with the information of every id from `client_ids` that is
  /// registered; invoke `failcb` instead if none are found.
  void GetClientInformationSet(const std::vector<ClientID> &client_ids,
                               ManyInfoCallback cb, FailCallback failcb);
  /// Invoke `callback` with the information for `client_id`, or `failcb` if
  /// the id is unknown.
  void GetClientInformation(const ClientID &client_id, SingleInfoCallback callback,
                            FailCallback failcb);
  /// Register a client. Calls `cb` on success; returns KeyError if the id is
  /// already registered.
  ray::Status Add(const ClientID &client_id, const std::string &ip, uint16_t port,
                  DoneCallback cb);
  /// Unregister a client. Calls `done` on success; returns KeyError if the id
  /// is unknown.
  ray::Status Remove(const ClientID &client_id, DoneCallback done);
 private:
  info_type info_lookup;
};
/// Mock GCS client that owns in-memory object and client tables (test-only).
class GcsClient {
 public:
  /// Initialize the tables directly in the member-initializer list instead of
  /// the original default-construct-then-reset(new ...) two-phase pattern.
  GcsClient() : object_table_(new ObjectTable()), client_table_(new ClientTable()) {}
  /// Register the ip and port of the connecting client.
  /// \return The randomly generated ClientID assigned to the client.
  ClientID Register(const std::string &ip, uint16_t port);
  /// \return A reference to the client table.
  ClientTable &client_table();
 private:
  std::unique_ptr<ObjectTable> object_table_;
  std::unique_ptr<ClientTable> client_table_;
};
} // namespace ray
#endif // RAY_RAYLET_MOCK_GCS_CLIENT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/monitor.cc
|
C++
|
#include "ray/raylet/monitor.h"
#include "ray/common/ray_config.h"
#include "ray/common/status.h"
#include "ray/gcs/pb_util.h"
#include "ray/util/util.h"
namespace ray {
namespace raylet {
/// \class Monitor
///
/// The monitor is responsible for listening for heartbeats from Raylets and
/// deciding when a Raylet has died. If the monitor does not hear from a Raylet
/// within heartbeat_timeout_milliseconds * num_heartbeats_timeout (defined in
/// the Ray configuration), then the monitor will mark that Raylet as dead in
/// the client table, which broadcasts the event to all other Raylets.
// Constructs the monitor's GCS client from the given options and connects it
// on `io_service`; aborts (RAY_CHECK_OK) if the connection cannot be made.
Monitor::Monitor(boost::asio::io_service &io_service,
                 const gcs::GcsClientOptions &gcs_client_options)
    : gcs_client_(new gcs::RedisGcsClient(gcs_client_options)),
      num_heartbeats_timeout_(RayConfig::instance().num_heartbeats_timeout()),
      heartbeat_timer_(io_service) {
  RAY_CHECK_OK(gcs_client_->Connect(io_service));
}
// Records a heartbeat from `node_id`: resets its timeout countdown and
// buffers the heartbeat for the next batched publish in Tick().
void Monitor::HandleHeartbeat(const ClientID &node_id,
                              const HeartbeatTableData &heartbeat_data) {
  heartbeats_[node_id] = num_heartbeats_timeout_;
  heartbeat_buffer_[node_id] = heartbeat_data;
}
void Monitor::Start() {
const auto heartbeat_callback = [this](const ClientID &id,
const HeartbeatTableData &heartbeat_data) {
HandleHeartbeat(id, heartbeat_data);
};
RAY_CHECK_OK(gcs_client_->Nodes().AsyncSubscribeHeartbeat(heartbeat_callback, nullptr));
Tick();
}
/// A periodic timer that checks for timed out clients.
void Monitor::Tick() {
  // Decrement each node's countdown; nodes that reach 0 are declared dead.
  for (auto it = heartbeats_.begin(); it != heartbeats_.end();) {
    it->second--;
    if (it->second == 0) {
      if (dead_nodes_.count(it->first) == 0) {
        // Copy the node id by value: the async callback below outlives `it`.
        auto node_id = it->first;
        RAY_LOG(WARNING) << "Node timed out: " << node_id;
        auto lookup_callback = [this, node_id](Status status,
                                               const std::vector<GcsNodeInfo> &all_node) {
          RAY_CHECK(status.ok()) << status.CodeAsString();
          bool marked = false;
          for (const auto &node : all_node) {
            if (node_id.Binary() == node.node_id() && node.state() == GcsNodeInfo::DEAD) {
              // The node has been marked dead by itself.
              marked = true;
            }
          }
          if (!marked) {
            RAY_CHECK_OK(
                gcs_client_->Nodes().AsyncUnregister(node_id, /* callback */ nullptr));
            // Broadcast a warning to all of the drivers indicating that the node
            // has been marked as dead.
            // TODO(rkn): Define this constant somewhere else.
            std::string type = "node_removed";
            std::ostringstream error_message;
            error_message << "The node with client ID " << node_id
                          << " has been marked dead because the monitor"
                          << " has missed too many heartbeats from it.";
            auto error_data_ptr =
                gcs::CreateErrorTableData(type, error_message.str(), current_time_ms());
            RAY_CHECK_OK(
                gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr));
          }
        };
        RAY_CHECK_OK(gcs_client_->Nodes().AsyncGetAll(lookup_callback));
        dead_nodes_.insert(node_id);
      }
      // erase() returns the next valid iterator (erase-while-iterating idiom).
      it = heartbeats_.erase(it);
    } else {
      it++;
    }
  }
  // Send any buffered heartbeats as a single publish.
  if (!heartbeat_buffer_.empty()) {
    auto batch = std::make_shared<HeartbeatBatchTableData>();
    for (const auto &heartbeat : heartbeat_buffer_) {
      batch->add_batch()->CopyFrom(heartbeat.second);
    }
    RAY_CHECK_OK(gcs_client_->Nodes().AsyncReportBatchHeartbeat(batch, nullptr));
    heartbeat_buffer_.clear();
  }
  // Re-arm the timer so Tick() runs again after one heartbeat period.
  auto heartbeat_period = boost::posix_time::milliseconds(
      RayConfig::instance().raylet_heartbeat_timeout_milliseconds());
  heartbeat_timer_.expires_from_now(heartbeat_period);
  heartbeat_timer_.async_wait([this](const boost::system::error_code &error) {
    RAY_CHECK(!error);
    Tick();
  });
}
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/monitor.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_MONITOR_H
#define RAY_RAYLET_MONITOR_H
#include <memory>
#include <unordered_set>
#include "ray/common/id.h"
#include "ray/gcs/redis_gcs_client.h"
namespace ray {
namespace raylet {
using rpc::GcsNodeInfo;
using rpc::HeartbeatBatchTableData;
using rpc::HeartbeatTableData;
class Monitor {
 public:
  /// Create a Raylet monitor attached to the given GCS.
  ///
  /// \param io_service The event loop to run the monitor on.
  /// \param gcs_client_options Options (Redis address, port, password) used
  ///        to connect to the GCS. (The previous doc described separate
  ///        redis_address/redis_port parameters that no longer exist.)
  Monitor(boost::asio::io_service &io_service,
          const gcs::GcsClientOptions &gcs_client_options);
  /// Start the monitor. Listen for heartbeats from Raylets and mark Raylets
  /// that do not send a heartbeat within a given period as dead.
  void Start();
  /// A periodic timer that fires on every heartbeat period. Raylets that have
  /// not sent a heartbeat within the last num_heartbeats_timeout ticks will be
  /// marked as dead in the client table.
  void Tick();
  /// Handle a heartbeat from a Raylet.
  ///
  /// \param client_id The client ID of the Raylet that sent the heartbeat.
  /// \param heartbeat_data The heartbeat sent by the client.
  void HandleHeartbeat(const ClientID &client_id,
                       const HeartbeatTableData &heartbeat_data);
 private:
  /// A client to the GCS, through which heartbeats are received.
  std::unique_ptr<gcs::GcsClient> gcs_client_;
  /// The number of heartbeats that can be missed before a client is removed.
  int64_t num_heartbeats_timeout_;
  /// A timer that ticks every heartbeat_timeout_ms_ milliseconds.
  boost::asio::deadline_timer heartbeat_timer_;
  /// For each Raylet that we receive a heartbeat from, the number of ticks
  /// that may pass before the Raylet will be declared dead.
  std::unordered_map<ClientID, int64_t> heartbeats_;
  /// The Raylets that have been marked as dead in gcs.
  std::unordered_set<ClientID> dead_nodes_;
  /// A buffer containing heartbeats received from node managers in the last tick.
  std::unordered_map<ClientID, HeartbeatTableData> heartbeat_buffer_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_MONITOR_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/monitor_main.cc
|
C++
|
#include <iostream>
#include "ray/common/ray_config.h"
#include "ray/raylet/monitor.h"
#include "ray/util/util.h"
#include "gflags/gflags.h"
// Command-line flags for the standalone raylet monitor process.
DEFINE_string(redis_address, "", "The ip address of redis.");
DEFINE_int32(redis_port, -1, "The port of redis.");
DEFINE_string(config_list, "", "The config list of raylet.");
DEFINE_string(redis_password, "", "The password of redis.");
// Entry point of the standalone raylet monitor: parses flags, applies the
// raylet config, then runs the Monitor's event loop until the process is
// killed.
int main(int argc, char *argv[]) {
  // RAII wrapper: starts the Ray log here and shuts it down when main returns.
  InitShutdownRAII ray_log_shutdown_raii(ray::RayLog::StartRayLog,
                                         ray::RayLog::ShutDownRayLog, argv[0],
                                         ray::RayLogLevel::INFO, /*log_dir=*/"");
  ray::RayLog::InstallFailureSignalHandler();
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  // Copy flag values into locals so gflags can be shut down early.
  const std::string redis_address = FLAGS_redis_address;
  const int redis_port = static_cast<int>(FLAGS_redis_port);
  const std::string config_list = FLAGS_config_list;
  const std::string redis_password = FLAGS_redis_password;
  gflags::ShutDownCommandLineFlags();
  ray::gcs::GcsClientOptions gcs_client_options(redis_address, redis_port,
                                                redis_password);
  std::unordered_map<std::string, std::string> raylet_config;
  // Parse the configuration list ("name1,value1,name2,value2,...").
  std::istringstream config_string(config_list);
  std::string config_name;
  std::string config_value;
  while (std::getline(config_string, config_name, ',')) {
    RAY_CHECK(std::getline(config_string, config_value, ','));
    // TODO(rkn): The line below could throw an exception. What should we do about this?
    raylet_config[config_name] = config_value;
  }
  RayConfig::instance().initialize(raylet_config);
  boost::asio::io_service io_service;
  // The code below is commented out because it appears to introduce a double
  // free error in the raylet monitor.
  // // Destroy the Raylet monitor on a SIGTERM. The pointer to io_service is
  // // guaranteed to be valid since this function will run the event loop
  // // instead of returning immediately.
  // auto handler = [&io_service](const boost::system::error_code &error,
  //                              int signal_number) { io_service.stop(); };
  // boost::asio::signal_set signals(io_service, SIGTERM);
  // signals.async_wait(handler);
  // Initialize the monitor.
  ray::raylet::Monitor monitor(io_service, gcs_client_options);
  monitor.Start();
  // Run the event loop; Tick() keeps re-arming the heartbeat timer, so this
  // blocks until the process is killed.
  io_service.run();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/node_manager.cc
|
C++
|
#include "ray/raylet/node_manager.h"
#include <cctype>
#include <fstream>
#include <memory>
#include "ray/common/buffer.h"
#include "ray/common/common_protocol.h"
#include "ray/common/id.h"
#include "ray/common/status.h"
#include "ray/gcs/pb_util.h"
#include "ray/raylet/format/node_manager_generated.h"
#include "ray/stats/stats.h"
#include "ray/util/sample.h"
namespace {
#define RAY_CHECK_ENUM(x, y) \
static_assert(static_cast<int>(x) == static_cast<int>(y), "protocol mismatch")
/// A helper function to return the expected actor counter for a given actor
/// and actor handle, according to the given actor registry. If a task's
/// counter is less than the returned value, then the task is a duplicate. If
/// the task's counter is equal to the returned value, then the task should be
/// the next to run.
///
/// \param actor_registry Registry to consult; the actor must already be in it
///        (checked with RAY_CHECK).
/// \param actor_id The ID of the actor the task targets.
/// \param actor_caller_id The task ID identifying the calling actor handle.
/// \return The task counter expected for the next task from this caller, or 0
///         if this caller has no entry in the actor's frontier yet.
int64_t GetExpectedTaskCounter(
    const std::unordered_map<ray::ActorID, ray::raylet::ActorRegistration>
        &actor_registry,
    const ray::ActorID &actor_id, const ray::TaskID &actor_caller_id) {
  auto actor_entry = actor_registry.find(actor_id);
  RAY_CHECK(actor_entry != actor_registry.end());
  const auto &frontier = actor_entry->second.GetFrontier();
  int64_t expected_task_counter = 0;
  auto frontier_entry = frontier.find(actor_caller_id);
  if (frontier_entry != frontier.end()) {
    expected_task_counter = frontier_entry->second.task_counter;
  }
  return expected_task_counter;
}
/// Aggregate statistics about the actors known to this node manager,
/// produced by GetActorStatisticalData below.
struct ActorStats {
  // Number of actors currently in the ALIVE state.
  int live_actors = 0;
  // Number of actors in neither the ALIVE nor RECONSTRUCTING state.
  int dead_actors = 0;
  // Number of actors currently in the RECONSTRUCTING state.
  int reconstructing_actors = 0;
  // The largest handle count held by any single actor.
  int max_num_handles = 0;
};
/// A helper function to return the statistical data of actors in this node manager.
///
/// \param actor_registry The actor registry to summarize. Taken by const
///        reference; the original signature took the whole map by value,
///        copying every registration on each call.
/// \return Counts of live/dead/reconstructing actors and the maximum number
///         of handles held by any single actor.
ActorStats GetActorStatisticalData(
    const std::unordered_map<ray::ActorID, ray::raylet::ActorRegistration>
        &actor_registry) {
  ActorStats item;
  for (const auto &pair : actor_registry) {
    if (pair.second.GetState() == ray::rpc::ActorTableData::ALIVE) {
      item.live_actors += 1;
    } else if (pair.second.GetState() == ray::rpc::ActorTableData::RECONSTRUCTING) {
      item.reconstructing_actors += 1;
    } else {
      item.dead_actors += 1;
    }
    if (pair.second.NumHandles() > item.max_num_handles) {
      item.max_num_handles = pair.second.NumHandles();
    }
  }
  return item;
}
} // namespace
namespace ray {
namespace raylet {
/// Construct the node manager for this raylet.
///
/// Wires together the scheduling queues, reconstruction policy, task
/// dependency manager, and lineage cache; subscribes to object add/delete
/// events from the object manager; connects to the local plasma store; and
/// starts the node manager RPC server.
///
/// \param io_service The event loop that all timers and callbacks run on.
/// \param self_node_id The ID of this node.
/// \param config Static configuration (resources, ports, worker commands).
/// \param object_manager The object manager owned by this raylet process.
/// \param gcs_client Client connection to the GCS.
/// \param object_directory Directory used to locate objects in the cluster.
NodeManager::NodeManager(boost::asio::io_service &io_service,
                         const ClientID &self_node_id, const NodeManagerConfig &config,
                         ObjectManager &object_manager,
                         std::shared_ptr<gcs::GcsClient> gcs_client,
                         std::shared_ptr<ObjectDirectoryInterface> object_directory)
    : self_node_id_(self_node_id),
      io_service_(io_service),
      object_manager_(object_manager),
      gcs_client_(gcs_client),
      object_directory_(object_directory),
      heartbeat_timer_(io_service),
      heartbeat_period_(std::chrono::milliseconds(config.heartbeat_period_ms)),
      debug_dump_period_(config.debug_dump_period_ms),
      fair_queueing_enabled_(config.fair_queueing_enabled),
      object_pinning_enabled_(config.object_pinning_enabled),
      temp_dir_(config.temp_dir),
      object_manager_profile_timer_(io_service),
      initial_config_(config),
      local_available_resources_(config.resource_config),
      worker_pool_(io_service, config.num_initial_workers,
                   config.maximum_startup_concurrency, gcs_client_,
                   config.worker_commands),
      scheduling_policy_(local_queues_),
      // Reconstruction is triggered when a required object's task lease
      // expires; the callback resubmits the task that created the object.
      reconstruction_policy_(
          io_service_,
          [this](const TaskID &task_id, const ObjectID &required_object_id) {
            HandleTaskReconstruction(task_id, required_object_id);
          },
          RayConfig::instance().initial_reconstruction_timeout_milliseconds(),
          self_node_id_, gcs_client_, object_directory_),
      task_dependency_manager_(
          object_manager, reconstruction_policy_, io_service, self_node_id_,
          RayConfig::instance().initial_reconstruction_timeout_milliseconds(),
          gcs_client_),
      lineage_cache_(self_node_id_, gcs_client_, config.max_lineage_size),
      actor_registry_(),
      node_manager_server_("NodeManager", config.node_manager_port),
      node_manager_service_(io_service, *this),
      client_call_manager_(io_service),
      new_scheduler_enabled_(RayConfig::instance().new_scheduler_enabled()) {
  RAY_CHECK(heartbeat_period_.count() > 0);
  // Initialize the resource map with own cluster resource configuration.
  cluster_resource_map_.emplace(self_node_id_,
                                SchedulingResources(config.resource_config));
  // React to objects appearing in / disappearing from the local plasma store.
  RAY_CHECK_OK(object_manager_.SubscribeObjAdded(
      [this](const object_manager::protocol::ObjectInfoT &object_info) {
        ObjectID object_id = ObjectID::FromPlasmaIdBinary(object_info.object_id);
        HandleObjectLocal(object_id);
      }));
  RAY_CHECK_OK(object_manager_.SubscribeObjDeleted(
      [this](const ObjectID &object_id) { HandleObjectMissing(object_id); }));
  if (new_scheduler_enabled_) {
    // The new scheduler tracks cluster resources itself; seed it with this
    // node's total resources.
    SchedulingResources &local_resources = cluster_resource_map_[self_node_id_];
    new_resource_scheduler_ =
        std::shared_ptr<ClusterResourceScheduler>(new ClusterResourceScheduler(
            self_node_id_.Binary(),
            local_resources.GetTotalResources().GetResourceMap()));
  }
  RAY_ARROW_CHECK_OK(store_client_.Connect(config.store_socket_name.c_str()));
  // Run the node manager rpc server.
  node_manager_server_.RegisterService(node_manager_service_);
  node_manager_server_.Run();
}
/// Register this node manager with the GCS: subscribe to actor, node,
/// resource, heartbeat, worker-failure, and job notifications, then start the
/// periodic heartbeat and object manager profiling timers.
///
/// \return Status::OK on success, or the first subscription error.
ray::Status NodeManager::RegisterGcs() {
  // The TaskLease subscription is done on demand in reconstruction policy.
  // Register a callback to handle actor notifications.
  auto actor_notification_callback = [this](const ActorID &actor_id,
                                            const ActorTableData &data) {
    HandleActorStateTransition(actor_id, ActorRegistration(data));
  };
  RAY_RETURN_NOT_OK(
      gcs_client_->Actors().AsyncSubscribeAll(actor_notification_callback, nullptr));
  auto on_node_change = [this](const ClientID &node_id, const GcsNodeInfo &data) {
    if (data.state() == GcsNodeInfo::ALIVE) {
      NodeAdded(data);
    } else {
      RAY_CHECK(data.state() == GcsNodeInfo::DEAD);
      NodeRemoved(data);
    }
  };
  // Register a callback to monitor new nodes and a callback to monitor removed nodes.
  RAY_RETURN_NOT_OK(
      gcs_client_->Nodes().AsyncSubscribeToNodeChange(on_node_change, nullptr));
  // Subscribe to resource changes.
  const auto &resources_changed =
      [this](const ClientID &id,
             const gcs::ResourceChangeNotification &resource_notification) {
        if (resource_notification.IsAdded()) {
          ResourceSet resource_set;
          for (auto &entry : resource_notification.GetData()) {
            resource_set.AddOrUpdateResource(entry.first,
                                             entry.second->resource_capacity());
          }
          ResourceCreateUpdated(id, resource_set);
        } else {
          RAY_CHECK(resource_notification.IsRemoved());
          std::vector<std::string> resource_names;
          for (auto &entry : resource_notification.GetData()) {
            resource_names.push_back(entry.first);
          }
          ResourceDeleted(id, resource_names);
        }
      };
  RAY_RETURN_NOT_OK(gcs_client_->Nodes().AsyncSubscribeToResources(
      /*subscribe_callback=*/resources_changed,
      /*done_callback=*/nullptr));
  // Subscribe to heartbeat batches from the monitor.
  const auto &heartbeat_batch_added =
      [this](const HeartbeatBatchTableData &heartbeat_batch) {
        HeartbeatBatchAdded(heartbeat_batch);
      };
  RAY_RETURN_NOT_OK(gcs_client_->Nodes().AsyncSubscribeBatchHeartbeat(
      heartbeat_batch_added, /*done*/ nullptr));
  // Subscribe to all unexpected failure notifications from the local and
  // remote raylets. Note that this does not include workers that failed due to
  // node failure. These workers can be identified by comparing the raylet_id
  // in their rpc::Address to the ID of a failed raylet.
  const auto &worker_failure_handler =
      [this](const WorkerID &id, const gcs::WorkerFailureData &worker_failure_data) {
        HandleUnexpectedWorkerFailure(id, worker_failure_data);
      };
  // Propagate failure like every other subscription in this function (the
  // original used RAY_CHECK_OK here, crashing instead of returning an error).
  RAY_RETURN_NOT_OK(gcs_client_->Workers().AsyncSubscribeToWorkerFailures(
      worker_failure_handler, /*done_callback=*/nullptr));
  // Subscribe to job updates.
  const auto job_subscribe_handler = [this](const JobID &job_id,
                                            const JobTableData &job_data) {
    HandleJobFinished(job_id, job_data);
  };
  RAY_RETURN_NOT_OK(
      gcs_client_->Jobs().AsyncSubscribeToFinishedJobs(job_subscribe_handler, nullptr));
  // Start sending heartbeats to the GCS.
  last_heartbeat_at_ms_ = current_time_ms();
  last_debug_dump_at_ms_ = current_time_ms();
  Heartbeat();
  // Start the timer that gets object manager profiling information and sends it
  // to the GCS.
  GetObjectManagerProfileInfo();
  return ray::Status::OK();
}
/// Callback invoked when the GCS reports that a worker failed unexpectedly.
/// Currently only logs the failure; cleanup is a TODO.
///
/// \param worker_id The ID of the failed worker.
/// \param worker_failed_data Failure details from the GCS (currently unused).
void NodeManager::HandleUnexpectedWorkerFailure(
    const WorkerID &worker_id, const gcs::WorkerFailureData &worker_failed_data) {
  RAY_LOG(DEBUG) << "Worker " << worker_id << " failed";
  // TODO: Clean up after the failure: If the failed worker is our owner, then exit.
}
/// Kill a worker process: send SIGTERM immediately (non-Windows), then force
/// kill after a configurable grace period.
///
/// \param worker The worker to kill. The shared_ptr is captured by the retry
///        timer's lambda, which keeps the worker struct alive until the
///        force-kill fires.
void NodeManager::KillWorker(std::shared_ptr<Worker> worker) {
#ifdef _WIN32
  // TODO(mehrdadn): Implement graceful process termination mechanism
#else
  // If we're just cleaning up a single worker, allow it some time to clean
  // up its state before force killing. The client socket will be closed
  // and the worker struct will be freed after the timeout.
  kill(worker->Process().get()->id(), SIGTERM);
#endif
  auto retry_timer = std::make_shared<boost::asio::deadline_timer>(io_service_);
  auto retry_duration = boost::posix_time::milliseconds(
      RayConfig::instance().kill_worker_timeout_milliseconds());
  retry_timer->expires_from_now(retry_duration);
  // NOTE(review): the timer's error code is ignored, so the force kill runs
  // even if the timer is cancelled — confirm this is intended.
  retry_timer->async_wait([retry_timer, worker](const boost::system::error_code &error) {
    RAY_LOG(DEBUG) << "Send SIGKILL to worker, pid=" << worker->Process().get()->id();
    // Force kill worker
    worker->Process().get()->terminate();
  });
}
/// Handle a job-finished notification from the GCS: kill the job's
/// (non-detached) workers and purge its tasks from the local queues.
///
/// \param job_id The finished job.
/// \param job_data The job's table entry; must already be marked dead.
void NodeManager::HandleJobFinished(const JobID &job_id, const JobTableData &job_data) {
  RAY_LOG(DEBUG) << "HandleJobFinished " << job_id;
  RAY_CHECK(job_data.is_dead());
  auto workers = worker_pool_.GetWorkersRunningTasksForJob(job_id);
  // Kill all the workers. The actual cleanup for these workers is done
  // later when we receive the DisconnectClient message from them.
  for (const auto &worker : workers) {
    // Detached actors deliberately outlive their job, so skip them.
    if (!worker->IsDetachedActor()) {
      // Clean up any open ray.wait calls that the worker made.
      task_dependency_manager_.UnsubscribeWaitDependencies(worker->WorkerId());
      // Mark the worker as dead so further messages from it are ignored
      // (except DisconnectClient).
      worker->MarkDead();
      // Then kill the worker process.
      KillWorker(worker);
    }
  }
  // Remove all tasks for this job from the scheduling queues, mark
  // the results for these tasks as not required, cancel any attempts
  // at reconstruction. Note that at this time the workers are likely
  // alive because of the delay in killing workers.
  auto tasks_to_remove = local_queues_.GetTaskIdsForJob(job_id);
  task_dependency_manager_.RemoveTasksAndRelatedObjects(tasks_to_remove);
  // NOTE(swang): SchedulingQueue::RemoveTasks modifies its argument so we must
  // call it last.
  local_queues_.RemoveTasks(tasks_to_remove);
}
/// Send one heartbeat to the GCS (available/total/load resources plus a
/// possibly-downsampled set of active object IDs), periodically dump debug
/// state, and re-arm the heartbeat timer.
void NodeManager::Heartbeat() {
  uint64_t now_ms = current_time_ms();
  uint64_t interval = now_ms - last_heartbeat_at_ms_;
  // Warn if the event loop stalled long enough to delay heartbeats.
  if (interval > RayConfig::instance().num_heartbeats_warning() *
                     RayConfig::instance().raylet_heartbeat_timeout_milliseconds()) {
    RAY_LOG(WARNING) << "Last heartbeat was sent " << interval << " ms ago ";
  }
  last_heartbeat_at_ms_ = now_ms;
  auto heartbeat_data = std::make_shared<HeartbeatTableData>();
  SchedulingResources &local_resources = cluster_resource_map_[self_node_id_];
  heartbeat_data->set_client_id(self_node_id_.Binary());
  // TODO(atumanov): modify the heartbeat table protocol to use the ResourceSet directly.
  // TODO(atumanov): implement a ResourceSet const_iterator.
  for (const auto &resource_pair :
       local_resources.GetAvailableResources().GetResourceMap()) {
    heartbeat_data->add_resources_available_label(resource_pair.first);
    heartbeat_data->add_resources_available_capacity(resource_pair.second);
  }
  for (const auto &resource_pair : local_resources.GetTotalResources().GetResourceMap()) {
    heartbeat_data->add_resources_total_label(resource_pair.first);
    heartbeat_data->add_resources_total_capacity(resource_pair.second);
  }
  // Refresh the load from the scheduling queues before reporting it.
  local_resources.SetLoadResources(local_queues_.GetResourceLoad());
  for (const auto &resource_pair : local_resources.GetLoadResources().GetResourceMap()) {
    heartbeat_data->add_resource_load_label(resource_pair.first);
    heartbeat_data->add_resource_load_capacity(resource_pair.second);
  }
  size_t max_size = RayConfig::instance().raylet_max_active_object_ids();
  std::unordered_set<ObjectID> active_object_ids = worker_pool_.GetActiveObjectIDs();
  if (active_object_ids.size() <= max_size) {
    for (const auto &object_id : active_object_ids) {
      heartbeat_data->add_active_object_id(object_id.Binary());
    }
  } else {
    // If there are more than the configured maximum number of object IDs to send per
    // heartbeat, sample from them randomly.
    // TODO(edoakes): we might want to improve the sampling technique here, for example
    // preferring object IDs with the earliest last-refreshed timestamp.
    std::vector<ObjectID> downsampled;
    random_sample(active_object_ids.begin(), active_object_ids.end(), max_size,
                  &downsampled);
    for (const auto &object_id : downsampled) {
      heartbeat_data->add_active_object_id(object_id.Binary());
    }
  }
  ray::Status status = gcs_client_->Nodes().AsyncReportHeartbeat(heartbeat_data,
                                                                 /*done*/ nullptr);
  RAY_CHECK_OK_PREPEND(status, "Heartbeat failed");
  // Piggy-back periodic debug dumping / metrics on the heartbeat cadence.
  if (debug_dump_period_ > 0 &&
      static_cast<int64_t>(now_ms - last_debug_dump_at_ms_) > debug_dump_period_) {
    DumpDebugState();
    RecordMetrics();
    WarnResourceDeadlock();
    last_debug_dump_at_ms_ = now_ms;
  }
  // Reset the timer.
  heartbeat_timer_.expires_from_now(heartbeat_period_);
  heartbeat_timer_.async_wait([this](const boost::system::error_code &error) {
    RAY_CHECK(!error);
    Heartbeat();
  });
}
// TODO(edoakes): this function is problematic because it both sends warnings spuriously
// under normal conditions and sometimes doesn't send a warning under actual deadlock
// conditions. The current logic is to push a warning when: all running tasks are
// blocked, there is at least one ready task, and a warning hasn't been pushed in
// debug_dump_period_ milliseconds.
// See https://github.com/ray-project/ray/issues/5790 for details.
void NodeManager::WarnResourceDeadlock() {
  // Check if any progress is being made on this raylet.
  for (const auto &task : local_queues_.GetTasks(TaskState::RUNNING)) {
    // Ignore blocked tasks.
    if (local_queues_.GetBlockedTaskIds().count(task.GetTaskSpecification().TaskId())) {
      continue;
    }
    // Progress is being made, don't warn.
    resource_deadlock_warned_ = false;
    return;
  }
  // suppress duplicates warning messages
  if (resource_deadlock_warned_) {
    return;
  }
  // The node is full of actors and no progress has been made for some time.
  // If there are any pending tasks, build a warning.
  std::ostringstream error_message;
  ray::Task exemplar;
  bool should_warn = false;
  int pending_actor_creations = 0;
  int pending_tasks = 0;
  // See if any tasks are blocked trying to acquire resources.
  for (const auto &task : local_queues_.GetTasks(TaskState::READY)) {
    const TaskSpecification &spec = task.GetTaskSpecification();
    if (spec.IsActorCreationTask()) {
      pending_actor_creations += 1;
    } else {
      pending_tasks += 1;
    }
    // Use the first ready task found as the example in the warning message.
    if (!should_warn) {
      exemplar = task;
      should_warn = true;
    }
  }
  // Push a warning to the driver that a task is blocked trying to acquire resources.
  if (should_warn) {
    SchedulingResources &local_resources = cluster_resource_map_[self_node_id_];
    error_message
        << "The actor or task with ID " << exemplar.GetTaskSpecification().TaskId()
        << " is pending and cannot currently be scheduled. It requires "
        << exemplar.GetTaskSpecification().GetRequiredResources().ToString()
        << " for execution and "
        << exemplar.GetTaskSpecification().GetRequiredPlacementResources().ToString()
        << " for placement, but this node only has remaining "
        << local_resources.GetAvailableResources().ToString() << ". In total there are "
        << pending_tasks << " pending tasks and " << pending_actor_creations
        << " pending actors on this node. "
        << "This is likely due to all cluster resources being claimed by actors. "
        << "To resolve the issue, consider creating fewer actors or increase the "
        << "resources available to this Ray cluster. You can ignore this message "
        << "if this Ray cluster is expected to auto-scale.";
    auto error_data_ptr = gcs::CreateErrorTableData(
        "resource_deadlock", error_message.str(), current_time_ms(),
        exemplar.GetTaskSpecification().JobId());
    RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr));
    // Remember we warned so the next Heartbeat doesn't repeat the message.
    resource_deadlock_warned_ = true;
  }
}
void NodeManager::GetObjectManagerProfileInfo() {
int64_t start_time_ms = current_time_ms();
auto profile_info = object_manager_.GetAndResetProfilingInfo();
if (profile_info->profile_events_size() > 0) {
RAY_CHECK_OK(gcs_client_->Stats().AsyncAddProfileData(profile_info, nullptr));
}
// Reset the timer.
object_manager_profile_timer_.expires_from_now(heartbeat_period_);
object_manager_profile_timer_.async_wait(
[this](const boost::system::error_code &error) {
RAY_CHECK(!error);
GetObjectManagerProfileInfo();
});
int64_t interval = current_time_ms() - start_time_ms;
if (interval > RayConfig::instance().handler_warning_timeout_ms()) {
RAY_LOG(WARNING) << "GetObjectManagerProfileInfo handler took " << interval << " ms.";
}
}
/// Handle a node-added notification from the GCS: record the node's
/// resources and open an RPC client to its node manager.
///
/// \param node_info The GCS table entry for the new node.
void NodeManager::NodeAdded(const GcsNodeInfo &node_info) {
  const ClientID node_id = ClientID::FromBinary(node_info.node_id());
  RAY_LOG(DEBUG) << "[NodeAdded] Received callback from client id " << node_id;
  if (node_id == self_node_id_) {
    // We got a notification for ourselves, so we are connected to the GCS now.
    // Save this NodeManager's resource information in the cluster resource map.
    cluster_resource_map_[node_id] = initial_config_.resource_config;
    return;
  }
  // Ignore duplicate notifications for a node we already track.
  auto entry = remote_node_manager_clients_.find(node_id);
  if (entry != remote_node_manager_clients_.end()) {
    RAY_LOG(DEBUG) << "Received notification of a new client that already exists: "
                   << node_id;
    return;
  }
  // Initialize a rpc client to the new node manager.
  std::unique_ptr<rpc::NodeManagerClient> client(
      new rpc::NodeManagerClient(node_info.node_manager_address(),
                                 node_info.node_manager_port(), client_call_manager_));
  remote_node_manager_clients_.emplace(node_id, std::move(client));
  // Fetch resource info for the remote client and update cluster resource map.
  RAY_CHECK_OK(gcs_client_->Nodes().AsyncGetResources(
      node_id,
      [this, node_id](Status status,
                      const boost::optional<gcs::NodeInfoAccessor::ResourceMap> &data) {
        if (data) {
          ResourceSet resource_set;
          for (auto &resource_entry : *data) {
            resource_set.AddOrUpdateResource(resource_entry.first,
                                             resource_entry.second->resource_capacity());
          }
          ResourceCreateUpdated(node_id, resource_set);
        }
      }));
}
/// Handle a node-removed notification from the GCS: drop the node's cached
/// state, broadcast death of its actors, and flush uncommitted lineage.
///
/// \param node_info The GCS table entry for the removed node.
void NodeManager::NodeRemoved(const GcsNodeInfo &node_info) {
  // TODO(swang): If we receive a notification for our own death, clean up and
  // exit immediately.
  const ClientID node_id = ClientID::FromBinary(node_info.node_id());
  RAY_LOG(DEBUG) << "[NodeRemoved] Received callback from client id " << node_id;
  RAY_CHECK(node_id != self_node_id_)
      << "Exiting because this node manager has mistakenly been marked dead by the "
      << "monitor.";
  // Below, when we remove node_id from all of these data structures, we could
  // check that it is actually removed, or log a warning otherwise, but that may
  // not be necessary.
  // Remove the client from the resource map.
  cluster_resource_map_.erase(node_id);
  // Remove the node manager client.
  const auto client_entry = remote_node_manager_clients_.find(node_id);
  if (client_entry != remote_node_manager_clients_.end()) {
    remote_node_manager_clients_.erase(client_entry);
  } else {
    RAY_LOG(WARNING) << "Received NodeRemoved callback for an unknown client " << node_id
                     << ".";
  }
  // For any live actors that were on the dead node, broadcast a notification
  // about the actor's death
  // TODO(swang): This could be very slow if there are many actors.
  for (const auto &actor_entry : actor_registry_) {
    if (actor_entry.second.GetNodeManagerId() == node_id &&
        actor_entry.second.GetState() == ActorTableData::ALIVE) {
      RAY_LOG(INFO) << "Actor " << actor_entry.first
                    << " is disconnected, because its node " << node_id
                    << " is removed from cluster. It may be reconstructed.";
      HandleDisconnectedActor(actor_entry.first, /*was_local=*/false,
                              /*intentional_disconnect=*/false);
    }
  }
  // Notify the object directory that the client has been removed so that it
  // can remove it from any cached locations.
  object_directory_->HandleClientRemoved(node_id);
  // Flush all uncommitted tasks from the local lineage cache. This is to
  // guarantee that all tasks get flushed eventually, in case one of the tasks
  // in our local cache was supposed to be flushed by the node that died.
  lineage_cache_.FlushAllUncommittedTasks();
}
/// Apply a resource-created/updated notification for a node to the local
/// resource bookkeeping, and retry scheduling infeasible tasks if the update
/// is for this node.
///
/// \param client_id The node whose resources changed.
/// \param createUpdatedResources The new or updated resource capacities.
void NodeManager::ResourceCreateUpdated(const ClientID &client_id,
                                        const ResourceSet &createUpdatedResources) {
  RAY_LOG(DEBUG) << "[ResourceCreateUpdated] received callback from client id "
                 << client_id << " with created or updated resources: "
                 << createUpdatedResources.ToString() << ". Updating resource map.";
  SchedulingResources &cluster_schedres = cluster_resource_map_[client_id];
  // Update local_available_resources_ and SchedulingResources
  for (const auto &resource_pair : createUpdatedResources.GetResourceMap()) {
    const std::string &resource_label = resource_pair.first;
    const double &new_resource_capacity = resource_pair.second;
    cluster_schedres.UpdateResourceCapacity(resource_label, new_resource_capacity);
    if (client_id == self_node_id_) {
      local_available_resources_.AddOrUpdateResource(resource_label,
                                                     new_resource_capacity);
    }
    if (new_scheduler_enabled_) {
      new_resource_scheduler_->UpdateResourceCapacity(client_id.Binary(), resource_label,
                                                      new_resource_capacity);
    }
  }
  RAY_LOG(DEBUG) << "[ResourceCreateUpdated] Updated cluster_resource_map.";
  if (client_id == self_node_id_) {
    // The resource update is on the local node, check if we can reschedule tasks.
    TryLocalInfeasibleTaskScheduling();
  }
  // (Removed a redundant trailing `return;` from this void function.)
}
/// Apply a resource-deleted notification for a node to the local resource
/// bookkeeping.
///
/// \param client_id The node whose resources were deleted.
/// \param resource_names Labels of the deleted resources.
void NodeManager::ResourceDeleted(const ClientID &client_id,
                                  const std::vector<std::string> &resource_names) {
  if (RAY_LOG_ENABLED(DEBUG)) {
    std::ostringstream oss;
    for (auto &resource_name : resource_names) {
      oss << resource_name << ", ";
    }
    RAY_LOG(DEBUG) << "[ResourceDeleted] received callback from client id " << client_id
                   << " with deleted resources: " << oss.str()
                   << ". Updating resource map.";
  }
  SchedulingResources &cluster_schedres = cluster_resource_map_[client_id];
  // Update local_available_resources_ and SchedulingResources
  for (const auto &resource_label : resource_names) {
    cluster_schedres.DeleteResource(resource_label);
    if (client_id == self_node_id_) {
      local_available_resources_.DeleteResource(resource_label);
    }
    if (new_scheduler_enabled_) {
      new_resource_scheduler_->DeleteResource(client_id.Binary(), resource_label);
    }
  }
  RAY_LOG(DEBUG) << "[ResourceDeleted] Updated cluster_resource_map.";
  // (Removed a redundant trailing `return;` from this void function.)
}
void NodeManager::TryLocalInfeasibleTaskScheduling() {
RAY_LOG(DEBUG) << "[LocalResourceUpdateRescheduler] The resource update is on the "
"local node, check if we can reschedule tasks";
SchedulingResources &new_local_resources = cluster_resource_map_[self_node_id_];
// SpillOver locally to figure out which infeasible tasks can be placed now
std::vector<TaskID> decision = scheduling_policy_.SpillOver(new_local_resources);
std::unordered_set<TaskID> local_task_ids(decision.begin(), decision.end());
// Transition locally placed tasks to waiting or ready for dispatch.
if (local_task_ids.size() > 0) {
std::vector<Task> tasks = local_queues_.RemoveTasks(local_task_ids);
for (const auto &t : tasks) {
EnqueuePlaceableTask(t);
}
}
}
/// Apply a single remote node's heartbeat: refresh its available/load
/// resources in the cluster resource map, then either feed the new scheduler
/// or run spillover to forward tasks to that node.
///
/// \param client_id The node the heartbeat came from.
/// \param heartbeat_data The heartbeat payload.
void NodeManager::HeartbeatAdded(const ClientID &client_id,
                                 const HeartbeatTableData &heartbeat_data) {
  // Locate the client id in remote client table and update available resources based on
  // the received heartbeat information.
  auto it = cluster_resource_map_.find(client_id);
  if (it == cluster_resource_map_.end()) {
    // Haven't received the client registration for this client yet, skip this heartbeat.
    RAY_LOG(INFO) << "[HeartbeatAdded]: received heartbeat from unknown client id "
                  << client_id;
    return;
  }
  SchedulingResources &remote_resources = it->second;
  ResourceSet remote_total(VectorFromProtobuf(heartbeat_data.resources_total_label()),
                           VectorFromProtobuf(heartbeat_data.resources_total_capacity()));
  ResourceSet remote_available(
      VectorFromProtobuf(heartbeat_data.resources_available_label()),
      VectorFromProtobuf(heartbeat_data.resources_available_capacity()));
  ResourceSet remote_load(VectorFromProtobuf(heartbeat_data.resource_load_label()),
                          VectorFromProtobuf(heartbeat_data.resource_load_capacity()));
  // TODO(atumanov): assert that the load is a non-empty ResourceSet.
  remote_resources.SetAvailableResources(std::move(remote_available));
  // Extract the load information and save it locally.
  remote_resources.SetLoadResources(std::move(remote_load));
  if (new_scheduler_enabled_ && client_id != self_node_id_) {
    // BUGFIX: the original read `remote_available` here, but it was moved
    // from above (valid-but-unspecified). Read the freshly stored value back
    // out of `remote_resources` instead.
    new_resource_scheduler_->AddOrUpdateNode(
        client_id.Binary(), remote_total.GetResourceMap(),
        remote_resources.GetAvailableResources().GetResourceMap());
    NewSchedulerSchedulePendingTasks();
    return;
  }
  // Extract decision for this raylet.
  auto decision = scheduling_policy_.SpillOver(remote_resources);
  std::unordered_set<TaskID> local_task_ids;
  for (const auto &task_id : decision) {
    // (See design_docs/task_states.rst for the state transition diagram.)
    Task task;
    TaskState state;
    if (!local_queues_.RemoveTask(task_id, &task, &state)) {
      // NOTE(review): this returns (abandoning the remaining spillover
      // decisions) rather than `continue`-ing — confirm that is intended.
      return;
    }
    // Since we are spilling back from the ready and waiting queues, we need
    // to unsubscribe the dependencies.
    if (state != TaskState::INFEASIBLE) {
      // Don't unsubscribe for infeasible tasks because we never subscribed in
      // the first place.
      RAY_CHECK(task_dependency_manager_.UnsubscribeGetDependencies(task_id));
    }
    // Attempt to forward the task. If this fails to forward the task,
    // the task will be resubmit locally.
    ForwardTaskOrResubmit(task, client_id);
  }
}
void NodeManager::HeartbeatBatchAdded(const HeartbeatBatchTableData &heartbeat_batch) {
// Update load information provided by each heartbeat.
// TODO(edoakes): this isn't currently used, but will be used to refresh the LRU
// cache in the object store.
std::unordered_set<ObjectID> active_object_ids;
for (const auto &heartbeat_data : heartbeat_batch.batch()) {
for (int i = 0; i < heartbeat_data.active_object_id_size(); i++) {
active_object_ids.insert(ObjectID::FromBinary(heartbeat_data.active_object_id(i)));
}
const ClientID &client_id = ClientID::FromBinary(heartbeat_data.client_id());
if (client_id == self_node_id_) {
// Skip heartbeats from self.
continue;
}
HeartbeatAdded(client_id, heartbeat_data);
}
// Refresh the active object IDs in plasma to prevent them from being evicted.
std::vector<plasma::ObjectID> plasma_ids;
plasma_ids.reserve(active_object_ids.size());
for (const ObjectID &object_id : active_object_ids) {
plasma_ids.push_back(object_id.ToPlasmaId());
}
if (!store_client_.Refresh(plasma_ids).ok()) {
RAY_LOG(WARNING) << "Failed to refresh active object IDs in plasma.";
}
}
/// Apply an actor state notification from the GCS to the local actor
/// registry, ignoring stale updates, and react to the new state: dequeue
/// waiting methods on ALIVE, fail queued tasks on DEAD, and resubmit queued
/// tasks on RECONSTRUCTING.
///
/// \param actor_id The actor whose state changed.
/// \param actor_registration The actor's new registration data.
void NodeManager::HandleActorStateTransition(const ActorID &actor_id,
                                             ActorRegistration &&actor_registration) {
  // Update local registry.
  auto it = actor_registry_.find(actor_id);
  if (it == actor_registry_.end()) {
    it = actor_registry_.emplace(actor_id, actor_registration).first;
  } else {
    // Only process the state transition if it is to a later state than ours.
    if (actor_registration.GetState() > it->second.GetState() &&
        actor_registration.GetRemainingReconstructions() ==
            it->second.GetRemainingReconstructions()) {
      // The new state is later than ours if it is about the same lifetime, but
      // a greater state.
      it->second = actor_registration;
    } else if (actor_registration.GetRemainingReconstructions() <
               it->second.GetRemainingReconstructions()) {
      // The new state is also later than ours if it is about a later lifetime of
      // the actor.
      it->second = actor_registration;
    } else {
      // Our state is already at or past the update, so skip the update.
      return;
    }
  }
  RAY_LOG(DEBUG) << "Actor notification received: actor_id = " << actor_id
                 << ", node_manager_id = " << actor_registration.GetNodeManagerId()
                 << ", state = "
                 << ActorTableData::ActorState_Name(actor_registration.GetState())
                 << ", remaining_reconstructions = "
                 << actor_registration.GetRemainingReconstructions();
  if (actor_registration.GetState() == ActorTableData::ALIVE) {
    // The actor is now alive (created for the first time or reconstructed). We can
    // stop listening for the actor creation task. This is needed because we use
    // `ListenAndMaybeReconstruct` to reconstruct the actor.
    reconstruction_policy_.Cancel(actor_registration.GetActorCreationDependency());
    // The actor's location is now known. Dequeue any methods that were
    // submitted before the actor's location was known.
    // (See design_docs/task_states.rst for the state transition diagram.)
    const auto &methods = local_queues_.GetTasks(TaskState::WAITING_FOR_ACTOR_CREATION);
    std::unordered_set<TaskID> created_actor_method_ids;
    for (const auto &method : methods) {
      if (method.GetTaskSpecification().ActorId() == actor_id) {
        created_actor_method_ids.insert(method.GetTaskSpecification().TaskId());
      }
    }
    // Resubmit the methods that were submitted before the actor's location was
    // known.
    auto created_actor_methods = local_queues_.RemoveTasks(created_actor_method_ids);
    for (const auto &method : created_actor_methods) {
      // Maintain the invariant that if a task is in the
      // MethodsWaitingForActorCreation queue, then it is subscribed to its
      // respective actor creation task. Since the actor location is now known,
      // we can remove the task from the queue and forget its dependency on the
      // actor creation task.
      RAY_CHECK(task_dependency_manager_.UnsubscribeGetDependencies(
          method.GetTaskSpecification().TaskId()));
      // The task's uncommitted lineage was already added to the local lineage
      // cache upon the initial submission, so it's okay to resubmit it with an
      // empty lineage this time.
      SubmitTask(method, Lineage());
    }
  } else if (actor_registration.GetState() == ActorTableData::DEAD) {
    // When an actor dies, loop over all of the queued tasks for that actor
    // and treat them as failed.
    auto tasks_to_remove = local_queues_.GetTaskIdsForActor(actor_id);
    auto removed_tasks = local_queues_.RemoveTasks(tasks_to_remove);
    for (auto const &task : removed_tasks) {
      TreatTaskAsFailed(task, ErrorType::ACTOR_DIED);
    }
  } else {
    RAY_CHECK(actor_registration.GetState() == ActorTableData::RECONSTRUCTING);
    RAY_LOG(DEBUG) << "Actor is being reconstructed: " << actor_id;
    // The actor is dead and needs reconstruction. Attempting to reconstruct its
    // creation task.
    reconstruction_policy_.ListenAndMaybeReconstruct(
        actor_registration.GetActorCreationDependency());
    // When an actor fails but can be reconstructed, resubmit all of the queued
    // tasks for that actor. This will mark the tasks as waiting for actor
    // creation.
    auto tasks_to_remove = local_queues_.GetTaskIdsForActor(actor_id);
    auto removed_tasks = local_queues_.RemoveTasks(tasks_to_remove);
    for (auto const &task : removed_tasks) {
      SubmitTask(task, Lineage());
    }
  }
}
/// Entry point for a freshly accepted local client connection.
///
/// \param client The new connection; all local clients are workers, so we
///        immediately begin the message loop on it.
void NodeManager::ProcessNewClient(LocalClientConnection &client) {
  // The new client is a worker, so begin listening for messages.
  client.ProcessMessages();
}
// A helper function to create a mapping from task scheduling class to
// tasks with that class from a given list of tasks.
//
// \param tasks The tasks to group.
// \return For each scheduling class present, the task IDs of that class in
//         their original (FIFO) order.
std::unordered_map<SchedulingClass, ordered_set<TaskID>> MakeTasksByClass(
    const std::vector<Task> &tasks) {
  std::unordered_map<SchedulingClass, ordered_set<TaskID>> result;
  for (const auto &task : tasks) {
    // Bind by const reference to avoid copying the TaskSpecification each
    // iteration (the original `auto spec = ...` made a copy).
    const auto &spec = task.GetTaskSpecification();
    result[spec.GetSchedulingClass()].push_back(spec.TaskId());
  }
  return result;
}
/// Dispatch ready tasks to idle workers, approximately fairly across
/// scheduling classes.
///
/// \param tasks_by_class Ready tasks grouped by scheduling class (see
///        MakeTasksByClass), FIFO-ordered within each class.
void NodeManager::DispatchTasks(
    const std::unordered_map<SchedulingClass, ordered_set<TaskID>> &tasks_by_class) {
  // Dispatch tasks in priority order by class. This avoids starvation problems where
  // one class of tasks become stuck behind others in the queue, causing Ray to start
  // many workers. See #3644 for a more detailed description of this issue.
  std::vector<const std::pair<const SchedulingClass, ordered_set<TaskID>> *> fair_order;
  for (auto &it : tasks_by_class) {
    fair_order.emplace_back(&it);
  }
  // Prioritize classes that have fewer currently running tasks. Note that we only
  // sort once per round of task dispatch, which is less fair then it could be, but
  // is simpler and faster.
  if (fair_queueing_enabled_) {
    std::sort(
        std::begin(fair_order), std::end(fair_order),
        [this](const std::pair<const SchedulingClass, ordered_set<ray::TaskID>> *a,
               const std::pair<const SchedulingClass, ordered_set<ray::TaskID>> *b) {
          return local_queues_.NumRunning(a->first) < local_queues_.NumRunning(b->first);
        });
  }
  std::vector<std::function<void()>> post_assign_callbacks;
  // Approximate fair round robin between classes.
  for (const auto &it : fair_order) {
    const auto &task_resources =
        TaskSpecification::GetSchedulingClassDescriptor(it->first).first;
    // FIFO order within each class.
    for (const auto &task_id : it->second) {
      const auto &task = local_queues_.GetTaskOfState(task_id, TaskState::READY);
      if (!local_available_resources_.Contains(task_resources)) {
        // All the tasks in it.second have the same resource shape, so
        // once the first task is not feasible, we can break out of this loop
        break;
      }
      // Try to get an idle worker to execute this task. If nullptr, there
      // aren't any available workers so we can't assign the task.
      std::shared_ptr<Worker> worker =
          worker_pool_.PopWorker(task.GetTaskSpecification());
      if (worker != nullptr) {
        AssignTask(worker, task, &post_assign_callbacks);
      }
    }
  }
  // Call the callbacks from the AssignTask calls above. These need to be called
  // after the above loop, as they may alter the scheduling queues and invalidate
  // the loop iterator.
  for (auto &func : post_assign_callbacks) {
    func();
  }
}
/// Process a flatbuffer-encoded message received from a connected worker or
/// driver. Each message type is dispatched to its dedicated handler; after
/// handling, the connection is re-armed to listen for the next message,
/// except for disconnect messages, whose cases return early.
///
/// \param client The connection the message arrived on.
/// \param message_type Raw message type value (a protocol::MessageType).
/// \param message_data Pointer to the flatbuffer-encoded message payload.
void NodeManager::ProcessClientMessage(
    const std::shared_ptr<LocalClientConnection> &client, int64_t message_type,
    const uint8_t *message_data) {
  auto registered_worker = worker_pool_.GetRegisteredWorker(client);
  auto message_type_value = static_cast<protocol::MessageType>(message_type);
  RAY_LOG(DEBUG) << "[Worker] Message "
                 << protocol::EnumNameMessageType(message_type_value) << "("
                 << message_type << ") from worker with PID "
                 << (registered_worker
                         ? std::to_string(registered_worker->Process().get()->id())
                         : "nil");
  if (registered_worker && registered_worker->IsDead()) {
    // For a worker that is marked as dead (because the job has died already),
    // all the messages are ignored except DisconnectClient.
    if ((message_type_value != protocol::MessageType::DisconnectClient) &&
        (message_type_value != protocol::MessageType::IntentionalDisconnectClient)) {
      // Listen for more messages.
      client->ProcessMessages();
      return;
    }
  }
  switch (message_type_value) {
  case protocol::MessageType::RegisterClientRequest: {
    ProcessRegisterClientRequestMessage(client, message_data);
  } break;
  case protocol::MessageType::TaskDone: {
    // The worker finished its assigned task; mark it available again.
    HandleWorkerAvailable(client);
  } break;
  case protocol::MessageType::DisconnectClient: {
    ProcessDisconnectClientMessage(client);
    // We don't need to receive future messages from this client,
    // because it's already disconnected.
    return;
  } break;
  case protocol::MessageType::IntentionalDisconnectClient: {
    ProcessDisconnectClientMessage(client, /* intentional_disconnect = */ true);
    // We don't need to receive future messages from this client,
    // because it's already disconnected.
    return;
  } break;
  case protocol::MessageType::SubmitTask: {
    // For tasks submitted via the raylet path, we must make sure to order the
    // task submission so that tasks are always submitted after the tasks that
    // they depend on.
    ProcessSubmitTaskMessage(message_data);
  } break;
  case protocol::MessageType::SetResourceRequest: {
    ProcessSetResourceRequest(client, message_data);
  } break;
  case protocol::MessageType::FetchOrReconstruct: {
    ProcessFetchOrReconstructMessage(client, message_data);
  } break;
  case protocol::MessageType::NotifyDirectCallTaskBlocked: {
    std::shared_ptr<Worker> worker = worker_pool_.GetRegisteredWorker(client);
    HandleDirectCallTaskBlocked(worker);
  } break;
  case protocol::MessageType::NotifyDirectCallTaskUnblocked: {
    std::shared_ptr<Worker> worker = worker_pool_.GetRegisteredWorker(client);
    HandleDirectCallTaskUnblocked(worker);
  } break;
  case protocol::MessageType::NotifyUnblocked: {
    auto message = flatbuffers::GetRoot<protocol::NotifyUnblocked>(message_data);
    AsyncResolveObjectsFinish(client, from_flatbuf<TaskID>(*message->task_id()),
                              /*was_blocked*/ true);
  } break;
  case protocol::MessageType::WaitRequest: {
    ProcessWaitRequestMessage(client, message_data);
  } break;
  case protocol::MessageType::WaitForDirectActorCallArgsRequest: {
    ProcessWaitForDirectActorCallArgsRequestMessage(client, message_data);
  } break;
  case protocol::MessageType::PushErrorRequest: {
    ProcessPushErrorRequestMessage(message_data);
  } break;
  case protocol::MessageType::PushProfileEventsRequest: {
    // The payload is a serialized ProfileTableData protobuf wrapped in a
    // flatbuffer string; forward it to the GCS.
    auto fbs_message = flatbuffers::GetRoot<flatbuffers::String>(message_data);
    auto profile_table_data = std::make_shared<rpc::ProfileTableData>();
    RAY_CHECK(
        profile_table_data->ParseFromArray(fbs_message->data(), fbs_message->size()));
    RAY_CHECK_OK(gcs_client_->Stats().AsyncAddProfileData(profile_table_data, nullptr));
  } break;
  case protocol::MessageType::FreeObjectsInObjectStoreRequest: {
    auto message = flatbuffers::GetRoot<protocol::FreeObjectsRequest>(message_data);
    std::vector<ObjectID> object_ids = from_flatbuf<ObjectID>(*message->object_ids());
    // Clean up objects from the object store.
    object_manager_.FreeObjects(object_ids, message->local_only());
    if (message->delete_creating_tasks()) {
      // Clean up their creating tasks from GCS.
      std::vector<TaskID> creating_task_ids;
      for (const auto &object_id : object_ids) {
        creating_task_ids.push_back(object_id.TaskId());
      }
      RAY_CHECK_OK(gcs_client_->Tasks().AsyncDelete(creating_task_ids, nullptr));
    }
  } break;
  case protocol::MessageType::PrepareActorCheckpointRequest: {
    ProcessPrepareActorCheckpointRequest(client, message_data);
  } break;
  case protocol::MessageType::NotifyActorResumedFromCheckpoint: {
    ProcessNotifyActorResumedFromCheckpoint(message_data);
  } break;
  case protocol::MessageType::ReportActiveObjectIDs: {
    ProcessReportActiveObjectIDs(client, message_data);
  } break;
  default:
    RAY_LOG(FATAL) << "Received unexpected message type " << message_type;
  }
  // Listen for more messages.
  client->ProcessMessages();
}
/// Handle a RegisterClientRequest message: register the connection as either
/// a worker or a driver, and asynchronously send back a RegisterClientReply
/// containing this node's ID.
///
/// \param client The connection the request arrived on.
/// \param message_data The flatbuffer-encoded RegisterClientRequest.
void NodeManager::ProcessRegisterClientRequestMessage(
    const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data) {
  client->Register();
  // Send the reply first; if the async write fails, the client is
  // disconnected. Registration below proceeds regardless of the write.
  flatbuffers::FlatBufferBuilder fbb;
  auto reply =
      ray::protocol::CreateRegisterClientReply(fbb, to_flatbuf(fbb, self_node_id_));
  fbb.Finish(reply);
  client->WriteMessageAsync(
      static_cast<int64_t>(protocol::MessageType::RegisterClientReply), fbb.GetSize(),
      fbb.GetBufferPointer(), [this, client](const ray::Status &status) {
        if (!status.ok()) {
          RAY_LOG(WARNING)
              << "Failed to send RegisterClientReply to client, so disconnecting";
          ProcessDisconnectClientMessage(client);
        }
      });
  auto message = flatbuffers::GetRoot<protocol::RegisterClientRequest>(message_data);
  Language language = static_cast<Language>(message->language());
  WorkerID worker_id = from_flatbuf<WorkerID>(*message->worker_id());
  pid_t pid = message->worker_pid();
  auto worker = std::make_shared<Worker>(worker_id, language, message->port(), client,
                                         client_call_manager_);
  if (message->is_worker()) {
    // Register the new worker.
    if (worker_pool_.RegisterWorker(worker, pid).ok()) {
      // Registration succeeded; the worker is immediately available for tasks.
      HandleWorkerAvailable(worker->Connection());
    }
  } else {
    // Register the new driver.
    worker->SetProcess(ProcessHandle::FromPid(pid));
    const JobID job_id = from_flatbuf<JobID>(*message->job_id());
    // Compute a dummy driver task id from a given driver.
    const TaskID driver_task_id = TaskID::ComputeDriverTaskId(worker_id);
    worker->AssignTaskId(driver_task_id);
    worker->AssignJobId(job_id);
    Status status = worker_pool_.RegisterDriver(worker);
    if (status.ok()) {
      local_queues_.AddDriverTaskId(driver_task_id);
      // Announce the new job to the GCS.
      auto job_data_ptr =
          gcs::CreateJobTableData(job_id, /*is_dead*/ false, std::time(nullptr),
                                  initial_config_.node_manager_address, pid);
      RAY_CHECK_OK(gcs_client_->Jobs().AsyncAdd(job_data_ptr, nullptr));
    }
  }
}
/// Update an actor's state after its worker disconnected, and trigger
/// reconstruction if the actor has reconstructions remaining and the
/// disconnect was not intentional.
///
/// \param actor_id The actor whose worker died. Must be in actor_registry_.
/// \param was_local Whether the actor was running on this node.
/// \param intentional_disconnect Whether the worker disconnected deliberately
///        (in which case the actor is not reconstructed).
void NodeManager::HandleDisconnectedActor(const ActorID &actor_id, bool was_local,
                                          bool intentional_disconnect) {
  auto actor_entry = actor_registry_.find(actor_id);
  RAY_CHECK(actor_entry != actor_registry_.end());
  auto &actor_registration = actor_entry->second;
  RAY_LOG(DEBUG) << "The actor with ID " << actor_id << " died "
                 << (intentional_disconnect ? "intentionally" : "unintentionally")
                 << ", remaining reconstructions = "
                 << actor_registration.GetRemainingReconstructions();
  // Check if this actor needs to be reconstructed.
  ActorState new_state =
      actor_registration.GetRemainingReconstructions() > 0 && !intentional_disconnect
          ? ActorTableData::RECONSTRUCTING
          : ActorTableData::DEAD;
  if (was_local) {
    // Clean up the dummy objects from this actor.
    RAY_LOG(DEBUG) << "Removing dummy objects for actor: " << actor_id;
    for (auto &dummy_object_pair : actor_entry->second.GetDummyObjects()) {
      HandleObjectMissing(dummy_object_pair.first);
    }
  }
  // Update the actor's state.
  ActorTableData new_actor_info = actor_entry->second.GetTableData();
  new_actor_info.set_state(new_state);
  if (was_local) {
    // If the actor was local, immediately update the state in actor registry.
    // So if we receive any actor tasks before we receive GCS notification,
    // these tasks can be correctly routed to the `MethodsWaitingForActorCreation`
    // queue, instead of being assigned to the dead actor.
    HandleActorStateTransition(actor_id, ActorRegistration(new_actor_info));
  }
  auto done = [was_local, actor_id](Status status) {
    if (was_local && !status.ok()) {
      // If the disconnected actor was local, only this node will try to update actor
      // state. So the update shouldn't fail.
      RAY_LOG(FATAL) << "Failed to update state for actor " << actor_id
                     << ", status: " << status.ToString();
    }
  };
  // Publish the new actor state to the GCS.
  auto actor_notification = std::make_shared<ActorTableData>(new_actor_info);
  RAY_CHECK_OK(gcs_client_->Actors().AsyncUpdate(actor_id, actor_notification, done));
  if (was_local && new_state == ActorTableData::RECONSTRUCTING) {
    RAY_LOG(INFO) << "A local actor (id = " << actor_id
                  << " ) is dead, reconstructing it.";
    // Resubmit the actor creation task via its dummy object's task.
    const ObjectID &actor_creation_dummy_object_id =
        actor_registration.GetActorCreationDependency();
    HandleTaskReconstruction(actor_creation_dummy_object_id.TaskId(),
                             actor_creation_dummy_object_id);
  }
}
/// Connection-based convenience overload: resolve the Worker registered for
/// `client` and delegate to the worker-based HandleWorkerAvailable.
void NodeManager::HandleWorkerAvailable(
    const std::shared_ptr<LocalClientConnection> &client) {
  auto registered_worker = worker_pool_.GetRegisteredWorker(client);
  HandleWorkerAvailable(registered_worker);
}
/// Mark a worker as available for new work: finish its currently assigned
/// task (if any), return it to the idle pool when idle, and trigger another
/// round of task dispatch/scheduling.
///
/// \param worker The worker that has become available. Must be non-null.
void NodeManager::HandleWorkerAvailable(const std::shared_ptr<Worker> &worker) {
  RAY_CHECK(worker);
  bool worker_idle = true;
  // If the worker was assigned a task, mark it as finished.
  if (!worker->GetAssignedTaskId().IsNil()) {
    // FinishAssignedTask reports whether the worker can be returned to the
    // pool (presumably false when the worker stays bound, e.g. to an actor —
    // confirm in FinishAssignedTask).
    worker_idle = FinishAssignedTask(*worker);
  }
  if (worker_idle) {
    // Return the worker to the idle pool.
    worker_pool_.PushWorker(worker);
  }
  if (new_scheduler_enabled_) {
    DispatchScheduledTasksToWorkers();
  } else {
    // Local resource availability changed: invoke scheduling policy for local node.
    cluster_resource_map_[self_node_id_].SetLoadResources(
        local_queues_.GetResourceLoad());
    // Call task dispatch to assign work to the new worker.
    DispatchTasks(local_queues_.GetReadyTasksByClass());
  }
}
/// Handle a worker or driver disconnecting, either deliberately or by
/// crashing. Cleans up the client's blocked ray.get/ray.wait state, releases
/// its resources, publishes the failure to the GCS, and fails or re-routes
/// affected tasks.
///
/// \param client The disconnected connection.
/// \param intentional_disconnect Whether the disconnect was deliberate
///        (suppresses the "worker died" error pushed to the driver).
void NodeManager::ProcessDisconnectClientMessage(
    const std::shared_ptr<LocalClientConnection> &client, bool intentional_disconnect) {
  std::shared_ptr<Worker> worker = worker_pool_.GetRegisteredWorker(client);
  bool is_worker = false, is_driver = false;
  if (worker) {
    // The client is a worker.
    is_worker = true;
  } else {
    worker = worker_pool_.GetRegisteredDriver(client);
    if (worker) {
      // The client is a driver.
      is_driver = true;
    } else {
      RAY_LOG(INFO) << "Ignoring client disconnect because the client has already "
                    << "been disconnected.";
    }
  }
  RAY_CHECK(!(is_worker && is_driver));
  // If the client has any blocked tasks, mark them as unblocked. In
  // particular, we are no longer waiting for their dependencies.
  if (worker) {
    if (is_worker && worker->IsDead()) {
      // Don't need to unblock the client if it's a worker and is already dead.
      // Because in this case, its task is already cleaned up.
      RAY_LOG(DEBUG) << "Skip unblocking worker because it's already dead.";
    } else {
      // Clean up any open ray.get calls that the worker made.
      while (!worker->GetBlockedTaskIds().empty()) {
        // NOTE(swang): AsyncResolveObjectsFinish will modify the worker, so it is
        // not safe to pass in the iterator directly.
        const TaskID task_id = *worker->GetBlockedTaskIds().begin();
        AsyncResolveObjectsFinish(client, task_id, true);
      }
      // Clean up any open ray.wait calls that the worker made.
      task_dependency_manager_.UnsubscribeWaitDependencies(worker->WorkerId());
    }
    // Erase any lease metadata.
    leased_workers_.erase(worker->WorkerId());
    // Publish the worker failure.
    auto worker_failure_data_ptr = gcs::CreateWorkerFailureData(
        self_node_id_, worker->WorkerId(), initial_config_.node_manager_address,
        worker->Port());
    RAY_CHECK_OK(gcs_client_->Workers().AsyncReportWorkerFailure(worker_failure_data_ptr,
                                                                 nullptr));
  }
  if (is_worker) {
    // The client is a worker.
    if (worker->IsDead()) {
      // If the worker was killed by us because the driver exited,
      // treat it as intentionally disconnected.
      intentional_disconnect = true;
    }
    const ActorID &actor_id = worker->GetActorId();
    if (!actor_id.IsNil()) {
      // If the worker was an actor, update actor state, reconstruct the actor if needed,
      // and clean up actor's tasks if the actor is permanently dead.
      HandleDisconnectedActor(actor_id, true, intentional_disconnect);
    }
    const TaskID &task_id = worker->GetAssignedTaskId();
    // If the worker was running a task or actor, clean up the task and push an
    // error to the driver, unless the worker is already dead.
    if ((!task_id.IsNil() || !actor_id.IsNil()) && !worker->IsDead()) {
      // If the worker was an actor, the task was already cleaned up in
      // `HandleDisconnectedActor`.
      if (actor_id.IsNil()) {
        Task task;
        if (local_queues_.RemoveTask(task_id, &task)) {
          TreatTaskAsFailed(task, ErrorType::WORKER_DIED);
        }
      }
      if (!intentional_disconnect) {
        // Push the error to driver.
        const JobID &job_id = worker->GetAssignedJobId();
        // TODO(rkn): Define this constant somewhere else.
        std::string type = "worker_died";
        std::ostringstream error_message;
        error_message << "A worker died or was killed while executing task " << task_id
                      << ".";
        auto error_data_ptr = gcs::CreateErrorTableData(type, error_message.str(),
                                                        current_time_ms(), job_id);
        RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr));
      }
    }
    // Remove the dead client from the pool and stop listening for messages.
    worker_pool_.DisconnectWorker(worker);
    // Return the resources that were being used by this worker.
    auto const &task_resources = worker->GetTaskResourceIds();
    local_available_resources_.ReleaseConstrained(
        task_resources, cluster_resource_map_[self_node_id_].GetTotalResources());
    cluster_resource_map_[self_node_id_].Release(task_resources.ToResourceSet());
    worker->ResetTaskResourceIds();
    // Also return the worker's lifetime resources (held across tasks).
    auto const &lifetime_resources = worker->GetLifetimeResourceIds();
    local_available_resources_.ReleaseConstrained(
        lifetime_resources, cluster_resource_map_[self_node_id_].GetTotalResources());
    cluster_resource_map_[self_node_id_].Release(lifetime_resources.ToResourceSet());
    worker->ResetLifetimeResourceIds();
    RAY_LOG(DEBUG) << "Worker (pid=" << worker->Process().get()->id()
                   << ") is disconnected. "
                   << "job_id: " << worker->GetAssignedJobId();
    // Since some resources may have been released, we can try to dispatch more tasks.
    DispatchTasks(local_queues_.GetReadyTasksByClass());
  } else if (is_driver) {
    // The client is a driver.
    const auto job_id = worker->GetAssignedJobId();
    RAY_CHECK(!job_id.IsNil());
    RAY_CHECK_OK(gcs_client_->Jobs().AsyncMarkFinished(job_id, nullptr));
    const auto driver_id = ComputeDriverIdFromJob(job_id);
    local_queues_.RemoveDriverTaskId(TaskID::ComputeDriverTaskId(driver_id));
    worker_pool_.DisconnectDriver(worker);
    RAY_LOG(DEBUG) << "Driver (pid=" << worker->Process().get()->id()
                   << ") is disconnected. "
                   << "job_id: " << job_id;
  }
  client->Close();
  // TODO(rkn): Tell the object manager that this client has disconnected so
  // that it can clean up the wait requests for this client. Currently I think
  // these can be leaked.
}
void NodeManager::ProcessFetchOrReconstructMessage(
const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data) {
auto message = flatbuffers::GetRoot<protocol::FetchOrReconstruct>(message_data);
std::vector<ObjectID> required_object_ids;
for (int64_t i = 0; i < message->object_ids()->size(); ++i) {
ObjectID object_id = from_flatbuf<ObjectID>(*message->object_ids()->Get(i));
if (message->fetch_only()) {
// If only a fetch is required, then do not subscribe to the
// dependencies to the task dependency manager.
if (!task_dependency_manager_.CheckObjectLocal(object_id)) {
// Fetch the object if it's not already local.
RAY_CHECK_OK(object_manager_.Pull(object_id));
}
} else {
// If reconstruction is also required, then add any requested objects to
// the list to subscribe to in the task dependency manager. These objects
// will be pulled from remote node managers and reconstructed if
// necessary.
required_object_ids.push_back(object_id);
}
}
if (!required_object_ids.empty()) {
const TaskID task_id = from_flatbuf<TaskID>(*message->task_id());
AsyncResolveObjects(client, required_object_ids, task_id, /*ray_get=*/true,
/*mark_worker_blocked*/ message->mark_worker_blocked());
}
}
/// Handle a WaitRequest message (ray.wait): wait until either the requested
/// number of the given objects is ready or the timeout expires, then reply
/// with the found/remaining split.
///
/// \param client The connection the request arrived on.
/// \param message_data The flatbuffer-encoded WaitRequest.
void NodeManager::ProcessWaitRequestMessage(
    const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data) {
  // Read the data.
  auto message = flatbuffers::GetRoot<protocol::WaitRequest>(message_data);
  std::vector<ObjectID> object_ids = from_flatbuf<ObjectID>(*message->object_ids());
  int64_t wait_ms = message->timeout();
  uint64_t num_required_objects = static_cast<uint64_t>(message->num_ready_objects());
  bool wait_local = message->wait_local();
  std::vector<ObjectID> required_object_ids;
  for (auto const &object_id : object_ids) {
    if (!task_dependency_manager_.CheckObjectLocal(object_id)) {
      // Add any missing objects to the list to subscribe to in the task
      // dependency manager. These objects will be pulled from remote node
      // managers and reconstructed if necessary.
      required_object_ids.push_back(object_id);
    }
  }
  // Take the task ID by value: from_flatbuf returns a temporary. (This
  // declaration was previously garbled by an encoding round-trip.)
  const TaskID current_task_id = from_flatbuf<TaskID>(*message->task_id());
  bool resolve_objects = !required_object_ids.empty();
  bool was_blocked = message->mark_worker_blocked();
  if (resolve_objects) {
    // Subscribe to the missing objects (and possibly mark the worker as
    // blocked) while the wait is in flight.
    AsyncResolveObjects(client, required_object_ids, current_task_id, /*ray_get=*/false,
                        /*mark_worker_blocked*/ was_blocked);
  }
  ray::Status status = object_manager_.Wait(
      object_ids, wait_ms, num_required_objects, wait_local,
      [this, resolve_objects, was_blocked, client, current_task_id](
          std::vector<ObjectID> found, std::vector<ObjectID> remaining) {
        // Write the data.
        flatbuffers::FlatBufferBuilder fbb;
        flatbuffers::Offset<protocol::WaitReply> wait_reply = protocol::CreateWaitReply(
            fbb, to_flatbuf(fbb, found), to_flatbuf(fbb, remaining));
        fbb.Finish(wait_reply);
        auto status =
            client->WriteMessage(static_cast<int64_t>(protocol::MessageType::WaitReply),
                                 fbb.GetSize(), fbb.GetBufferPointer());
        if (status.ok()) {
          // The client is unblocked now because the wait call has returned.
          if (resolve_objects) {
            AsyncResolveObjectsFinish(client, current_task_id, was_blocked);
          }
        } else {
          // We failed to write to the client, so disconnect the client.
          RAY_LOG(WARNING)
              << "Failed to send WaitReply to client, so disconnecting client";
          // We failed to send the reply to the client, so disconnect the worker.
          ProcessDisconnectClientMessage(client);
        }
      });
  RAY_CHECK_OK(status);
}
/// Handle a WaitForDirectActorCallArgsRequest: wait until all of the given
/// object IDs are available, then notify the requesting worker using the
/// request's tag.
///
/// \param client The connection the request arrived on.
/// \param message_data The flatbuffer-encoded request.
void NodeManager::ProcessWaitForDirectActorCallArgsRequestMessage(
    const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data) {
  // Read the data.
  auto message =
      flatbuffers::GetRoot<protocol::WaitForDirectActorCallArgsRequest>(message_data);
  int64_t tag = message->tag();
  std::vector<ObjectID> object_ids = from_flatbuf<ObjectID>(*message->object_ids());
  std::vector<ObjectID> required_object_ids;
  for (auto const &object_id : object_ids) {
    if (!task_dependency_manager_.CheckObjectLocal(object_id)) {
      // Add any missing objects to the list to subscribe to in the task
      // dependency manager. These objects will be pulled from remote node
      // managers and reconstructed if necessary.
      required_object_ids.push_back(object_id);
    }
  }
  // Wait for all objects (timeout -1, num_required == all; presumably this
  // means "no timeout" — see ObjectManager::Wait), then notify the worker.
  ray::Status status = object_manager_.Wait(
      object_ids, -1, object_ids.size(), false,
      [this, client, tag](std::vector<ObjectID> found, std::vector<ObjectID> remaining) {
        RAY_CHECK(remaining.empty());
        std::shared_ptr<Worker> worker = worker_pool_.GetRegisteredWorker(client);
        if (worker == nullptr) {
          RAY_LOG(ERROR) << "Lost worker for wait request " << client;
        } else {
          worker->DirectActorCallArgWaitComplete(tag);
        }
      });
  RAY_CHECK_OK(status);
}
void NodeManager::ProcessPushErrorRequestMessage(const uint8_t *message_data) {
auto message = flatbuffers::GetRoot<protocol::PushErrorRequest>(message_data);
auto const &type = string_from_flatbuf(*message->type());
auto const &error_message = string_from_flatbuf(*message->error_message());
double timestamp = message->timestamp();
JobID job_id = from_flatbuf<JobID>(*message->job_id());
auto error_data_ptr = gcs::CreateErrorTableData(type, error_message, timestamp, job_id);
RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr));
}
/// Handle a PrepareActorCheckpointRequest: generate checkpoint data for the
/// requesting actor, persist it to the GCS, and reply to the worker with the
/// new checkpoint ID.
///
/// \param client The connection of the worker hosting the actor.
/// \param message_data The flatbuffer-encoded request.
void NodeManager::ProcessPrepareActorCheckpointRequest(
    const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data) {
  auto message =
      flatbuffers::GetRoot<protocol::PrepareActorCheckpointRequest>(message_data);
  ActorID actor_id = from_flatbuf<ActorID>(*message->actor_id());
  RAY_LOG(DEBUG) << "Preparing checkpoint for actor " << actor_id;
  const auto &actor_entry = actor_registry_.find(actor_id);
  RAY_CHECK(actor_entry != actor_registry_.end());
  // The requesting worker must actually be the one hosting this actor.
  std::shared_ptr<Worker> worker = worker_pool_.GetRegisteredWorker(client);
  RAY_CHECK(worker && worker->GetActorId() == actor_id);
  std::shared_ptr<ActorCheckpointData> checkpoint_data;
  if (actor_entry->second.GetTableData().is_direct_call()) {
    // Direct-call actors have no queued task to include in the checkpoint.
    checkpoint_data =
        actor_entry->second.GenerateCheckpointData(actor_entry->first, nullptr);
  } else {
    // Find the task that is running on this actor.
    const auto task_id = worker->GetAssignedTaskId();
    const Task &task = local_queues_.GetTaskOfState(task_id, TaskState::RUNNING);
    // Generate checkpoint data.
    checkpoint_data =
        actor_entry->second.GenerateCheckpointData(actor_entry->first, &task);
  }
  // Write checkpoint data to GCS.
  RAY_CHECK_OK(gcs_client_->Actors().AsyncAddCheckpoint(
      checkpoint_data, [worker, checkpoint_data](Status status) {
        ActorCheckpointID checkpoint_id =
            ActorCheckpointID::FromBinary(checkpoint_data->checkpoint_id());
        RAY_CHECK(status.ok()) << "Add checkpoint failed, actor is "
                               << worker->GetActorId() << " checkpoint_id is "
                               << checkpoint_id;
        RAY_LOG(DEBUG) << "Checkpoint " << checkpoint_id << " saved for actor "
                       << worker->GetActorId();
        // Send reply to worker.
        flatbuffers::FlatBufferBuilder fbb;
        auto reply = ray::protocol::CreatePrepareActorCheckpointReply(
            fbb, to_flatbuf(fbb, checkpoint_id));
        fbb.Finish(reply);
        worker->Connection()->WriteMessageAsync(
            static_cast<int64_t>(protocol::MessageType::PrepareActorCheckpointReply),
            fbb.GetSize(), fbb.GetBufferPointer(), [](const ray::Status &status) {
              if (!status.ok()) {
                RAY_LOG(WARNING)
                    << "Failed to send PrepareActorCheckpointReply to client";
              }
            });
      }));
}
void NodeManager::ProcessNotifyActorResumedFromCheckpoint(const uint8_t *message_data) {
auto message =
flatbuffers::GetRoot<protocol::NotifyActorResumedFromCheckpoint>(message_data);
ActorID actor_id = from_flatbuf<ActorID>(*message->actor_id());
ActorCheckpointID checkpoint_id =
from_flatbuf<ActorCheckpointID>(*message->checkpoint_id());
RAY_LOG(DEBUG) << "Actor " << actor_id << " was resumed from checkpoint "
<< checkpoint_id;
checkpoint_id_to_restore_.emplace(actor_id, checkpoint_id);
}
/// Handle a ReportActiveObjectIDs message: record the set of object IDs the
/// reporting worker or driver currently holds references to.
///
/// \param client The connection the report arrived on.
/// \param message_data The flatbuffer-encoded report.
void NodeManager::ProcessReportActiveObjectIDs(
    const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data) {
  // The sender may be either a worker or a driver; try both registries.
  std::shared_ptr<Worker> worker = worker_pool_.GetRegisteredWorker(client);
  if (!worker) {
    worker = worker_pool_.GetRegisteredDriver(client);
    if (!worker) {
      RAY_LOG(ERROR) << "Ignoring object ids report from failed / unknown worker.";
      return;
    }
  }
  auto message = flatbuffers::GetRoot<protocol::ReportActiveObjectIDs>(message_data);
  worker->SetActiveObjectIds(
      unordered_set_from_flatbuf<ObjectID>(*message->object_ids()));
}
/// Handle a SubmitTask message: deserialize the submitted task and submit it
/// to this raylet for scheduling.
///
/// \param message_data The flatbuffer-encoded SubmitTaskRequest.
void NodeManager::ProcessSubmitTaskMessage(const uint8_t *message_data) {
  // The task spec travels inside the flatbuffer as a serialized protobuf.
  auto request = flatbuffers::GetRoot<protocol::SubmitTaskRequest>(message_data);
  rpc::Task task_message;
  RAY_CHECK(task_message.mutable_task_spec()->ParseFromArray(
      request->task_spec()->data(), request->task_spec()->size()));
  // The task was submitted locally, so there is no uncommitted lineage.
  SubmitTask(Task(task_message), Lineage());
}
/// New-scheduler path: pop tasks from tasks_to_dispatch_ in FIFO order and
/// hand each one a worker plus its acquired resources, invoking the stored
/// reply callback. Stops as soon as no worker or no resources are available.
void NodeManager::DispatchScheduledTasksToWorkers() {
  RAY_CHECK(new_scheduler_enabled_);
  while (!tasks_to_dispatch_.empty()) {
    auto task = tasks_to_dispatch_.front();
    auto reply = task.first;
    auto spec = task.second.GetTaskSpecification();
    std::shared_ptr<Worker> worker = worker_pool_.PopWorker(spec);
    if (worker == nullptr) {
      // No idle worker; leave the task at the front and retry later.
      return;
    }
    bool schedulable = new_resource_scheduler_->SubtractNodeAvailableResources(
        self_node_id_.Binary(), spec.GetRequiredResources().GetResourceMap());
    if (!schedulable) {
      // NOTE(review): the worker popped above is not pushed back to the pool
      // on this path — confirm whether it is intentionally held or leaked.
      return;
    }
    // Handle the allocation to specific resource IDs.
    auto acquired_resources =
        local_available_resources_.Acquire(spec.GetRequiredResources());
    cluster_resource_map_[self_node_id_].Acquire(spec.GetRequiredResources());
    // Actor creation resources persist for the worker's lifetime; plain task
    // resources are released when the task finishes.
    if (spec.IsActorCreationTask()) {
      worker->SetLifetimeResourceIds(acquired_resources);
    } else {
      worker->SetTaskResourceIds(acquired_resources);
    }
    // Grant the worker locally (no spillback: nil node ID, empty address).
    reply(worker, ClientID::Nil(), "", -1);
    tasks_to_dispatch_.pop_front();
  }
}
/// New-scheduler path: for each queued task, pick the best schedulable node.
/// Tasks placed locally wait for their arguments and then dispatch; tasks
/// placed remotely are spilled back to the chosen node via the reply
/// callback. Stops when no node can satisfy the head-of-queue request.
void NodeManager::NewSchedulerSchedulePendingTasks() {
  RAY_CHECK(new_scheduler_enabled_);
  while (!tasks_to_schedule_.empty()) {
    auto work = tasks_to_schedule_.front();
    auto task = work.second;
    auto request_resources =
        task.GetTaskSpecification().GetRequiredResources().GetResourceMap();
    int64_t violations = 0;
    std::string node_id_string =
        new_resource_scheduler_->GetBestSchedulableNode(request_resources, &violations);
    if (node_id_string.empty()) {
      /// There is no node that has available resources to run the request.
      break;
    } else {
      if (node_id_string == self_node_id_.Binary()) {
        // Schedule locally: dispatch once the task's arguments are local.
        WaitForTaskArgsRequests(work);
      } else {
        // Spill back to a remote node: reserve its resources and tell the
        // client (via the reply callback) where to retry.
        new_resource_scheduler_->SubtractNodeAvailableResources(node_id_string,
                                                                request_resources);
        ClientID node_id = ClientID::FromBinary(node_id_string);
        auto node_info_opt = gcs_client_->Nodes().Get(node_id);
        RAY_CHECK(node_info_opt)
            << "Spilling back to a node manager, but no GCS info found for node "
            << node_id;
        work.first(nullptr, node_id, node_info_opt->node_manager_address(),
                   node_info_opt->node_manager_port());
      }
      tasks_to_schedule_.pop_front();
    }
  }
  DispatchScheduledTasksToWorkers();
}
/// New-scheduler path: queue `work` for dispatch once all of its task's
/// object dependencies are local. Tasks without dependencies are queued
/// immediately.
///
/// \param work Pair of the reply callback and the task to dispatch.
void NodeManager::WaitForTaskArgsRequests(std::pair<ScheduleFn, Task> &work) {
  RAY_CHECK(new_scheduler_enabled_);
  std::vector<ObjectID> object_ids = work.second.GetTaskSpecification().GetDependencies();
  if (object_ids.size() > 0) {
    // Wait for every dependency to become local, then dispatch. `work` is
    // captured by value, so the callback owns its own copy.
    ray::Status status = object_manager_.Wait(
        object_ids, -1, object_ids.size(), false,
        [this, work](std::vector<ObjectID> found, std::vector<ObjectID> remaining) {
          RAY_CHECK(remaining.empty());
          tasks_to_dispatch_.push_back(work);
          DispatchScheduledTasksToWorkers();
        });
    RAY_CHECK_OK(status);
  } else {
    // No dependencies: the task can be dispatched as soon as resources and a
    // worker are available.
    tasks_to_dispatch_.push_back(work);
  }
}
/// Handle a RequestWorkerLease RPC: schedule the given task spec and, once a
/// worker is granted (or the request is spilled back to another raylet),
/// reply with the worker's address or the retry address respectively.
///
/// \param request The RPC request carrying the task/resource spec.
/// \param reply Output reply; filled with either worker_address or
///        retry_at_raylet_address before send_reply_callback is invoked.
/// \param send_reply_callback Callback to send the reply.
void NodeManager::HandleRequestWorkerLease(const rpc::RequestWorkerLeaseRequest &request,
                                           rpc::RequestWorkerLeaseReply *reply,
                                           rpc::SendReplyCallback send_reply_callback) {
  rpc::Task task_message;
  task_message.mutable_task_spec()->CopyFrom(request.resource_spec());
  Task task(task_message);
  bool is_actor_creation_task = task.GetTaskSpecification().IsActorCreationTask();
  ActorID actor_id = ActorID::Nil();
  if (is_actor_creation_task) {
    actor_id = task.GetTaskSpecification().ActorCreationId();
    // Save the actor creation task spec to GCS, which is needed to
    // reconstruct the actor when raylet detect it dies.
    std::shared_ptr<rpc::TaskTableData> data = std::make_shared<rpc::TaskTableData>();
    data->mutable_task()->mutable_task_spec()->CopyFrom(
        task.GetTaskSpecification().GetMessage());
    RAY_CHECK_OK(gcs_client_->Tasks().AsyncAdd(data, nullptr));
  }
  // New-scheduler path: queue the task with a callback that fills in either
  // the granted worker's address or the spillback address.
  if (new_scheduler_enabled_) {
    auto request_resources = task.GetTaskSpecification().GetRequiredResources();
    auto work = std::make_pair(
        [this, request_resources, reply, send_reply_callback](
            std::shared_ptr<Worker> worker, ClientID spillback_to, std::string address,
            int port) {
          if (worker != nullptr) {
            // Granted locally: record the lease and its resources so they can
            // be returned in HandleReturnWorker.
            reply->mutable_worker_address()->set_ip_address(
                initial_config_.node_manager_address);
            reply->mutable_worker_address()->set_port(worker->Port());
            reply->mutable_worker_address()->set_worker_id(worker->WorkerId().Binary());
            reply->mutable_worker_address()->set_raylet_id(self_node_id_.Binary());
            RAY_CHECK(leased_workers_.find(worker->WorkerId()) == leased_workers_.end());
            leased_workers_[worker->WorkerId()] = worker;
            leased_worker_resources_[worker->WorkerId()] = request_resources;
          } else {
            // Spilled back: tell the client which raylet to retry at.
            reply->mutable_retry_at_raylet_address()->set_ip_address(address);
            reply->mutable_retry_at_raylet_address()->set_port(port);
            reply->mutable_retry_at_raylet_address()->set_raylet_id(
                spillback_to.Binary());
          }
          send_reply_callback(Status::OK(), nullptr, nullptr);
        },
        task);
    tasks_to_schedule_.push_back(work);
    NewSchedulerSchedulePendingTasks();
    return;
  }
  // Override the task dispatch to call back to the client instead of executing the
  // task directly on the worker.
  RAY_LOG(DEBUG) << "Worker lease request " << task.GetTaskSpecification().TaskId();
  TaskID task_id = task.GetTaskSpecification().TaskId();
  task.OnDispatchInstead(
      [this, task_id, reply, send_reply_callback](
          const std::shared_ptr<void> granted, const std::string &address, int port,
          const WorkerID &worker_id, const ResourceIdSet &resource_ids) {
        RAY_LOG(DEBUG) << "Worker lease request DISPATCH " << task_id;
        reply->mutable_worker_address()->set_ip_address(address);
        reply->mutable_worker_address()->set_port(port);
        reply->mutable_worker_address()->set_worker_id(worker_id.Binary());
        reply->mutable_worker_address()->set_raylet_id(self_node_id_.Binary());
        // Report the specific resource IDs granted to the lease, split into
        // whole and fractional resource units.
        for (const auto &mapping : resource_ids.AvailableResources()) {
          auto resource = reply->add_resource_mapping();
          resource->set_name(mapping.first);
          for (const auto &id : mapping.second.WholeIds()) {
            auto rid = resource->add_resource_ids();
            rid->set_index(id);
            rid->set_quantity(1.0);
          }
          for (const auto &id : mapping.second.FractionalIds()) {
            auto rid = resource->add_resource_ids();
            rid->set_index(id.first);
            rid->set_quantity(id.second.ToDouble());
          }
        }
        send_reply_callback(Status::OK(), nullptr, nullptr);
        // TODO(swang): Kill worker if other end hangs up.
        // TODO(swang): Implement a lease term by which the owner needs to return the
        // worker.
        RAY_CHECK(leased_workers_.find(worker_id) == leased_workers_.end())
            << "Worker is already leased out " << worker_id;
        leased_workers_[worker_id] = std::static_pointer_cast<Worker>(granted);
      });
  task.OnSpillbackInstead(
      [reply, task_id, send_reply_callback](const ClientID &spillback_to,
                                            const std::string &address, int port) {
        RAY_LOG(DEBUG) << "Worker lease request SPILLBACK " << task_id;
        reply->mutable_retry_at_raylet_address()->set_ip_address(address);
        reply->mutable_retry_at_raylet_address()->set_port(port);
        reply->mutable_retry_at_raylet_address()->set_raylet_id(spillback_to.Binary());
        send_reply_callback(Status::OK(), nullptr, nullptr);
      });
  SubmitTask(task, Lineage());
}
/// Handle a ReturnWorker RPC: the owner is returning a leased worker.
/// Releases the worker's resources back to the local node and either makes
/// the worker available again or disconnects it, as requested.
///
/// \param request Contains the returned worker's id and a disconnect flag.
/// \param reply Unused; the reply carries only a status.
/// \param send_reply_callback Invoked exactly once with the final status.
void NodeManager::HandleReturnWorker(const rpc::ReturnWorkerRequest &request,
                                     rpc::ReturnWorkerReply *reply,
                                     rpc::SendReplyCallback send_reply_callback) {
  // Read the id of the worker being returned.
  auto worker_id = WorkerID::FromBinary(request.worker_id());
  // NOTE: Use find() rather than operator[]; operator[] default-inserts a
  // null entry into leased_workers_ for an unknown worker id, mutating the
  // map as a side effect of what should be a read.
  std::shared_ptr<Worker> worker;
  auto leased_it = leased_workers_.find(worker_id);
  if (leased_it != leased_workers_.end()) {
    worker = leased_it->second;
  }
  if (new_scheduler_enabled_) {
    // The new-scheduler path assumes the lease exists; fail with a clear
    // message instead of dereferencing a null pointer.
    RAY_CHECK(worker) << "Returned worker is not leased: " << worker_id;
    if (worker->IsBlocked()) {
      // If worker blocked, unblock it to return the cpu resources back to the worker.
      HandleDirectCallTaskUnblocked(worker);
    }
    auto it = leased_worker_resources_.find(worker_id);
    RAY_CHECK(it != leased_worker_resources_.end());
    // Give the resources allocated for this lease back to the node.
    new_resource_scheduler_->AddNodeAvailableResources(self_node_id_.Binary(),
                                                       it->second.GetResourceMap());
    if (worker->borrowed_cpu_resources_.GetResourceMap().size()) {
      // This machine is oversubscribed, so the worker didn't get back cpus when
      // unblocked. Thus we need to subtract these cpus, as the previous
      // "AddNodeAvailableResources" call assumed they were allocated to this worker.
      new_resource_scheduler_->SubtractNodeAvailableResources(
          self_node_id_.Binary(), worker->borrowed_cpu_resources_.GetResourceMap());
      worker->borrowed_cpu_resources_ = ResourceSet();
    }
    leased_worker_resources_.erase(it);
    // Update resource ids.
    auto const &task_resources = worker->GetTaskResourceIds();
    local_available_resources_.ReleaseConstrained(
        task_resources, cluster_resource_map_[self_node_id_].GetTotalResources());
    cluster_resource_map_[self_node_id_].Release(task_resources.ToResourceSet());
    worker->ResetTaskResourceIds();
    // TODO (ion): Handle ProcessDisconnectClientMessage()
    HandleWorkerAvailable(worker);
    leased_workers_.erase(worker_id);
    send_reply_callback(Status::OK(), nullptr, nullptr);
    return;
  }
  leased_workers_.erase(worker_id);
  Status status;
  if (worker) {
    if (request.disconnect_worker()) {
      ProcessDisconnectClientMessage(worker->Connection());
    } else {
      // Handle the edge case where the worker was returned before we got the
      // unblock RPC by unblocking it immediately (unblock is idempotent).
      if (worker->IsBlocked()) {
        HandleDirectCallTaskUnblocked(worker);
      }
      HandleWorkerAvailable(worker);
    }
  } else {
    status = Status::Invalid("Returned worker does not exist any more");
  }
  send_reply_callback(status, nullptr, nullptr);
}
/// Handle a ForwardTask RPC: reconstruct the forwarded task plus its
/// uncommitted lineage from the request and resubmit it locally.
void NodeManager::HandleForwardTask(const rpc::ForwardTaskRequest &request,
                                    rpc::ForwardTaskReply *reply,
                                    rpc::SendReplyCallback send_reply_callback) {
  // Rebuild the uncommitted lineage sent along with the task.
  const TaskID forwarded_task_id = TaskID::FromBinary(request.task_id());
  Lineage lineage;
  for (int idx = 0; idx < request.uncommitted_tasks_size(); ++idx) {
    Task lineage_task(request.uncommitted_tasks(idx));
    RAY_CHECK(lineage.SetEntry(lineage_task, GcsStatus::UNCOMMITTED));
  }
  // The forwarded task itself is one of the lineage entries.
  const Task &forwarded_task = lineage.GetEntry(forwarded_task_id)->TaskData();
  RAY_LOG(DEBUG) << "Received forwarded task "
                 << forwarded_task.GetTaskSpecification().TaskId() << " on node "
                 << self_node_id_
                 << " spillback=" << forwarded_task.GetTaskExecutionSpec().NumForwards();
  SubmitTask(forwarded_task, lineage, /* forwarded = */ true);
  send_reply_callback(Status::OK(), nullptr, nullptr);
}
void NodeManager::ProcessSetResourceRequest(
const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data) {
// Read the SetResource message
auto message = flatbuffers::GetRoot<protocol::SetResourceRequest>(message_data);
auto const &resource_name = string_from_flatbuf(*message->resource_name());
double const &capacity = message->capacity();
bool is_deletion = capacity <= 0;
ClientID node_id = from_flatbuf<ClientID>(*message->client_id());
// If the python arg was null, set node_id to the local node id.
if (node_id.IsNil()) {
node_id = self_node_id_;
}
if (is_deletion &&
cluster_resource_map_[node_id].GetTotalResources().GetResourceMap().count(
resource_name) == 0) {
// Resource does not exist in the cluster resource map, thus nothing to delete.
// Return..
RAY_LOG(INFO) << "[ProcessDeleteResourceRequest] Trying to delete resource "
<< resource_name << ", but it does not exist. Doing nothing..";
return;
}
// Submit to the resource table. This calls the ResourceCreateUpdated or ResourceDeleted
// callback, which updates cluster_resource_map_.
if (is_deletion) {
RAY_CHECK_OK(
gcs_client_->Nodes().AsyncDeleteResources(node_id, {resource_name}, nullptr));
} else {
std::unordered_map<std::string, std::shared_ptr<gcs::ResourceTableData>> data_map;
auto resource_table_data = std::make_shared<gcs::ResourceTableData>();
resource_table_data->set_resource_capacity(capacity);
data_map.emplace(resource_name, resource_table_data);
RAY_CHECK_OK(gcs_client_->Nodes().AsyncUpdateResources(node_id, data_map, nullptr));
}
}
/// Run the scheduling policy over `resource_map`, forward tasks placed on
/// remote nodes, enqueue tasks placed locally, and move every task the
/// policy could not place to the infeasible queue (reporting an error to
/// the owning job for each).
///
/// \param resource_map Per-node resource availability; the local node's
///        entry has its load refreshed before the policy runs.
void NodeManager::ScheduleTasks(
    std::unordered_map<ClientID, SchedulingResources> &resource_map) {
  // If the resource map contains the local raylet, update load before calling policy.
  if (resource_map.count(self_node_id_) > 0) {
    resource_map[self_node_id_].SetLoadResources(local_queues_.GetResourceLoad());
  }
  // Invoke the scheduling policy.
  auto policy_decision = scheduling_policy_.Schedule(resource_map, self_node_id_);
#ifndef NDEBUG
  RAY_LOG(DEBUG) << "[NM ScheduleTasks] policy decision:";
  for (const auto &task_client_pair : policy_decision) {
    TaskID task_id = task_client_pair.first;
    ClientID node_id = task_client_pair.second;
    RAY_LOG(DEBUG) << task_id << " --> " << node_id;
  }
#endif
  // Extract decision for this raylet.
  std::unordered_set<TaskID> local_task_ids;
  // Iterate over (taskid, clientid) pairs, extract tasks assigned to the local node.
  for (const auto &task_client_pair : policy_decision) {
    const TaskID &task_id = task_client_pair.first;
    const ClientID &node_id = task_client_pair.second;
    if (node_id == self_node_id_) {
      local_task_ids.insert(task_id);
    } else {
      // TODO(atumanov): need a better interface for task exit on forward.
      // (See design_docs/task_states.rst for the state transition diagram.)
      Task task;
      if (local_queues_.RemoveTask(task_id, &task)) {
        // Attempt to forward the task. If this fails to forward the task,
        // the task will be resubmit locally.
        ForwardTaskOrResubmit(task, node_id);
      }
    }
  }
  // Transition locally placed tasks to waiting or ready for dispatch.
  if (local_task_ids.size() > 0) {
    std::vector<Task> tasks = local_queues_.RemoveTasks(local_task_ids);
    for (const auto &t : tasks) {
      EnqueuePlaceableTask(t);
    }
  }
  // All remaining placeable tasks should be registered with the task dependency
  // manager. TaskDependencyManager::TaskPending() is assumed to be idempotent.
  // TODO(atumanov): evaluate performance implications of registering all new tasks on
  // submission vs. registering remaining queued placeable tasks here.
  std::unordered_set<TaskID> move_task_set;
  for (const auto &task : local_queues_.GetTasks(TaskState::PLACEABLE)) {
    task_dependency_manager_.TaskPending(task);
    move_task_set.insert(task.GetTaskSpecification().TaskId());
    // Push a warning to the task's driver that this task is currently infeasible.
    {
      // TODO(rkn): Define this constant somewhere else.
      std::string type = "infeasible_task";
      std::ostringstream error_message;
      error_message
          << "The actor or task with ID " << task.GetTaskSpecification().TaskId()
          << " is infeasible and cannot currently be scheduled. It requires "
          << task.GetTaskSpecification().GetRequiredResources().ToString()
          << " for execution and "
          << task.GetTaskSpecification().GetRequiredPlacementResources().ToString()
          << " for placement, however there are no nodes in the cluster that can "
          << "provide the requested resources. To resolve this issue, consider "
          << "reducing the resource requests of this task or add nodes that "
          << "can fit the task.";
      auto error_data_ptr =
          gcs::CreateErrorTableData(type, error_message.str(), current_time_ms(),
                                    task.GetTaskSpecification().JobId());
      RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr));
    }
    // Assert that this placeable task is not feasible locally (necessary but not
    // sufficient).
    RAY_CHECK(!task.GetTaskSpecification().GetRequiredPlacementResources().IsSubset(
        cluster_resource_map_[self_node_id_].GetTotalResources()));
  }
  // Assumption: all remaining placeable tasks are infeasible and are moved to the
  // infeasible task queue. Infeasible task queue is checked when new nodes join.
  local_queues_.MoveTasks(move_task_set, TaskState::PLACEABLE, TaskState::INFEASIBLE);
  // Check the invariant that no placeable tasks remain after a call to the policy.
  RAY_CHECK(local_queues_.GetTasks(TaskState::PLACEABLE).size() == 0);
}
/// Invariant check: every task the dependency manager reports as pending
/// must also be tracked by one of the local scheduling queues.
/// \return false if any pending task is missing from the local queues.
bool NodeManager::CheckDependencyManagerInvariant() const {
  for (const auto &pending_id : task_dependency_manager_.GetPendingTasks()) {
    if (!local_queues_.HasTask(pending_id)) {
      return false;
    }
  }
  // TODO(atumanov): perform the check in the opposite direction.
  return true;
}
void NodeManager::TreatTaskAsFailed(const Task &task, const ErrorType &error_type) {
const TaskSpecification &spec = task.GetTaskSpecification();
RAY_LOG(DEBUG) << "Treating task " << spec.TaskId() << " as failed because of error "
<< ErrorType_Name(error_type) << ".";
// If this was an actor creation task that tried to resume from a checkpoint,
// then erase it here since the task did not finish.
if (spec.IsActorCreationTask()) {
ActorID actor_id = spec.ActorCreationId();
checkpoint_id_to_restore_.erase(actor_id);
}
// Loop over the return IDs (except the dummy ID) and store a fake object in
// the object store.
int64_t num_returns = spec.NumReturns();
if (spec.IsActorCreationTask() || spec.IsActorTask()) {
// TODO(rkn): We subtract 1 to avoid the dummy ID. However, this leaks
// information about the TaskSpecification implementation.
num_returns -= 1;
}
// Determine which IDs should be marked as failed.
std::vector<plasma::ObjectID> objects_to_fail;
for (int64_t i = 0; i < num_returns; i++) {
objects_to_fail.push_back(spec.ReturnId(i, TaskTransportType::RAYLET).ToPlasmaId());
}
const JobID job_id = task.GetTaskSpecification().JobId();
MarkObjectsAsFailed(error_type, objects_to_fail, job_id);
task_dependency_manager_.TaskCanceled(spec.TaskId());
// Notify the task dependency manager that we no longer need this task's
// object dependencies. TODO(swang): Ideally, we would check the return value
// here. However, we don't know at this point if the task was in the WAITING
// or READY queue before, in which case we would not have been subscribed to
// its dependencies.
task_dependency_manager_.UnsubscribeGetDependencies(spec.TaskId());
}
/// Store an error marker in the object store for each id in
/// `objects_to_fail`, so that anyone getting those objects observes the
/// error instead of hanging.
///
/// \param error_type Encoded (as its integer value) into each object's metadata.
/// \param objects_to_fail The plasma object ids to overwrite with markers.
/// \param job_id Job notified via the GCS error table if a marker cannot be
///        written.
void NodeManager::MarkObjectsAsFailed(const ErrorType &error_type,
                                      const std::vector<plasma::ObjectID> objects_to_fail,
                                      const JobID &job_id) {
  const std::string meta = std::to_string(static_cast<int>(error_type));
  for (const auto &object_id : objects_to_fail) {
    arrow::Status status = store_client_.CreateAndSeal(object_id, "", meta);
    // An "already exists" status is benign: the object (or a marker) is
    // present, so getters will not hang.
    if (!status.ok() && !plasma::IsPlasmaObjectExists(status)) {
      // If we failed to save the error code, log a warning and push an error message
      // to the driver.
      std::ostringstream stream;
      // NOTE: fixed grammar in the user-facing message ("An plasma" -> "A plasma").
      stream << "A plasma error (" << status.ToString() << ") occurred while saving"
             << " error code to object " << object_id << ". Anyone who's getting this"
             << " object may hang forever.";
      std::string error_message = stream.str();
      RAY_LOG(WARNING) << error_message;
      auto error_data_ptr =
          gcs::CreateErrorTableData("task", error_message, current_time_ms(), job_id);
      RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr));
    }
  }
}
/// Asynchronously check the locations of `task`'s return values and mark the
/// task as failed (OBJECT_UNRECONSTRUCTABLE) if any return object no longer
/// exists on any node. The task is failed at most once across all lookups.
///
/// \param task The task whose return values should be checked.
void NodeManager::TreatTaskAsFailedIfLost(const Task &task) {
  const TaskSpecification &spec = task.GetTaskSpecification();
  RAY_LOG(DEBUG) << "Treating task " << spec.TaskId()
                 << " as failed if return values lost.";
  // Loop over the return IDs (except the dummy ID) and check whether a
  // location for the return ID exists.
  int64_t num_returns = spec.NumReturns();
  if (spec.IsActorCreationTask() || spec.IsActorTask()) {
    // TODO(rkn): We subtract 1 to avoid the dummy ID. However, this leaks
    // information about the TaskSpecification implementation.
    num_returns -= 1;
  }
  // Use a shared flag to make sure that we only treat the task as failed at
  // most once. This flag will get deallocated once all of the object table
  // lookup callbacks are fired.
  auto task_marked_as_failed = std::make_shared<bool>(false);
  for (int64_t i = 0; i < num_returns; i++) {
    const ObjectID object_id = spec.ReturnId(i, TaskTransportType::RAYLET);
    // Lookup the return value's locations.
    // NOTE: `task` is captured by value so the copy outlives this method call
    // and remains valid when the asynchronous callback fires.
    RAY_CHECK_OK(object_directory_->LookupLocations(
        object_id, [this, task_marked_as_failed, task](
                       const ray::ObjectID &object_id,
                       const std::unordered_set<ray::ClientID> &clients) {
          if (!*task_marked_as_failed) {
            // Only process the object locations if we haven't already marked the
            // task as failed.
            if (clients.empty()) {
              // The object does not exist on any nodes but has been created
              // before, so the object has been lost. Mark the task as failed to
              // prevent any tasks that depend on this object from hanging.
              TreatTaskAsFailed(task, ErrorType::OBJECT_UNRECONSTRUCTABLE);
              *task_marked_as_failed = true;
            }
          }
        }));
  }
}
/// Submit `task` for execution. Non-actor tasks are queued for placement (or
/// dispatch, if forwarded); actor tasks are routed based on the actor's
/// known location: executed locally, forwarded to the owning node, failed if
/// the actor is dead, or parked until the actor's location is discovered.
///
/// \param task The task to submit.
/// \param uncommitted_lineage Lineage added to the cache when the task was
///        forwarded from another node.
/// \param forwarded True iff the task was forwarded here by another raylet.
void NodeManager::SubmitTask(const Task &task, const Lineage &uncommitted_lineage,
                             bool forwarded) {
  stats::TaskCountReceived().Record(1);
  const TaskSpecification &spec = task.GetTaskSpecification();
  const TaskID &task_id = spec.TaskId();
  RAY_LOG(DEBUG) << "Submitting task: " << task.DebugString();
  if (local_queues_.HasTask(task_id)) {
    RAY_LOG(WARNING) << "Submitted task " << task_id
                     << " is already queued and will not be reconstructed. This is most "
                        "likely due to spurious reconstruction.";
    return;
  }
  // Add the task and its uncommitted lineage to the lineage cache.
  if (forwarded) {
    lineage_cache_.AddUncommittedLineage(task_id, uncommitted_lineage);
  } else {
    if (!lineage_cache_.CommitTask(task)) {
      RAY_LOG(WARNING)
          << "Task " << task_id
          << " already committed to the GCS. This is most likely due to reconstruction.";
    }
  }
  if (spec.IsActorTask()) {
    // Check whether we know the location of the actor.
    const auto actor_entry = actor_registry_.find(spec.ActorId());
    bool seen = actor_entry != actor_registry_.end();
    // If we have already seen this actor and this actor is not being reconstructed,
    // its location is known.
    bool location_known =
        seen && actor_entry->second.GetState() != ActorTableData::RECONSTRUCTING;
    if (location_known) {
      if (actor_entry->second.GetState() == ActorTableData::DEAD) {
        // If this actor is dead, either because the actor process is dead
        // or because its residing node is dead, treat this task as failed.
        TreatTaskAsFailed(task, ErrorType::ACTOR_DIED);
      } else {
        // If this actor is alive, check whether this actor is local.
        auto node_manager_id = actor_entry->second.GetNodeManagerId();
        if (node_manager_id == self_node_id_) {
          // The actor is local.
          int64_t expected_task_counter =
              GetExpectedTaskCounter(actor_registry_, spec.ActorId(), spec.CallerId());
          if (static_cast<int64_t>(spec.ActorCounter()) < expected_task_counter) {
            // A task that has already been executed before has been found. The
            // task will be treated as failed if at least one of the task's
            // return values have been evicted, to prevent the application from
            // hanging.
            // TODO(swang): Clean up the task from the lineage cache? If the
            // task is not marked as failed, then it may never get marked as
            // ready to flush to the GCS.
            RAY_LOG(WARNING) << "A task was resubmitted, so we are ignoring it. This "
                             << "should only happen during reconstruction.";
            TreatTaskAsFailedIfLost(task);
          } else {
            // The task has not yet been executed. Queue the task for local
            // execution, bypassing placement.
            EnqueuePlaceableTask(task);
          }
        } else {
          // The actor is remote. Forward the task to the node manager that owns
          // the actor.
          // Attempt to forward the task. If this fails to forward the task,
          // the task will be resubmit locally.
          ForwardTaskOrResubmit(task, node_manager_id);
        }
      }
    } else {
      // The actor's location is unknown (never seen, or reconstructing).
      ObjectID actor_creation_dummy_object;
      if (!seen) {
        // We do not have a registered location for the object, so either the
        // actor has not yet been created or we missed the notification for the
        // actor creation because this node joined the cluster after the actor
        // was already created. Look up the actor's registered location in case
        // we missed the creation notification.
        const ActorID &actor_id = spec.ActorId();
        auto lookup_callback =
            [this, actor_id](Status status, const boost::optional<ActorTableData> &data) {
              if (data) {
                // The actor has been created. We only need the last entry, because
                // it represents the latest state of this actor.
                HandleActorStateTransition(actor_id, ActorRegistration(*data));
              }
            };
        RAY_CHECK_OK(gcs_client_->Actors().AsyncGet(actor_id, lookup_callback));
        actor_creation_dummy_object = spec.ActorCreationDummyObjectId();
      } else {
        actor_creation_dummy_object = actor_entry->second.GetActorCreationDependency();
      }
      // Keep the task queued until we discover the actor's location.
      // (See design_docs/task_states.rst for the state transition diagram.)
      local_queues_.QueueTasks({task}, TaskState::WAITING_FOR_ACTOR_CREATION);
      // The actor has not yet been created and may have failed. To make sure
      // that the actor is eventually recreated, we maintain the invariant that
      // if a task is in the MethodsWaitingForActorCreation queue, then it is
      // subscribed to its respective actor creation task and that task only.
      // Once the actor has been created and this method removed from the
      // waiting queue, the caller must make the corresponding call to
      // UnsubscribeGetDependencies.
      task_dependency_manager_.SubscribeGetDependencies(spec.TaskId(),
                                                        {actor_creation_dummy_object});
      // Mark the task as pending. It will be canceled once we discover the
      // actor's location and either execute the task ourselves or forward it
      // to another node.
      task_dependency_manager_.TaskPending(task);
    }
  } else {
    // This is a non-actor task. Queue the task for a placement decision or for dispatch
    // if the task was forwarded.
    if (forwarded) {
      // Check for local dependencies and enqueue as waiting or ready for dispatch.
      EnqueuePlaceableTask(task);
    } else {
      // (See design_docs/task_states.rst for the state transition diagram.)
      local_queues_.QueueTasks({task}, TaskState::PLACEABLE);
      ScheduleTasks(cluster_resource_map_);
      // TODO(atumanov): assert that !placeable.isempty() => insufficient available
      // resources locally.
    }
  }
}
/// Called when a direct-call worker blocks (e.g., on a `ray.get`). Releases
/// the worker's CPU resources back to the node so that other tasks can use
/// them while the worker is blocked, then tries to schedule/dispatch.
///
/// \param worker The blocking worker; may be null (no-op in that case).
void NodeManager::HandleDirectCallTaskBlocked(const std::shared_ptr<Worker> &worker) {
  if (new_scheduler_enabled_) {
    if (!worker) {
      return;
    }
    // Give the worker's CPUs back to both the id-based local accounting and
    // the new scheduler's per-node availability.
    auto const cpu_resource_ids = worker->ReleaseTaskCpuResources();
    local_available_resources_.Release(cpu_resource_ids);
    cluster_resource_map_[self_node_id_].Release(cpu_resource_ids.ToResourceSet());
    new_resource_scheduler_->AddNodeAvailableResources(
        self_node_id_.Binary(),
        cpu_resource_ids.ToResourceSet().GetResourceMap());
    worker->MarkBlocked();
    // The freed CPUs may allow pending tasks to be scheduled.
    NewSchedulerSchedulePendingTasks();
    return;
  }
  if (!worker || worker->GetAssignedTaskId().IsNil() || worker->IsBlocked()) {
    return;  // The worker may have died or is no longer processing the task.
  }
  // Legacy scheduler path: release the CPUs and retry dispatch.
  auto const cpu_resource_ids = worker->ReleaseTaskCpuResources();
  local_available_resources_.Release(cpu_resource_ids);
  cluster_resource_map_[self_node_id_].Release(cpu_resource_ids.ToResourceSet());
  worker->MarkBlocked();
  // Try dispatching tasks since CPU resources were just freed.
  DispatchTasks(local_queues_.GetReadyTasksByClass());
}
/// Called when a blocked direct-call worker unblocks. Reacquires the CPU
/// resources released when the worker blocked, unless the node is now
/// oversubscribed, in which case the worker continues without them.
///
/// \param worker The unblocking worker; may be null (no-op in that case).
void NodeManager::HandleDirectCallTaskUnblocked(const std::shared_ptr<Worker> &worker) {
  if (new_scheduler_enabled_) {
    if (!worker) {
      return;
    }
    auto it = leased_worker_resources_.find(worker->WorkerId());
    RAY_CHECK(it != leased_worker_resources_.end());
    const auto cpu_resources = it->second.GetNumCpus();
    bool oversubscribed = !local_available_resources_.Contains(cpu_resources);
    if (!oversubscribed) {
      // Reacquire the CPU resources for the worker. Note that care needs to be
      // taken if the user is using the specific CPU IDs since the IDs that we
      // reacquire here may be different from the ones that the task started with.
      auto const resource_ids = local_available_resources_.Acquire(cpu_resources);
      worker->AcquireTaskCpuResources(resource_ids);
      cluster_resource_map_[self_node_id_].Acquire(cpu_resources);
      new_resource_scheduler_->SubtractNodeAvailableResources(
          self_node_id_.Binary(), cpu_resources.GetResourceMap());
      worker->borrowed_cpu_resources_ = ResourceSet();
    } else {
      // Remember these are borrowed cpu resources, i.e., we did not return them
      // to the worker. HandleReturnWorker settles this debt when the lease ends.
      worker->borrowed_cpu_resources_ = cpu_resources;
    }
    worker->MarkUnblocked();
    NewSchedulerSchedulePendingTasks();
    return;
  }
  if (!worker || worker->GetAssignedTaskId().IsNil() || !worker->IsBlocked()) {
    return;  // The worker may have died or is no longer processing the task.
  }
  // Legacy scheduler path: look up the running task to find how many CPUs
  // the worker originally held.
  TaskID task_id = worker->GetAssignedTaskId();
  Task task = local_queues_.GetTaskOfState(task_id, TaskState::RUNNING);
  const auto required_resources = task.GetTaskSpecification().GetRequiredResources();
  const ResourceSet cpu_resources = required_resources.GetNumCpus();
  bool oversubscribed = !local_available_resources_.Contains(cpu_resources);
  if (!oversubscribed) {
    // Reacquire the CPU resources for the worker. Note that care needs to be
    // taken if the user is using the specific CPU IDs since the IDs that we
    // reacquire here may be different from the ones that the task started with.
    auto const resource_ids = local_available_resources_.Acquire(cpu_resources);
    worker->AcquireTaskCpuResources(resource_ids);
    cluster_resource_map_[self_node_id_].Acquire(cpu_resources);
  } else {
    // In this case, we simply don't reacquire the CPU resources for the worker.
    // The worker can keep running and when the task finishes, it will simply
    // not have any CPU resources to release.
    RAY_LOG(WARNING)
        << "Resources oversubscribed: "
        << cluster_resource_map_[self_node_id_].GetAvailableResources().ToString();
  }
  worker->MarkUnblocked();
  // The `ray.get` the worker was blocked on is done; drop its subscription.
  task_dependency_manager_.UnsubscribeGetDependencies(task_id);
}
/// Start resolving `required_object_ids` for a worker or driver that called
/// `ray.get`/`ray.wait`. Optionally marks the caller as blocked, releasing
/// its CPU resources until AsyncResolveObjectsFinish is called.
///
/// \param client Connection of the calling worker or driver.
/// \param required_object_ids Objects to fetch and/or reconstruct.
/// \param current_task_id The task doing the call (used for block tracking).
/// \param ray_get True for `ray.get` semantics, false for `ray.wait`.
/// \param mark_worker_blocked Whether to transition the caller to blocked.
void NodeManager::AsyncResolveObjects(
    const std::shared_ptr<LocalClientConnection> &client,
    const std::vector<ObjectID> &required_object_ids, const TaskID &current_task_id,
    bool ray_get, bool mark_worker_blocked) {
  std::shared_ptr<Worker> worker = worker_pool_.GetRegisteredWorker(client);
  if (worker) {
    // The client is a worker. If the worker is not already blocked and the
    // blocked task matches the one assigned to the worker, then mark the
    // worker as blocked. This temporarily releases any resources that the
    // worker holds while it is blocked.
    if (mark_worker_blocked && !worker->IsBlocked() &&
        current_task_id == worker->GetAssignedTaskId()) {
      Task task;
      RAY_CHECK(local_queues_.RemoveTask(current_task_id, &task));
      local_queues_.QueueTasks({task}, TaskState::RUNNING);
      // Get the CPU resources required by the running task.
      // Release the CPU resources.
      auto const cpu_resource_ids = worker->ReleaseTaskCpuResources();
      local_available_resources_.Release(cpu_resource_ids);
      cluster_resource_map_[self_node_id_].Release(cpu_resource_ids.ToResourceSet());
      worker->MarkBlocked();
      // Try dispatching tasks since we may have released some resources.
      DispatchTasks(local_queues_.GetReadyTasksByClass());
    }
  } else {
    // The client is a driver. Drivers do not hold resources, so we simply mark
    // the task as blocked.
    worker = worker_pool_.GetRegisteredDriver(client);
  }
  RAY_CHECK(worker);
  // Mark the task as blocked.
  if (mark_worker_blocked) {
    worker->AddBlockedTaskId(current_task_id);
    if (local_queues_.GetBlockedTaskIds().count(current_task_id) == 0) {
      local_queues_.AddBlockedTaskId(current_task_id);
    }
  }
  // Subscribe to the objects required by the task. These objects will be
  // fetched and/or reconstructed as necessary, until the objects become local
  // or are unsubscribed.
  if (ray_get) {
    // TODO(ekl) using the assigned task id is a hack to handle unsubscription for
    // HandleDirectCallUnblocked.
    task_dependency_manager_.SubscribeGetDependencies(
        mark_worker_blocked ? current_task_id : worker->GetAssignedTaskId(),
        required_object_ids);
  } else {
    task_dependency_manager_.SubscribeWaitDependencies(worker->WorkerId(),
                                                       required_object_ids);
  }
}
/// Finish an earlier AsyncResolveObjects call: unblock the worker/driver,
/// try to reacquire the CPU resources it released while blocked, and cancel
/// its outstanding `ray.get` subscriptions.
///
/// \param client Connection of the calling worker or driver.
/// \param current_task_id The task that was blocked.
/// \param was_blocked Whether the caller was marked blocked in the matching
///        AsyncResolveObjects call.
void NodeManager::AsyncResolveObjectsFinish(
    const std::shared_ptr<LocalClientConnection> &client, const TaskID &current_task_id,
    bool was_blocked) {
  std::shared_ptr<Worker> worker = worker_pool_.GetRegisteredWorker(client);
  // TODO(swang): Because the object dependencies are tracked in the task
  // dependency manager, we could actually remove this message entirely and
  // instead unblock the worker once all the objects become available.
  if (worker) {
    // The client is a worker. If the worker is not already unblocked and the
    // unblocked task matches the one assigned to the worker, then mark the
    // worker as unblocked. This returns the temporarily released resources to
    // the worker. Workers that have been marked dead have already been cleaned
    // up.
    if (was_blocked && worker->IsBlocked() &&
        current_task_id == worker->GetAssignedTaskId() && !worker->IsDead()) {
      // (See design_docs/task_states.rst for the state transition diagram.)
      Task task;
      RAY_CHECK(local_queues_.RemoveTask(current_task_id, &task));
      local_queues_.QueueTasks({task}, TaskState::RUNNING);
      // Get the CPU resources required by the running task.
      const auto required_resources = task.GetTaskSpecification().GetRequiredResources();
      const ResourceSet cpu_resources = required_resources.GetNumCpus();
      // Check if we can reacquire the CPU resources.
      bool oversubscribed = !local_available_resources_.Contains(cpu_resources);
      if (!oversubscribed) {
        // Reacquire the CPU resources for the worker. Note that care needs to be
        // taken if the user is using the specific CPU IDs since the IDs that we
        // reacquire here may be different from the ones that the task started with.
        auto const resource_ids = local_available_resources_.Acquire(cpu_resources);
        worker->AcquireTaskCpuResources(resource_ids);
        cluster_resource_map_[self_node_id_].Acquire(cpu_resources);
      } else {
        // In this case, we simply don't reacquire the CPU resources for the worker.
        // The worker can keep running and when the task finishes, it will simply
        // not have any CPU resources to release.
        RAY_LOG(WARNING)
            << "Resources oversubscribed: "
            << cluster_resource_map_[self_node_id_].GetAvailableResources().ToString();
      }
      worker->MarkUnblocked();
    }
  } else {
    // The client is a driver. Drivers do not hold resources, so we simply
    // mark the driver as unblocked.
    worker = worker_pool_.GetRegisteredDriver(client);
  }
  // Unsubscribe from any `ray.get` objects that the task was blocked on. Any
  // fetch or reconstruction operations to make the objects local are canceled.
  // `ray.wait` calls will stay active until the objects become local, or the
  // task/actor that called `ray.wait` exits.
  task_dependency_manager_.UnsubscribeGetDependencies(current_task_id);
  // Mark the task as unblocked.
  RAY_CHECK(worker);
  if (was_blocked) {
    worker->RemoveBlockedTaskId(current_task_id);
    local_queues_.RemoveBlockedTaskId(current_task_id);
  }
}
/// Enqueue a task that has been placed on this node: READY (with a dispatch
/// attempt) when all its arguments are local, otherwise WAITING.
///
/// \param task The locally placed task.
void NodeManager::EnqueuePlaceableTask(const Task &task) {
  // TODO(atumanov): add task lookup hashmap and change EnqueuePlaceableTask to take
  // a vector of TaskIDs. Trigger MoveTask internally.
  // Register interest in the task's arguments; returns true when every
  // dependency is already local.
  const bool dependencies_local = task_dependency_manager_.SubscribeGetDependencies(
      task.GetTaskSpecification().TaskId(), task.GetDependencies());
  // (See design_docs/task_states.rst for the state transition diagram.)
  if (!dependencies_local) {
    local_queues_.QueueTasks({task}, TaskState::WAITING);
  } else {
    local_queues_.QueueTasks({task}, TaskState::READY);
    DispatchTasks(MakeTasksByClass({task}));
  }
  // Mark the task as pending; it is marked canceled again once it finishes
  // executing or has been forwarded to another node.
  task_dependency_manager_.TaskPending(task);
}
void NodeManager::AssignTask(const std::shared_ptr<Worker> &worker, const Task &task,
std::vector<std::function<void()>> *post_assign_callbacks) {
const TaskSpecification &spec = task.GetTaskSpecification();
RAY_CHECK(post_assign_callbacks);
// If this is an actor task, check that the new task has the correct counter.
if (spec.IsActorTask()) {
// An actor task should only be ready to be assigned if it matches the
// expected task counter.
int64_t expected_task_counter =
GetExpectedTaskCounter(actor_registry_, spec.ActorId(), spec.CallerId());
RAY_CHECK(static_cast<int64_t>(spec.ActorCounter()) == expected_task_counter)
<< "Expected actor counter: " << expected_task_counter << ", task "
<< spec.TaskId() << " has: " << spec.ActorCounter();
}
RAY_LOG(DEBUG) << "Assigning task " << spec.TaskId() << " to worker with pid "
<< worker->Process().get()->id()
<< ", worker id: " << worker->WorkerId();
flatbuffers::FlatBufferBuilder fbb;
// Resource accounting: acquire resources for the assigned task.
auto acquired_resources =
local_available_resources_.Acquire(spec.GetRequiredResources());
cluster_resource_map_[self_node_id_].Acquire(spec.GetRequiredResources());
if (spec.IsActorCreationTask()) {
// Check that the actor's placement resource requirements are satisfied.
RAY_CHECK(spec.GetRequiredPlacementResources().IsSubset(
cluster_resource_map_[self_node_id_].GetTotalResources()));
worker->SetLifetimeResourceIds(acquired_resources);
} else {
worker->SetTaskResourceIds(acquired_resources);
}
auto task_id = spec.TaskId();
if (task.OnDispatch() != nullptr) {
if (task.GetTaskSpecification().IsDetachedActor()) {
worker->MarkDetachedActor();
}
task.OnDispatch()(worker, initial_config_.node_manager_address, worker->Port(),
worker->WorkerId(),
spec.IsActorCreationTask() ? worker->GetLifetimeResourceIds()
: worker->GetTaskResourceIds());
post_assign_callbacks->push_back([this, worker, task_id]() {
RAY_LOG(DEBUG) << "Finished assigning task " << task_id << " to worker "
<< worker->WorkerId();
FinishAssignTask(worker, task_id, /*success=*/true);
});
} else {
ResourceIdSet resource_id_set =
worker->GetTaskResourceIds().Plus(worker->GetLifetimeResourceIds());
if (worker->AssignTask(task, resource_id_set).ok()) {
RAY_LOG(DEBUG) << "Assigned task " << task_id << " to worker "
<< worker->WorkerId();
post_assign_callbacks->push_back([this, worker, task_id]() {
FinishAssignTask(worker, task_id, /*success=*/true);
});
} else {
RAY_LOG(ERROR) << "Failed to assign task " << task_id << " to worker "
<< worker->WorkerId() << ", disconnecting client";
post_assign_callbacks->push_back([this, worker, task_id]() {
FinishAssignTask(worker, task_id, /*success=*/false);
});
}
}
}
/// Clean up after `worker` finishes its assigned task: release the task's
/// resources, run actor bookkeeping for actor (creation) tasks, and reset
/// the worker's task/job assignment.
///
/// \param worker The worker that completed its task.
/// \return True if the worker should be returned to the idle pool (i.e., it
///         is not a direct actor creation worker).
bool NodeManager::FinishAssignedTask(Worker &worker) {
  TaskID task_id = worker.GetAssignedTaskId();
  RAY_LOG(DEBUG) << "Finished task " << task_id;
  // (See design_docs/task_states.rst for the state transition diagram.)
  Task task;
  RAY_CHECK(local_queues_.RemoveTask(task_id, &task));
  // Release task's resources. The worker's lifetime resources are still held.
  auto const &task_resources = worker.GetTaskResourceIds();
  local_available_resources_.ReleaseConstrained(
      task_resources, cluster_resource_map_[self_node_id_].GetTotalResources());
  cluster_resource_map_[self_node_id_].Release(task_resources.ToResourceSet());
  worker.ResetTaskResourceIds();
  const auto &spec = task.GetTaskSpecification();
  if ((spec.IsActorCreationTask() || spec.IsActorTask())) {
    // If this was an actor or actor creation task, handle the actor's new
    // state.
    FinishAssignedActorTask(worker, task);
  } else {
    // If this was a non-actor task, then cancel any ray.wait calls that were
    // made during the task execution.
    task_dependency_manager_.UnsubscribeWaitDependencies(worker.WorkerId());
  }
  // Notify the task dependency manager that this task has finished execution.
  task_dependency_manager_.TaskCanceled(task_id);
  // Unset the worker's assigned job Id if this is not an actor.
  if (!spec.IsActorCreationTask() && !spec.IsActorTask()) {
    worker.AssignJobId(JobID::Nil());
  }
  if (!spec.IsDirectActorCreationCall()) {
    // Unset the worker's assigned task. We keep the assigned task ID for
    // direct actor creation calls because this ID is used later if the actor
    // requires objects from plasma.
    worker.AssignTaskId(TaskID::Nil());
  }
  // Direct actors will be assigned tasks via the core worker and therefore are
  // not idle.
  return !spec.IsDirectActorCreationCall();
}
/// Build the ActorTableData record announcing that an actor is ALIVE on this
/// node, based on its creation task.
///
/// If the actor is unknown locally, a fresh record is created with the full
/// reconstruction budget; if it is already registered (i.e. this is a
/// reconstruction), the existing static fields are copied and
/// remaining_reconstructions is decremented by one.
///
/// \param task_spec The actor creation task's spec (must be a creation task).
/// \param port The port the actor's worker listens on.
/// \param worker_id The worker now hosting the actor.
/// \return A new ActorTableData with state set to ALIVE at this node.
std::shared_ptr<ActorTableData> NodeManager::CreateActorTableDataFromCreationTask(
    const TaskSpecification &task_spec, int port, const WorkerID &worker_id) {
  RAY_CHECK(task_spec.IsActorCreationTask());
  auto actor_id = task_spec.ActorCreationId();
  auto actor_entry = actor_registry_.find(actor_id);
  std::shared_ptr<ActorTableData> actor_info_ptr;
  // TODO(swang): If this is an actor that was reconstructed, and previous
  // actor notifications were delayed, then this node may not have an entry for
  // the actor in actor_regisry_. Then, the fields for the number of
  // reconstructions will be wrong.
  if (actor_entry == actor_registry_.end()) {
    actor_info_ptr.reset(new ActorTableData());
    // Set all of the static fields for the actor. These fields will not
    // change even if the actor fails or is reconstructed.
    actor_info_ptr->set_actor_id(actor_id.Binary());
    actor_info_ptr->set_actor_creation_dummy_object_id(
        task_spec.ActorDummyObject().Binary());
    actor_info_ptr->set_job_id(task_spec.JobId().Binary());
    actor_info_ptr->set_max_reconstructions(task_spec.MaxActorReconstructions());
    // This is the first time that the actor has been created, so the number
    // of remaining reconstructions is the max.
    actor_info_ptr->set_remaining_reconstructions(task_spec.MaxActorReconstructions());
    actor_info_ptr->set_is_direct_call(task_spec.IsDirectActorCreationCall());
    actor_info_ptr->set_is_detached(task_spec.IsDetachedActor());
    actor_info_ptr->mutable_owner_address()->CopyFrom(
        task_spec.GetMessage().caller_address());
  } else {
    // If we've already seen this actor, it means that this actor was reconstructed.
    // Thus, its previous state must be RECONSTRUCTING.
    // TODO: The following is a workaround for the issue described in
    // https://github.com/ray-project/ray/issues/5524, please see the issue
    // description for more information.
    if (actor_entry->second.GetState() != ActorTableData::RECONSTRUCTING) {
      RAY_LOG(WARNING) << "Actor not in reconstructing state, most likely it "
                       << "died before creation handler could run. Actor state is "
                       << actor_entry->second.GetState();
    }
    // Copy the static fields from the current actor entry.
    actor_info_ptr.reset(new ActorTableData(actor_entry->second.GetTableData()));
    // We are reconstructing the actor, so subtract its
    // remaining_reconstructions by 1.
    actor_info_ptr->set_remaining_reconstructions(
        actor_info_ptr->remaining_reconstructions() - 1);
  }
  // Set the new fields for the actor's state to indicate that the actor is
  // now alive on this node manager.
  actor_info_ptr->mutable_address()->set_ip_address(
      gcs_client_->Nodes().GetSelfInfo().node_manager_address());
  actor_info_ptr->mutable_address()->set_port(port);
  actor_info_ptr->mutable_address()->set_raylet_id(self_node_id_.Binary());
  actor_info_ptr->mutable_address()->set_worker_id(worker_id.Binary());
  actor_info_ptr->set_state(ActorTableData::ALIVE);
  actor_info_ptr->set_timestamp(current_time_ms());
  return actor_info_ptr;
}
/// Handle completion of an actor task or an actor creation task.
///
/// For a creation task, the worker is converted to an actor and the parent
/// actor ID is resolved asynchronously: first from the GCS task table, then
/// falling back to the lineage cache; the result is handed to
/// FinishAssignedActorCreationTask. For a regular actor task, the actor's
/// frontier is extended and the dummy object is marked local so the next
/// method can run.
///
/// \param worker The worker that executed the task.
/// \param task The finished actor or actor creation task.
void NodeManager::FinishAssignedActorTask(Worker &worker, const Task &task) {
  RAY_LOG(INFO) << "Finishing assigned actor task";
  ActorID actor_id;
  TaskID caller_id;
  const TaskSpecification task_spec = task.GetTaskSpecification();
  bool resumed_from_checkpoint = false;
  if (task_spec.IsActorCreationTask()) {
    actor_id = task_spec.ActorCreationId();
    caller_id = TaskID::Nil();
    // A pending checkpoint ID means this creation is a checkpoint restore.
    if (checkpoint_id_to_restore_.count(actor_id) > 0) {
      resumed_from_checkpoint = true;
    }
  } else {
    actor_id = task_spec.ActorId();
    caller_id = task_spec.CallerId();
  }
  if (task_spec.IsActorCreationTask()) {
    // This was an actor creation task. Convert the worker to an actor.
    worker.AssignActorId(actor_id);
    if (task_spec.IsDetachedActor()) {
      worker.MarkDetachedActor();
    }
    // Lookup the parent actor id.
    auto parent_task_id = task_spec.ParentTaskId();
    int port = worker.Port();
    auto worker_id = worker.WorkerId();
    RAY_CHECK_OK(
        gcs_client_->Tasks().AsyncGet(
            parent_task_id,
            /*callback=*/
            [this, task_spec, resumed_from_checkpoint, port, parent_task_id, worker_id](
                Status status, const boost::optional<TaskTableData> &parent_task_data) {
              if (parent_task_data) {
                // The task was in the GCS task table. Use the stored task spec to
                // get the parent actor id.
                Task parent_task(parent_task_data->task());
                ActorID parent_actor_id = ActorID::Nil();
                if (parent_task.GetTaskSpecification().IsActorCreationTask()) {
                  parent_actor_id = parent_task.GetTaskSpecification().ActorCreationId();
                } else if (parent_task.GetTaskSpecification().IsActorTask()) {
                  parent_actor_id = parent_task.GetTaskSpecification().ActorId();
                }
                FinishAssignedActorCreationTask(parent_actor_id, task_spec,
                                                resumed_from_checkpoint, port, worker_id);
                return;
              }
              // The parent task was not in the GCS task table. It should most likely be
              // in the lineage cache.
              ActorID parent_actor_id = ActorID::Nil();
              if (lineage_cache_.ContainsTask(parent_task_id)) {
                // Use a copy of the cached task spec to get the parent actor id.
                Task parent_task = lineage_cache_.GetTaskOrDie(parent_task_id);
                if (parent_task.GetTaskSpecification().IsActorCreationTask()) {
                  parent_actor_id = parent_task.GetTaskSpecification().ActorCreationId();
                } else if (parent_task.GetTaskSpecification().IsActorTask()) {
                  parent_actor_id = parent_task.GetTaskSpecification().ActorId();
                }
              } else {
                // Neither source had the metadata; proceed with a Nil parent.
                RAY_LOG(WARNING)
                    << "Task metadata not found in either GCS or lineage cache. It may "
                       "have "
                       "been "
                       "evicted "
                    << "by the redis LRU configuration. Consider increasing the memory "
                       "allocation via "
                    << "ray.init(redis_max_memory=<max_memory_bytes>).";
              }
              FinishAssignedActorCreationTask(parent_actor_id, task_spec,
                                              resumed_from_checkpoint, port, worker_id);
            }));
  } else {
    auto actor_entry = actor_registry_.find(actor_id);
    RAY_CHECK(actor_entry != actor_registry_.end());
    // Extend the actor's frontier to include the executed task.
    const ObjectID object_to_release =
        actor_entry->second.ExtendFrontier(caller_id, task_spec.ActorDummyObject());
    if (!object_to_release.IsNil()) {
      // If there were no new actor handles created, then no other actor task
      // will depend on this execution dependency, so it safe to release.
      HandleObjectMissing(object_to_release);
    }
    // Mark the dummy object as locally available to indicate that the actor's
    // state has changed and the next method can run. This is not added to the
    // object table, so the update will be invisible to both the local object
    // manager and the other nodes.
    // NOTE(swang): The dummy objects must be marked as local whenever
    // ExtendFrontier is called, and vice versa, so that we can clean up the
    // dummy objects properly in case the actor fails and needs to be
    // reconstructed.
    HandleObjectLocal(task_spec.ActorDummyObject());
  }
}
/// Publish the ALIVE state of a newly created (or checkpoint-restored) actor.
///
/// For a checkpoint restore, the checkpoint is looked up in the GCS, the
/// registration/frontier is rebuilt from it, and the actor entry is updated.
/// Otherwise the state transition is applied immediately and the actor is
/// registered (first creation) or updated (reconstruction) in the GCS, and
/// the actor's initial dummy object is stored locally.
///
/// \param parent_actor_id The actor that created this actor (Nil if the
///        creator was not an actor).
/// \param task_spec The actor creation task's spec.
/// \param resumed_from_checkpoint Whether the actor was restored from a
///        previously saved checkpoint.
/// \param port The port the actor's worker listens on.
/// \param worker_id The worker now hosting the actor.
void NodeManager::FinishAssignedActorCreationTask(const ActorID &parent_actor_id,
                                                  const TaskSpecification &task_spec,
                                                  bool resumed_from_checkpoint, int port,
                                                  const WorkerID &worker_id) {
  // Notify the other node managers that the actor has been created.
  const ActorID actor_id = task_spec.ActorCreationId();
  auto new_actor_info = CreateActorTableDataFromCreationTask(task_spec, port, worker_id);
  new_actor_info->set_parent_id(parent_actor_id.Binary());
  auto update_callback = [actor_id](Status status) {
    if (!status.ok()) {
      // Only one node at a time should succeed at creating or updating the actor.
      RAY_LOG(FATAL) << "Failed to update state to ALIVE for actor " << actor_id;
    }
  };
  if (resumed_from_checkpoint) {
    // This actor was resumed from a checkpoint. In this case, we first look
    // up the checkpoint in GCS and use it to restore the actor registration
    // and frontier.
    const auto checkpoint_id = checkpoint_id_to_restore_[actor_id];
    checkpoint_id_to_restore_.erase(actor_id);
    RAY_LOG(DEBUG) << "Looking up checkpoint " << checkpoint_id << " for actor "
                   << actor_id;
    RAY_CHECK_OK(gcs_client_->Actors().AsyncGetCheckpoint(
        checkpoint_id,
        [this, checkpoint_id, actor_id, new_actor_info, update_callback](
            Status status, const boost::optional<ActorCheckpointData> &checkpoint_data) {
          RAY_CHECK(checkpoint_data) << "Couldn't find checkpoint " << checkpoint_id
                                     << " for actor " << actor_id << " in GCS.";
          RAY_LOG(INFO) << "Restoring registration for actor " << actor_id
                        << " from checkpoint " << checkpoint_id;
          ActorRegistration actor_registration =
              ActorRegistration(*new_actor_info, *checkpoint_data);
          // Mark the unreleased dummy objects in the checkpoint frontier as local.
          for (const auto &entry : actor_registration.GetDummyObjects()) {
            HandleObjectLocal(entry.first);
          }
          HandleActorStateTransition(actor_id, std::move(actor_registration));
          // The actor was created before.
          RAY_CHECK_OK(gcs_client_->Actors().AsyncUpdate(actor_id, new_actor_info,
                                                         update_callback));
        }));
  } else {
    // The actor did not resume from a checkpoint. Immediately notify the
    // other node managers that the actor has been created.
    HandleActorStateTransition(actor_id, ActorRegistration(*new_actor_info));
    if (actor_registry_.find(actor_id) != actor_registry_.end()) {
      // The actor was created before.
      RAY_CHECK_OK(
          gcs_client_->Actors().AsyncUpdate(actor_id, new_actor_info, update_callback));
    } else {
      // The actor was never created before.
      RAY_CHECK_OK(gcs_client_->Actors().AsyncRegister(new_actor_info, update_callback));
    }
    // Store the initial dummy object; all future handles to the actor will
    // depend on it. (Folded in from a redundant trailing
    // `if (!resumed_from_checkpoint)` check -- behavior is unchanged.)
    HandleObjectLocal(task_spec.ActorDummyObject());
  }
}
/// Attempt to re-execute a task whose output object is required but lost.
///
/// The task spec is fetched asynchronously from the GCS task table; if absent
/// there, the lineage cache is consulted. If neither source has it, the
/// required object is marked failed as unreconstructable.
///
/// \param task_id The task to re-execute.
/// \param required_object_id The lost object that triggered reconstruction.
void NodeManager::HandleTaskReconstruction(const TaskID &task_id,
                                           const ObjectID &required_object_id) {
  // Retrieve the task spec in order to re-execute the task.
  RAY_CHECK_OK(gcs_client_->Tasks().AsyncGet(
      task_id,
      /*callback=*/
      [this, required_object_id, task_id](
          Status status, const boost::optional<TaskTableData> &task_data) {
        if (task_data) {
          // The task was in the GCS task table. Use the stored task spec to
          // re-execute the task.
          ResubmitTask(Task(task_data->task()), required_object_id);
          return;
        }
        // The task was not in the GCS task table. It must therefore be in the
        // lineage cache.
        if (lineage_cache_.ContainsTask(task_id)) {
          // Use a copy of the cached task spec to re-execute the task.
          const Task task = lineage_cache_.GetTaskOrDie(task_id);
          ResubmitTask(task, required_object_id);
        } else {
          // The metadata is gone; the object can never be reconstructed.
          RAY_LOG(WARNING)
              << "Metadata of task " << task_id
              << " not found in either GCS or lineage cache. It may have been evicted "
              << "by the redis LRU configuration. Consider increasing the memory "
                 "allocation via "
              << "ray.init(redis_max_memory=<max_memory_bytes>).";
          MarkObjectsAsFailed(ErrorType::OBJECT_UNRECONSTRUCTABLE,
                              {required_object_id.ToPlasmaId()}, JobID::Nil());
        }
      }));
}
/// Resubmit a task for execution during reconstruction.
///
/// Skips resubmission when the task is an actor creation task whose actor is
/// still ALIVE, and refuses driver tasks (their ray.put objects cannot be
/// reconstructed; an error is pushed to the driver instead). Otherwise the
/// task is submitted with an empty lineage.
///
/// \param task The task to resubmit.
/// \param required_object_id The lost object whose reconstruction requires
///        this task.
void NodeManager::ResubmitTask(const Task &task, const ObjectID &required_object_id) {
  RAY_LOG(DEBUG) << "Attempting to resubmit task "
                 << task.GetTaskSpecification().TaskId();
  // Actors should only be recreated if the first initialization failed or if
  // the most recent instance of the actor failed.
  if (task.GetTaskSpecification().IsActorCreationTask()) {
    const auto &actor_id = task.GetTaskSpecification().ActorCreationId();
    const auto it = actor_registry_.find(actor_id);
    if (it != actor_registry_.end() && it->second.GetState() == ActorTableData::ALIVE) {
      // If the actor is still alive, then do not resubmit the task. If the
      // actor actually is dead and a result is needed, then reconstruction
      // for this task will be triggered again.
      RAY_LOG(WARNING)
          << "Actor creation task resubmitted, but the actor is still alive.";
      return;
    }
  }
  // Driver tasks cannot be reconstructed. If this is a driver task, push an
  // error to the driver and do not resubmit it.
  if (task.GetTaskSpecification().IsDriverTask()) {
    // TODO(rkn): Define this constant somewhere else.
    std::string type = "put_reconstruction";
    std::ostringstream error_message;
    error_message << "The task with ID " << task.GetTaskSpecification().TaskId()
                  << " is a driver task and so the object created by ray.put "
                  << "could not be reconstructed.";
    auto error_data_ptr =
        gcs::CreateErrorTableData(type, error_message.str(), current_time_ms(),
                                  task.GetTaskSpecification().JobId());
    RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr));
    MarkObjectsAsFailed(ErrorType::OBJECT_UNRECONSTRUCTABLE,
                        {required_object_id.ToPlasmaId()},
                        task.GetTaskSpecification().JobId());
    return;
  }
  RAY_LOG(INFO) << "Resubmitting task " << task.GetTaskSpecification().TaskId()
                << " on node " << self_node_id_;
  // The task may be reconstructed. Submit it with an empty lineage, since any
  // uncommitted lineage must already be in the lineage cache. At this point,
  // the task should not yet exist in the local scheduling queue. If it does,
  // then this is a spurious reconstruction.
  SubmitTask(task, Lineage());
}
/// React to an object becoming locally available.
///
/// Notifies the task dependency manager, then moves any tasks whose
/// dependencies are now fully satisfied from WAITING to READY and dispatches
/// them. Tasks in other states (blocked, running, driver, waiting for actor
/// creation) and direct-call actor "tasks" are filtered out first.
///
/// \param object_id The object that is now local.
void NodeManager::HandleObjectLocal(const ObjectID &object_id) {
  // Notify the task dependency manager that this object is local.
  const auto ready_task_ids = task_dependency_manager_.HandleObjectLocal(object_id);
  RAY_LOG(DEBUG) << "Object local " << object_id << ", "
                 << " on " << self_node_id_ << ", " << ready_task_ids.size()
                 << " tasks ready";
  // Transition the tasks whose dependencies are now fulfilled to the ready state.
  if (ready_task_ids.size() > 0) {
    std::unordered_set<TaskID> ready_task_id_set(ready_task_ids.begin(),
                                                 ready_task_ids.end());
    // First filter out the tasks that should not be moved to READY.
    local_queues_.FilterState(ready_task_id_set, TaskState::BLOCKED);
    local_queues_.FilterState(ready_task_id_set, TaskState::RUNNING);
    local_queues_.FilterState(ready_task_id_set, TaskState::DRIVER);
    local_queues_.FilterState(ready_task_id_set, TaskState::WAITING_FOR_ACTOR_CREATION);
    // Make sure that the remaining tasks are all WAITING or direct call
    // actors.
    auto ready_task_id_set_copy = ready_task_id_set;
    local_queues_.FilterState(ready_task_id_set_copy, TaskState::WAITING);
    // Filter out direct call actors. These are not tracked by the raylet and
    // their assigned task ID is the actor ID.
    for (const auto &id : ready_task_id_set_copy) {
      RAY_CHECK(actor_registry_.count(id.ActorId()) > 0);
      ready_task_id_set.erase(id);
    }
    // Queue and dispatch the tasks that are ready to run (i.e., WAITING).
    auto ready_tasks = local_queues_.RemoveTasks(ready_task_id_set);
    local_queues_.QueueTasks(ready_tasks, TaskState::READY);
    DispatchTasks(MakeTasksByClass(ready_tasks));
  }
}
/// Return true iff `task_id` is the creation task of a locally registered
/// actor that uses direct calls.
bool NodeManager::IsDirectActorCreationTask(const TaskID &task_id) {
  const auto embedded_actor_id = task_id.ActorId();
  // A task ID belongs to an actor creation task only when it carries a
  // non-nil actor ID and round-trips through ForActorCreationTask.
  if (embedded_actor_id.IsNil()) {
    return false;
  }
  if (task_id != TaskID::ForActorCreationTask(embedded_actor_id)) {
    return false;
  }
  // Only registered actors flagged as direct-call count.
  const auto registry_it = actor_registry_.find(embedded_actor_id);
  return registry_it != actor_registry_.end() &&
         registry_it->second.GetTableData().is_direct_call();
}
/// React to an object becoming unavailable locally.
///
/// Notifies the task dependency manager, then moves READY tasks that depend
/// on the object back to WAITING (skipping running/driver tasks and direct
/// actor creation tasks), and re-runs scheduling since load may have changed.
///
/// \param object_id The object that is no longer local.
void NodeManager::HandleObjectMissing(const ObjectID &object_id) {
  // Notify the task dependency manager that this object is no longer local.
  const auto waiting_task_ids = task_dependency_manager_.HandleObjectMissing(object_id);
  std::stringstream result;
  result << "Object missing " << object_id << ", "
         << " on " << self_node_id_ << ", " << waiting_task_ids.size()
         << " tasks waiting";
  if (waiting_task_ids.size() > 0) {
    result << ", tasks: ";
    for (const auto &task_id : waiting_task_ids) {
      result << task_id << " ";
    }
  }
  RAY_LOG(DEBUG) << result.str();
  // Transition any tasks that were in the runnable state and are dependent on
  // this object to the waiting state.
  if (!waiting_task_ids.empty()) {
    std::unordered_set<TaskID> waiting_task_id_set(waiting_task_ids.begin(),
                                                   waiting_task_ids.end());
    // NOTE(zhijunfu): For direct actors, the worker is initially assigned actor
    // creation task ID, which will not be reset after the task finishes. And later tasks
    // of this actor will reuse this task ID to require objects from plasma with
    // FetchOrReconstruct, since direct actor task IDs are not known to raylet.
    // To support actor reconstruction for direct actor, raylet marks actor creation task
    // as completed and removes it from `local_queues_` when it receives `TaskDone`
    // message from worker. This is necessary because the actor creation task will be
    // re-submitted during reconstruction, if the task is not removed previously, the new
    // submitted task will be marked as duplicate and thus ignored.
    // So here we check for direct actor creation task explicitly to allow this case.
    auto iter = waiting_task_id_set.begin();
    while (iter != waiting_task_id_set.end()) {
      if (IsDirectActorCreationTask(*iter)) {
        RAY_LOG(DEBUG) << "Ignoring direct actor creation task " << *iter
                       << " when handling object missing for " << object_id;
        iter = waiting_task_id_set.erase(iter);
      } else {
        ++iter;
      }
    }
    // First filter out any tasks that can't be transitioned to READY. These
    // are running workers or drivers, now blocked in a get.
    local_queues_.FilterState(waiting_task_id_set, TaskState::RUNNING);
    local_queues_.FilterState(waiting_task_id_set, TaskState::DRIVER);
    // Transition the tasks back to the waiting state. They will be made
    // runnable once the deleted object becomes available again.
    local_queues_.MoveTasks(waiting_task_id_set, TaskState::READY, TaskState::WAITING);
    // Every remaining ID must have been consumed by MoveTasks.
    RAY_CHECK(waiting_task_id_set.empty());
    // Moving ready tasks to waiting may have changed the load, making space for placing
    // new tasks locally.
    ScheduleTasks(cluster_resource_map_);
  }
}
/// Forward a task to another node manager; on failure, either retry later
/// (actor tasks, via a timer while the task sits in the SWAP queue) or make
/// the task placeable again and re-run scheduling (all other tasks).
///
/// \param task The task to forward.
/// \param node_manager_id The destination node manager.
void NodeManager::ForwardTaskOrResubmit(const Task &task,
                                        const ClientID &node_manager_id) {
  /// TODO(rkn): Should we check that the node manager is remote and not local?
  /// TODO(rkn): Should we check if the remote node manager is known to be dead?
  // Attempt to forward the task.
  ForwardTask(
      task, node_manager_id,
      [this, node_manager_id](ray::Status error, const Task &task) {
        const TaskID task_id = task.GetTaskSpecification().TaskId();
        RAY_LOG(INFO) << "Failed to forward task " << task_id << " to node manager "
                      << node_manager_id;
        // Mark the failed task as pending to let other raylets know that we still
        // have the task. TaskDependencyManager::TaskPending() is assumed to be
        // idempotent.
        task_dependency_manager_.TaskPending(task);
        // Actor tasks can only be executed at the actor's location, so they are
        // retried after a timeout. All other tasks that fail to be forwarded are
        // deemed to be placeable again.
        if (task.GetTaskSpecification().IsActorTask()) {
          // The task is for an actor on another node. Create a timer to resubmit
          // the task in a little bit. TODO(rkn): Really this should be a
          // unique_ptr instead of a shared_ptr. However, it's a little harder to
          // move unique_ptrs into lambdas.
          auto retry_timer = std::make_shared<boost::asio::deadline_timer>(io_service_);
          auto retry_duration = boost::posix_time::milliseconds(
              RayConfig::instance()
                  .node_manager_forward_task_retry_timeout_milliseconds());
          retry_timer->expires_from_now(retry_duration);
          // The timer captures its own shared_ptr so it stays alive until fired.
          retry_timer->async_wait(
              [this, task_id, retry_timer](const boost::system::error_code &error) {
                // Timer killing will receive the boost::asio::error::operation_aborted,
                // we only handle the timeout event.
                RAY_CHECK(!error);
                RAY_LOG(INFO) << "Resubmitting task " << task_id
                              << " because ForwardTask failed.";
                // Remove the RESUBMITTED task from the SWAP queue.
                Task task;
                TaskState state;
                if (local_queues_.RemoveTask(task_id, &task, &state)) {
                  RAY_CHECK(state == TaskState::SWAP);
                  // Submit the task again.
                  SubmitTask(task, Lineage());
                }
              });
          // Temporarily move the RESUBMITTED task to the SWAP queue while the
          // timer is active.
          local_queues_.QueueTasks({task}, TaskState::SWAP);
        } else {
          // The task is not for an actor and may therefore be placed on another
          // node immediately. Send it to the scheduling policy to be placed again.
          local_queues_.QueueTasks({task}, TaskState::PLACEABLE);
          ScheduleTasks(cluster_resource_map_);
        }
      });
}
/// Forward a task (and its uncommitted lineage) to another node manager.
///
/// For direct tasks with a spillback callback, the callback is invoked with
/// the destination's address instead of performing an RPC. Otherwise the
/// lineage is serialized into a ForwardTask RPC; on success, the task is
/// marked forwarded and canceled locally (preemptively pushing local actor
/// task arguments to the receiver); on failure, `on_error` is invoked.
///
/// \param task The task to forward.
/// \param node_id The destination node.
/// \param on_error Invoked with the failure status and the task if the
///        forward cannot be performed or the RPC fails.
void NodeManager::ForwardTask(
    const Task &task, const ClientID &node_id,
    const std::function<void(const ray::Status &, const Task &)> &on_error) {
  // Override spillback for direct tasks.
  if (task.OnSpillback() != nullptr) {
    auto node_info = gcs_client_->Nodes().Get(node_id);
    RAY_CHECK(node_info)
        << "Spilling back to a node manager, but no GCS info found for node " << node_id;
    task.OnSpillback()(node_id, node_info->node_manager_address(),
                       node_info->node_manager_port());
    return;
  }
  // Lookup node manager client for this node_id and use it to send the request.
  auto client_entry = remote_node_manager_clients_.find(node_id);
  if (client_entry == remote_node_manager_clients_.end()) {
    // TODO(atumanov): caller must handle failure to ensure tasks are not lost.
    RAY_LOG(INFO) << "No node manager client found for GCS client id " << node_id;
    on_error(ray::Status::IOError("Node manager client not found"), task);
    return;
  }
  auto &client = client_entry->second;
  const auto &spec = task.GetTaskSpecification();
  auto task_id = spec.TaskId();
  if (worker_pool_.HasPendingWorkerForTask(spec.GetLanguage(), task_id)) {
    // There is a worker being starting for this task,
    // so we shouldn't forward this task to another node.
    return;
  }
  // Get the task's unforwarded, uncommitted lineage.
  Lineage uncommitted_lineage = lineage_cache_.GetUncommittedLineage(task_id, node_id);
  if (uncommitted_lineage.GetEntries().empty()) {
    // There is no uncommitted lineage. This can happen if the lineage was
    // already evicted before we forwarded the task.
    uncommitted_lineage.SetEntry(task, GcsStatus::NONE);
  }
  auto entry = uncommitted_lineage.GetEntryMutable(task_id);
  Task &lineage_cache_entry_task = entry->TaskDataMutable();
  // Increment forward count for the forwarded task.
  lineage_cache_entry_task.IncrementNumForwards();
  RAY_LOG(DEBUG) << "Forwarding task " << task_id << " from " << self_node_id_ << " to "
                 << node_id << " spillback="
                 << lineage_cache_entry_task.GetTaskExecutionSpec().NumForwards();
  // Prepare the request message.
  rpc::ForwardTaskRequest request;
  request.set_task_id(task_id.Binary());
  for (auto &task_entry : uncommitted_lineage.GetEntries()) {
    // NOTE: renamed from `task` to avoid shadowing the `task` parameter.
    auto uncommitted_task = request.add_uncommitted_tasks();
    uncommitted_task->mutable_task_spec()->CopyFrom(
        task_entry.second.TaskData().GetTaskSpecification().GetMessage());
    uncommitted_task->mutable_task_execution_spec()->CopyFrom(
        task_entry.second.TaskData().GetTaskExecutionSpec().GetMessage());
  }
  client->ForwardTask(request, [this, on_error, task, task_id, node_id](
                                   Status status, const rpc::ForwardTaskReply &reply) {
    if (local_queues_.HasTask(task_id)) {
      // It must have been forwarded back to us if it's in the queue again
      // so just return here.
      return;
    }
    if (status.ok()) {
      const auto &spec = task.GetTaskSpecification();
      // Mark as forwarded so that the task and its lineage are not
      // re-forwarded in the future to the receiving node.
      lineage_cache_.MarkTaskAsForwarded(task_id, node_id);
      // Notify the task dependency manager that we are no longer responsible
      // for executing this task.
      task_dependency_manager_.TaskCanceled(task_id);
      // Preemptively push any local arguments to the receiving node. For now, we
      // only do this with actor tasks, since actor tasks must be executed by a
      // specific process and therefore have affinity to the receiving node.
      if (spec.IsActorTask()) {
        // Iterate through the object's arguments. NOTE(swang): We do not include
        // the execution dependencies here since those cannot be transferred
        // between nodes.
        for (size_t i = 0; i < spec.NumArgs(); ++i) {
          int count = spec.ArgIdCount(i);
          for (int j = 0; j < count; j++) {
            ObjectID argument_id = spec.ArgId(i, j);
            // If the argument is local, then push it to the receiving node.
            if (task_dependency_manager_.CheckObjectLocal(argument_id)) {
              object_manager_.Push(argument_id, node_id);
            }
          }
        }
      }
    } else {
      on_error(status, task);
    }
  });
}
/// Finalize a task assignment attempt.
///
/// On success, records the task/job on the worker, moves the task from READY
/// to RUNNING, and unsubscribes its object dependencies. On failure,
/// disconnects the worker and requeues the task as READY for a future
/// dispatch.
///
/// \param worker The worker the task was assigned to.
/// \param task_id The assigned task.
/// \param success Whether the assignment RPC succeeded.
void NodeManager::FinishAssignTask(const std::shared_ptr<Worker> &worker,
                                   const TaskID &task_id, bool success) {
  RAY_LOG(DEBUG) << "FinishAssignTask: " << task_id;
  // Remove the ASSIGNED task from the READY queue.
  Task assigned_task;
  TaskState state;
  if (!local_queues_.RemoveTask(task_id, &assigned_task, &state)) {
    // TODO(edoakes): should we be failing silently here?
    return;
  }
  RAY_CHECK(state == TaskState::READY);
  if (success) {
    auto spec = assigned_task.GetTaskSpecification();
    // We successfully assigned the task to the worker.
    worker->AssignTaskId(spec.TaskId());
    worker->AssignJobId(spec.JobId());
    // TODO(swang): For actors with multiple actor handles, to
    // guarantee that tasks are replayed in the same order after a
    // failure, we must update the task's execution dependency to be
    // the actor's current execution dependency.
    // Mark the task as running.
    // (See design_docs/task_states.rst for the state transition diagram.)
    local_queues_.QueueTasks({assigned_task}, TaskState::RUNNING);
    // Notify the task dependency manager that we no longer need this task's
    // object dependencies.
    RAY_CHECK(task_dependency_manager_.UnsubscribeGetDependencies(spec.TaskId()));
  } else {
    RAY_LOG(WARNING) << "Failed to send task to worker, disconnecting client";
    // We failed to send the task to the worker, so disconnect the worker.
    ProcessDisconnectClientMessage(worker->Connection());
    // Queue this task for future assignment. We need to do this since
    // DispatchTasks() removed it from the ready queue. The task will be
    // assigned to a worker once one becomes available.
    // (See design_docs/task_states.rst for the state transition diagram.)
    local_queues_.QueueTasks({assigned_task}, TaskState::READY);
    DispatchTasks(MakeTasksByClass({assigned_task}));
  }
}
/// Dump the node manager's debug state to <session_dir>/debug_state.txt,
/// truncating any previous dump. The std::ofstream closes the file via RAII,
/// replacing the previous manual open()/close() pair.
void NodeManager::DumpDebugState() const {
  std::ofstream fs(initial_config_.session_dir + "/debug_state.txt",
                   std::fstream::out | std::fstream::trunc);
  fs << DebugString();
}
// Read-only access to the configuration this node manager was started with.
const NodeManagerConfig &NodeManager::GetInitialConfig() const { return initial_config_; }
/// Build a human-readable, multi-line summary of this node manager's state
/// (configured resources, per-node cluster resources, subsystem debug
/// strings, actor registry statistics, and connected remote node managers),
/// plus the time the dump itself took.
///
/// \return The formatted debug summary.
std::string NodeManager::DebugString() const {
  std::stringstream result;
  uint64_t now_ms = current_time_ms();
  result << "NodeManager:";
  result << "\nInitialConfigResources: " << initial_config_.resource_config.ToString();
  result << "\nClusterResources:";
  // Bind by const reference: this is a const member function and the entries
  // are only read (the original non-const `auto &` was misleading).
  for (const auto &pair : cluster_resource_map_) {
    result << "\n" << pair.first.Hex() << ": " << pair.second.DebugString();
  }
  result << "\n" << object_manager_.DebugString();
  result << "\n" << gcs_client_->DebugString();
  result << "\n" << worker_pool_.DebugString();
  result << "\n" << local_queues_.DebugString();
  result << "\n" << reconstruction_policy_.DebugString();
  result << "\n" << task_dependency_manager_.DebugString();
  result << "\n" << lineage_cache_.DebugString();
  result << "\nActorRegistry:";
  auto statistical_data = GetActorStatisticalData(actor_registry_);
  result << "\n- num live actors: " << statistical_data.live_actors;
  result << "\n- num reconstructing actors: " << statistical_data.reconstructing_actors;
  result << "\n- num dead actors: " << statistical_data.dead_actors;
  result << "\n- max num handles: " << statistical_data.max_num_handles;
  result << "\nRemote node manager clients: ";
  for (const auto &entry : remote_node_manager_clients_) {
    result << "\n" << entry.first;
  }
  result << "\nDebugString() time ms: " << (current_time_ms() - now_ms);
  return result.str();
}
// Renders a Census view's tag keys together with one row's tag values as a
// compact comma-separated "key:value" list, e.g.
// "Tag1:Value1,Tag2:Value2,Tag3:Value3".
std::string compact_tag_string(const opencensus::stats::ViewDescriptor &view,
                               const std::vector<std::string> &values) {
  std::stringstream out;
  const auto &columns = view.columns();
  // Separator-prefix idiom: empty before the first pair, "," afterwards.
  const char *separator = "";
  for (size_t idx = 0; idx < values.size(); ++idx) {
    out << separator << columns[idx].name() << ":" << values[idx];
    separator = ",";
  }
  return out.str();
}
/// Pin the requested plasma objects on this node on behalf of their owner.
///
/// Each object is pinned by holding a reference to its plasma buffer; a
/// long-running WaitForObjectEviction RPC is sent to the owner per object,
/// and the object is unpinned when that RPC completes or fails.
///
/// \param request Contains the owner's address and the object IDs to pin.
/// \param reply Unused beyond signaling completion.
/// \param send_reply_callback Invoked once pinning is set up (or on error).
void NodeManager::HandlePinObjectIDs(const rpc::PinObjectIDsRequest &request,
                                     rpc::PinObjectIDsReply *reply,
                                     rpc::SendReplyCallback send_reply_callback) {
  // Pinning can be disabled by configuration; reply OK without doing anything.
  if (!object_pinning_enabled_) {
    send_reply_callback(Status::OK(), nullptr, nullptr);
    return;
  }
  WorkerID worker_id = WorkerID::FromBinary(request.owner_address().worker_id());
  // Lazily create (and cache) an RPC client to the owning worker. The cached
  // entry also carries a count of in-flight requests (see below).
  auto it = worker_rpc_clients_.find(worker_id);
  if (it == worker_rpc_clients_.end()) {
    auto client = std::unique_ptr<rpc::CoreWorkerClient>(
        new rpc::CoreWorkerClient(request.owner_address().ip_address(),
                                  request.owner_address().port(), client_call_manager_));
    it = worker_rpc_clients_
             .emplace(worker_id,
                      std::make_pair<std::unique_ptr<rpc::CoreWorkerClient>, size_t>(
                          std::move(client), 0))
             .first;
  }
  // Pin the objects in plasma by getting them and holding a reference to
  // the returned buffer.
  // NOTE: the caller must ensure that the objects already exist in plamsa before
  // sending a PinObjectIDs request.
  std::vector<plasma::ObjectID> plasma_ids;
  plasma_ids.reserve(request.object_ids_size());
  for (const auto &object_id_binary : request.object_ids()) {
    plasma_ids.push_back(plasma::ObjectID::from_binary(object_id_binary));
  }
  std::vector<plasma::ObjectBuffer> plasma_results;
  // timeout_ms=0: do not block waiting for objects to appear.
  if (!store_client_.Get(plasma_ids, /*timeout_ms=*/0, &plasma_results).ok()) {
    RAY_LOG(WARNING) << "Failed to get objects to be pinned from object store.";
    send_reply_callback(Status::Invalid("Failed to get objects."), nullptr, nullptr);
    return;
  }
  // Pin the requested objects until the owner notifies us that the objects can be
  // unpinned by responding to the WaitForObjectEviction message.
  // TODO(edoakes): we should be batching these requests instead of sending one per
  // pinned object.
  // NOTE(review): the code assumes plasma_results[i] has valid data/metadata
  // buffers for every requested object (per the precondition above) -- a
  // missing object would presumably yield null buffers here; confirm.
  size_t i = 0;
  for (const auto &object_id_binary : request.object_ids()) {
    ObjectID object_id = ObjectID::FromBinary(object_id_binary);
    RAY_LOG(DEBUG) << "Pinning object " << object_id;
    pinned_objects_.emplace(
        object_id, std::unique_ptr<RayObject>(new RayObject(
                       std::make_shared<PlasmaBuffer>(plasma_results[i].data),
                       std::make_shared<PlasmaBuffer>(plasma_results[i].metadata))));
    i++;
    // Send a long-running RPC request to the owner for each object. When we get a
    // response or the RPC fails (due to the owner crashing), unpin the object.
    rpc::WaitForObjectEvictionRequest wait_request;
    wait_request.set_object_id(object_id_binary);
    wait_request.set_intended_worker_id(request.owner_address().worker_id());
    // Track one more in-flight request against this worker's cached client.
    worker_rpc_clients_[worker_id].second++;
    RAY_CHECK_OK(it->second.first->WaitForObjectEviction(
        wait_request, [this, worker_id, object_id](
                          Status status, const rpc::WaitForObjectEvictionReply &reply) {
          if (!status.ok()) {
            RAY_LOG(WARNING) << "Worker " << worker_id << " failed. Unpinning object "
                             << object_id;
          }
          RAY_LOG(DEBUG) << "Unpinning object " << object_id;
          pinned_objects_.erase(object_id);
          // Remove the cached worker client if there are no more pending requests.
          if (--worker_rpc_clients_[worker_id].second == 0) {
            worker_rpc_clients_.erase(worker_id);
          }
        }));
  }
  send_reply_callback(Status::OK(), nullptr, nullptr);
}
/// Handle a `GetNodeStats` RPC: collect stats for local drivers, infeasible
/// tasks, all exported metrics views, and every live worker on this node.
///
/// The reply is sent once all workers have responded, or immediately when
/// there are no workers. If a worker dies before responding, the reply is
/// only completed by the caller's RPC timeout, so callers must set one.
///
/// \param request The node stats request.
/// \param reply Output protobuf populated with the collected stats.
/// \param send_reply_callback Callback that sends the reply to the caller.
void NodeManager::HandleGetNodeStats(const rpc::GetNodeStatsRequest &request,
                                     rpc::GetNodeStatsReply *reply,
                                     rpc::SendReplyCallback send_reply_callback) {
  // Drivers are reported synchronously; only their pid is recorded here.
  for (const auto &driver : worker_pool_.GetAllDrivers()) {
    auto worker_stats = reply->add_workers_stats();
    worker_stats->set_pid(driver->Process().get()->id());
    worker_stats->set_is_driver(true);
  }
  // Iterate by const reference: the original copied each Task per iteration.
  for (const auto &task : local_queues_.GetTasks(TaskState::INFEASIBLE)) {
    auto infeasible_task = reply->add_infeasible_tasks();
    infeasible_task->ParseFromString(task.GetTaskSpecification().Serialize());
  }
  // Ensure we never report an empty set of metrics.
  if (!recorded_metrics_) {
    RecordMetrics();
    RAY_CHECK(recorded_metrics_);
  }
  // Export every registered OpenCensus view, preserving its data type.
  for (const auto &view : opencensus::stats::StatsExporter::GetViewData()) {
    auto view_data = reply->add_view_data();
    view_data->set_view_name(view.first.name());
    if (view.second.type() == opencensus::stats::ViewData::Type::kInt64) {
      for (const auto &measure : view.second.int_data()) {
        auto measure_data = view_data->add_measures();
        measure_data->set_tags(compact_tag_string(view.first, measure.first));
        measure_data->set_int_value(measure.second);
      }
    } else if (view.second.type() == opencensus::stats::ViewData::Type::kDouble) {
      for (const auto &measure : view.second.double_data()) {
        auto measure_data = view_data->add_measures();
        measure_data->set_tags(compact_tag_string(view.first, measure.first));
        measure_data->set_double_value(measure.second);
      }
    } else {
      RAY_CHECK(view.second.type() == opencensus::stats::ViewData::Type::kDistribution);
      for (const auto &measure : view.second.distribution_data()) {
        auto measure_data = view_data->add_measures();
        measure_data->set_tags(compact_tag_string(view.first, measure.first));
        measure_data->set_distribution_min(measure.second.min());
        measure_data->set_distribution_mean(measure.second.mean());
        measure_data->set_distribution_max(measure.second.max());
        measure_data->set_distribution_count(measure.second.count());
        for (const auto &bound : measure.second.bucket_boundaries().lower_boundaries()) {
          measure_data->add_distribution_bucket_boundaries(bound);
        }
        for (const auto &count : measure.second.bucket_counts()) {
          measure_data->add_distribution_bucket_counts(count);
        }
      }
    }
  }
  // As a result of the HandleGetNodeStats, we are collecting information from all
  // workers on this node. This is done by calling GetCoreWorkerStats on each worker. In
  // order to send up-to-date information back, we wait until all workers have replied,
  // and return the information from HandleNodesStatsRequest. The caller of
  // HandleGetNodeStats should set a timeout so that the rpc finishes even if not all
  // workers have replied.
  auto all_workers = worker_pool_.GetAllWorkers();
  // BUGFIX: with zero workers none of the callbacks below would ever run and
  // the reply would never be sent; send it immediately instead.
  if (all_workers.empty()) {
    send_reply_callback(Status::OK(), nullptr, nullptr);
    return;
  }
  for (const auto &worker : all_workers) {
    rpc::GetCoreWorkerStatsRequest request;
    request.set_intended_worker_id(worker->WorkerId().Binary());
    auto status = worker->rpc_client()->GetCoreWorkerStats(
        request, [reply, worker, all_workers, send_reply_callback](
                     const ray::Status &status, const rpc::GetCoreWorkerStatsReply &r) {
          if (!status.ok()) {
            // NOTE(review): a failed worker never increments num_workers, so
            // the reply completes only via the caller's RPC timeout.
            RAY_LOG(WARNING) << "Failed to send get core worker stats request: "
                             << status.ToString();
          } else {
            auto worker_stats = reply->add_workers_stats();
            worker_stats->set_pid(worker->Process().get()->id());
            worker_stats->set_is_driver(false);
            reply->set_num_workers(reply->num_workers() + 1);
            worker_stats->mutable_core_worker_stats()->MergeFrom(r.core_worker_stats());
            if (reply->num_workers() == all_workers.size()) {
              // Every worker has responded; the reply is complete.
              send_reply_callback(Status::OK(), nullptr, nullptr);
            }
          }
        });
    if (!status.ok()) {
      RAY_LOG(WARNING) << "Failed to send get core worker stats request: "
                       << status.ToString();
    }
  }
}
/// Record this node's metrics: resource gauges, per-subsystem metrics, and
/// aggregate actor statistics.
void NodeManager::RecordMetrics() {
  // Remember that a snapshot was taken even when stats are disabled;
  // HandleGetNodeStats consults this flag before reporting.
  recorded_metrics_ = true;
  if (stats::StatsConfig::instance().IsStatsDisabled()) {
    return;
  }
  // Export a gauge per resource for what is currently free on this node...
  const auto &free_resources =
      cluster_resource_map_.at(self_node_id_).GetAvailableResources().GetResourceMap();
  for (const auto &resource : free_resources) {
    stats::LocalAvailableResource().Record(resource.second,
                                           {{stats::ResourceNameKey, resource.first}});
  }
  // ...and one for the node's total capacity.
  const auto &capacity_resources =
      cluster_resource_map_.at(self_node_id_).GetTotalResources().GetResourceMap();
  for (const auto &resource : capacity_resources) {
    stats::LocalTotalResource().Record(resource.second,
                                       {{stats::ResourceNameKey, resource.first}});
  }
  // Let every subsystem export its own metrics.
  object_manager_.RecordMetrics();
  worker_pool_.RecordMetrics();
  local_queues_.RecordMetrics();
  reconstruction_policy_.RecordMetrics();
  task_dependency_manager_.RecordMetrics();
  lineage_cache_.RecordMetrics();
  // Aggregate actor statistics from the local registry, tagged by value type.
  const auto actor_data = GetActorStatisticalData(actor_registry_);
  stats::ActorStats().Record(actor_data.live_actors,
                             {{stats::ValueTypeKey, "live_actors"}});
  stats::ActorStats().Record(actor_data.reconstructing_actors,
                             {{stats::ValueTypeKey, "reconstructing_actors"}});
  stats::ActorStats().Record(actor_data.dead_actors,
                             {{stats::ValueTypeKey, "dead_actors"}});
  stats::ActorStats().Record(actor_data.max_num_handles,
                             {{stats::ValueTypeKey, "max_num_handles"}});
}
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/node_manager.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_NODE_MANAGER_H
#define RAY_RAYLET_NODE_MANAGER_H
#include <boost/asio/steady_timer.hpp>
// clang-format off
#include "ray/rpc/grpc_client.h"
#include "ray/rpc/node_manager/node_manager_server.h"
#include "ray/rpc/node_manager/node_manager_client.h"
#include "ray/common/task/task.h"
#include "ray/common/ray_object.h"
#include "ray/common/client_connection.h"
#include "ray/common/task/task_common.h"
#include "ray/common/task/scheduling_resources.h"
#include "ray/common/scheduling/scheduling_ids.h"
#include "ray/common/scheduling/cluster_resource_scheduler.h"
#include "ray/object_manager/object_manager.h"
#include "ray/raylet/actor_registration.h"
#include "ray/raylet/lineage_cache.h"
#include "ray/raylet/scheduling_policy.h"
#include "ray/raylet/scheduling_queue.h"
#include "ray/raylet/reconstruction_policy.h"
#include "ray/raylet/task_dependency_manager.h"
#include "ray/raylet/worker_pool.h"
#include "ray/util/ordered_set.h"
// clang-format on
namespace ray {
namespace raylet {
using rpc::ActorTableData;
using rpc::ErrorType;
using rpc::GcsNodeInfo;
using rpc::HeartbeatBatchTableData;
using rpc::HeartbeatTableData;
using rpc::JobTableData;
/// Static configuration for a NodeManager instance, assembled at raylet
/// start-up and passed to the NodeManager constructor.
struct NodeManagerConfig {
  /// The node's resource configuration.
  ResourceSet resource_config;
  /// The IP address this node manager is running on.
  std::string node_manager_address;
  /// The port to use for listening to incoming connections. If this is 0 then
  /// the node manager will choose its own port.
  int node_manager_port;
  /// The initial number of workers to create.
  int num_initial_workers;
  /// The maximum number of workers that can be started concurrently by a
  /// worker pool.
  int maximum_startup_concurrency;
  /// The commands used to start the worker process, grouped by language.
  WorkerCommandMap worker_commands;
  /// The time between heartbeats in milliseconds.
  uint64_t heartbeat_period_ms;
  /// The time between debug dumps in milliseconds, or -1 to disable.
  uint64_t debug_dump_period_ms;
  /// Whether to enable fair queueing between task classes in raylet.
  bool fair_queueing_enabled;
  /// Whether to enable pinning for plasma objects.
  bool object_pinning_enabled;
  /// The maximum lineage size.
  uint64_t max_lineage_size;
  /// The store socket name.
  std::string store_socket_name;
  /// The path to the ray temp dir.
  std::string temp_dir;
  /// The path of this ray session dir.
  std::string session_dir;
};
/// The core raylet component. NodeManager implements the node manager RPC
/// service and ties together task scheduling, the worker pool, the object
/// manager, and GCS state for a single node.
class NodeManager : public rpc::NodeManagerServiceHandler {
 public:
  /// Create a node manager.
  ///
  /// \param resource_config The initial set of node resources.
  /// \param object_manager A reference to the local object manager.
  NodeManager(boost::asio::io_service &io_service, const ClientID &self_node_id,
              const NodeManagerConfig &config, ObjectManager &object_manager,
              std::shared_ptr<gcs::GcsClient> gcs_client,
              std::shared_ptr<ObjectDirectoryInterface> object_directory_);
  /// Process a new client connection.
  ///
  /// \param client The client to process.
  /// \return Void.
  void ProcessNewClient(LocalClientConnection &client);
  /// Process a message from a client. This method is responsible for
  /// explicitly listening for more messages from the client if the client is
  /// still alive.
  ///
  /// \param client The client that sent the message.
  /// \param message_type The message type (e.g., a flatbuffer enum).
  /// \param message_data A pointer to the message data.
  /// \return Void.
  void ProcessClientMessage(const std::shared_ptr<LocalClientConnection> &client,
                            int64_t message_type, const uint8_t *message_data);
  /// Subscribe to the relevant GCS tables and set up handlers.
  ///
  /// \return Status indicating whether this was done successfully or not.
  ray::Status RegisterGcs();
  /// Get initial node manager configuration.
  const NodeManagerConfig &GetInitialConfig() const;
  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;
  /// Record metrics.
  void RecordMetrics();
  /// Get the port of the node manager rpc server.
  int GetServerPort() const { return node_manager_server_.GetPort(); }
 private:
  /// Methods for handling clients.
  /// Handle an unexpected failure notification from GCS pubsub.
  ///
  /// \param worker_id The ID of the failed worker.
  /// \param worker_data Data associated with the worker failure.
  void HandleUnexpectedWorkerFailure(const WorkerID &worker_id,
                                     const gcs::WorkerFailureData &worker_failed_data);
  /// Handler for the addition of a new node.
  ///
  /// \param data Data associated with the new node.
  /// \return Void.
  void NodeAdded(const GcsNodeInfo &data);
  /// Handler for the removal of a GCS node.
  /// \param node_info Data associated with the removed node.
  /// \return Void.
  void NodeRemoved(const GcsNodeInfo &node_info);
  /// Handler for the addition or update of a resource in the GCS.
  /// \param client_id ID of the node that created or updated resources.
  /// \param createUpdatedResources Created or updated resources.
  /// \return Void.
  void ResourceCreateUpdated(const ClientID &client_id,
                             const ResourceSet &createUpdatedResources);
  /// Handler for the deletion of a resource in the GCS
  /// \param client_id ID of the node that deleted resources.
  /// \param resource_names Names of deleted resources.
  /// \return Void.
  void ResourceDeleted(const ClientID &client_id,
                       const std::vector<std::string> &resource_names);
  /// Evaluates the local infeasible queue to check if any tasks can be scheduled.
  /// This is called whenever there's an update to the resources on the local client.
  /// \return Void.
  void TryLocalInfeasibleTaskScheduling();
  /// Send heartbeats to the GCS.
  void Heartbeat();
  /// Write out debug state to a file.
  void DumpDebugState() const;
  /// Get profiling information from the object manager and push it to the GCS.
  ///
  /// \return Void.
  void GetObjectManagerProfileInfo();
  /// Handler for a heartbeat notification from the GCS.
  ///
  /// \param id The ID of the node manager that sent the heartbeat.
  /// \param data The heartbeat data including load information.
  /// \return Void.
  void HeartbeatAdded(const ClientID &id, const HeartbeatTableData &data);
  /// Handler for a heartbeat batch notification from the GCS
  ///
  /// \param heartbeat_batch The batch of heartbeat data.
  void HeartbeatBatchAdded(const HeartbeatBatchTableData &heartbeat_batch);
  /// Methods for task scheduling.
  /// Enqueue a placeable task to wait on object dependencies or be ready for
  /// dispatch.
  ///
  /// \param task The task in question.
  /// \return Void.
  void EnqueuePlaceableTask(const Task &task);
  /// This will treat a task removed from the local queue as if it had been
  /// executed and failed. This is done by looping over the task return IDs and
  /// for each ID storing an object that represents a failure in the object
  /// store. When clients retrieve these objects, they will raise
  /// application-level exceptions. State for the task will be cleaned up as if
  /// it were any other task that had been assigned, executed, and removed from
  /// the local queue.
  ///
  /// \param task The task to fail.
  /// \param error_type The type of the error that caused this task to fail.
  /// \return Void.
  void TreatTaskAsFailed(const Task &task, const ErrorType &error_type);
  /// Mark the specified objects as failed with the given error type.
  ///
  /// \param error_type The type of the error that caused this task to fail.
  /// \param object_ids The object ids to store error messages into.
  /// \param job_id The optional job to push errors to if the writes fail.
  void MarkObjectsAsFailed(const ErrorType &error_type,
                           const std::vector<plasma::ObjectID> object_ids,
                           const JobID &job_id);
  /// This is similar to TreatTaskAsFailed, but it will only mark the task as
  /// failed if at least one of the task's return values is lost. A return
  /// value is lost if it has been created before, but no longer exists on any
  /// nodes, due to either node failure or eviction.
  ///
  /// \param task The task to potentially fail.
  /// \return Void.
  void TreatTaskAsFailedIfLost(const Task &task);
  /// Handle specified task's submission to the local node manager.
  ///
  /// \param task The task being submitted.
  /// \param uncommitted_lineage The uncommitted lineage of the task.
  /// \param forwarded True if the task has been forwarded from a different
  /// node manager and false if it was submitted by a local worker.
  /// \return Void.
  void SubmitTask(const Task &task, const Lineage &uncommitted_lineage,
                  bool forwarded = false);
  /// Assign a task to a worker. The task is assumed to not be queued in local_queues_.
  ///
  /// \param[in] worker The worker to assign the task to.
  /// \param[in] task The task in question.
  /// \param[out] post_assign_callbacks Vector of callbacks that will be appended
  /// to with any logic that should run after the DispatchTasks loop runs.
  void AssignTask(const std::shared_ptr<Worker> &worker, const Task &task,
                  std::vector<std::function<void()>> *post_assign_callbacks);
  /// Handle a worker finishing its assigned task.
  ///
  /// \param worker The worker that finished the task.
  /// \return Whether the worker should be returned to the idle pool. This is
  /// only false for direct actor creation calls, which should never be
  /// returned to idle.
  bool FinishAssignedTask(Worker &worker);
  /// Helper function to produce actor table data for a newly created actor.
  ///
  /// \param task_spec Task specification of the actor creation task that created the
  /// actor.
  /// \param port The port that the actor is listening on.
  /// \param worker_id ID of the worker that the actor runs on.
  std::shared_ptr<ActorTableData> CreateActorTableDataFromCreationTask(
      const TaskSpecification &task_spec, int port, const WorkerID &worker_id);
  /// Handle a worker finishing an assigned actor task or actor creation task.
  /// \param worker The worker that finished the task.
  /// \param task The actor task or actor creation task.
  /// \return Void.
  void FinishAssignedActorTask(Worker &worker, const Task &task);
  /// Helper function for handling worker to finish its assigned actor task
  /// or actor creation task. Gets invoked when tasks's parent actor is known.
  ///
  /// \param parent_actor_id The actor id corresponding to the actor which creates
  /// the new actor.
  /// \param task_spec Task specification of the actor creation task that created the
  /// actor.
  /// \param resumed_from_checkpoint If the actor was resumed from a checkpoint.
  /// \param port Rpc server port that the actor is listening on.
  /// \return Void.
  void FinishAssignedActorCreationTask(const ActorID &parent_actor_id,
                                       const TaskSpecification &task_spec,
                                       bool resumed_from_checkpoint, int port,
                                       const WorkerID &worker_id);
  /// Make a placement decision for placeable tasks given the resource_map
  /// provided. This will perform task state transitions and task forwarding.
  ///
  /// \param resource_map A mapping from node manager ID to an estimate of the
  /// resources available to that node manager. Scheduling decisions will only
  /// consider the local node manager and the node managers in the keys of the
  /// resource_map argument.
  /// \return Void.
  void ScheduleTasks(std::unordered_map<ClientID, SchedulingResources> &resource_map);
  /// Handle a task whose return value(s) must be reconstructed.
  ///
  /// \param task_id The relevant task ID.
  /// \param required_object_id The object id we are reconstructing for.
  /// \return Void.
  void HandleTaskReconstruction(const TaskID &task_id,
                                const ObjectID &required_object_id);
  /// Resubmit a task for execution. This is a task that was previously already
  /// submitted to a raylet but which must now be re-executed.
  ///
  /// \param task The task being resubmitted.
  /// \param required_object_id The object id that triggered the resubmission.
  /// \return Void.
  void ResubmitTask(const Task &task, const ObjectID &required_object_id);
  /// Attempt to forward a task to a remote different node manager. If this
  /// fails, the task will be resubmitted locally.
  ///
  /// \param task The task in question.
  /// \param node_manager_id The ID of the remote node manager.
  /// \return Void.
  void ForwardTaskOrResubmit(const Task &task, const ClientID &node_manager_id);
  /// Forward a task to another node to execute. The task is assumed to not be
  /// queued in local_queues_.
  ///
  /// \param task The task to forward.
  /// \param node_id The ID of the node to forward the task to.
  /// \param on_error Callback on run on non-ok status.
  void ForwardTask(
      const Task &task, const ClientID &node_id,
      const std::function<void(const ray::Status &, const Task &)> &on_error);
  /// Dispatch locally scheduled tasks. This attempts the transition from "scheduled" to
  /// "running" task state.
  ///
  /// This function is called in the following cases:
  /// (1) A set of new tasks is added to the ready queue.
  /// (2) New resources are becoming available on the local node.
  /// (3) A new worker becomes available.
  /// Note in case (1) we only need to look at the new tasks added to the
  /// ready queue, as we know that the old tasks in the ready queue cannot
  /// be scheduled (We checked those tasks last time new resources or
  /// workers became available, and nothing changed since then.) In this case,
  /// tasks_with_resources contains only the newly added tasks to the
  /// ready queue. Otherwise, tasks_with_resources points to entire ready queue.
  /// \param tasks_with_resources Mapping from resource shapes to tasks with
  /// that resource shape.
  void DispatchTasks(
      const std::unordered_map<SchedulingClass, ordered_set<TaskID>> &tasks_by_class);
  /// Handle blocking gets of objects. This could be a task assigned to a worker,
  /// an out-of-band task (e.g., a thread created by the application), or a
  /// driver task. This can be triggered when a client starts a get call or a
  /// wait call.
  ///
  /// \param client The client that is executing the blocked task.
  /// \param required_object_ids The IDs that the client is blocked waiting for.
  /// \param current_task_id The task that is blocked.
  /// \param ray_get Whether the task is blocked in a `ray.get` call.
  /// \param mark_worker_blocked Whether to mark the worker as blocked. This
  /// should be False for direct calls.
  /// \return Void.
  void AsyncResolveObjects(const std::shared_ptr<LocalClientConnection> &client,
                           const std::vector<ObjectID> &required_object_ids,
                           const TaskID &current_task_id, bool ray_get,
                           bool mark_worker_blocked);
  /// Handle end of a blocking object get. This could be a task assigned to a
  /// worker, an out-of-band task (e.g., a thread created by the application),
  /// or a driver task. This can be triggered when a client finishes a get call
  /// or a wait call. The given task must be blocked, via a previous call to
  /// AsyncResolveObjects.
  ///
  /// \param client The client that is executing the unblocked task.
  /// \param current_task_id The task that is unblocked.
  /// \param was_blocked Whether we previously marked the worker as
  /// blocked in AsyncResolveObjects().
  /// \return Void.
  void AsyncResolveObjectsFinish(const std::shared_ptr<LocalClientConnection> &client,
                                 const TaskID &current_task_id, bool was_blocked);
  /// Handle a direct call task that is blocked. Note that this callback may
  /// arrive after the worker lease has been returned to the node manager.
  ///
  /// \param worker Shared ptr to the worker, or nullptr if lost.
  void HandleDirectCallTaskBlocked(const std::shared_ptr<Worker> &worker);
  /// Handle a direct call task that is unblocked. Note that this callback may
  /// arrive after the worker lease has been returned to the node manager.
  /// However, it is guaranteed to arrive after DirectCallTaskBlocked.
  ///
  /// \param worker Shared ptr to the worker, or nullptr if lost.
  void HandleDirectCallTaskUnblocked(const std::shared_ptr<Worker> &worker);
  /// Kill a worker.
  ///
  /// \param worker The worker to kill.
  /// \return Void.
  void KillWorker(std::shared_ptr<Worker> worker);
  /// The callback for handling an actor state transition (e.g., from ALIVE to
  /// DEAD), whether as a notification from the actor table or as a handler for
  /// a local actor's state transition. This method is idempotent and will ignore
  /// old state transition.
  ///
  /// \param actor_id The actor ID of the actor whose state was updated.
  /// \param actor_registration The ActorRegistration object that represents actor's
  /// new state.
  /// \return Void.
  void HandleActorStateTransition(const ActorID &actor_id,
                                  ActorRegistration &&actor_registration);
  /// When a job finished, loop over all of the queued tasks for that job and
  /// treat them as failed.
  ///
  /// \param job_id The job that exited.
  /// \return Void.
  void CleanUpTasksForFinishedJob(const JobID &job_id);
  /// Handle an object becoming local. This updates any local accounting, but
  /// does not write to any global accounting in the GCS.
  ///
  /// \param object_id The object that is locally available.
  /// \return Void.
  void HandleObjectLocal(const ObjectID &object_id);
  /// Handle an object that is no longer local. This updates any local
  /// accounting, but does not write to any global accounting in the GCS.
  ///
  /// \param object_id The object that has been evicted locally.
  /// \return Void.
  void HandleObjectMissing(const ObjectID &object_id);
  /// Handles the event that a job is finished.
  ///
  /// \param job_id ID of the finished job.
  /// \param job_data Data associated with the finished job.
  /// \return Void.
  void HandleJobFinished(const JobID &job_id, const JobTableData &job_data);
  /// Check if certain invariants associated with the task dependency manager
  /// and the local queues are satisfied. This is only used for debugging
  /// purposes.
  ///
  /// \return True if the invariants are satisfied and false otherwise.
  bool CheckDependencyManagerInvariant() const;
  /// Process client message of SubmitTask
  ///
  /// \param message_data A pointer to the message data.
  /// \return Void.
  void ProcessSubmitTaskMessage(const uint8_t *message_data);
  /// Process client message of RegisterClientRequest
  ///
  /// \param client The client that sent the message.
  /// \param message_data A pointer to the message data.
  /// \return Void.
  void ProcessRegisterClientRequestMessage(
      const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data);
  /// Handle the case that a worker is available.
  ///
  /// \param client The connection for the worker.
  /// \return Void.
  void HandleWorkerAvailable(const std::shared_ptr<LocalClientConnection> &client);
  /// Handle the case that a worker is available.
  ///
  /// \param worker The pointer to the worker
  /// \return Void.
  void HandleWorkerAvailable(const std::shared_ptr<Worker> &worker);
  /// Handle a client that has disconnected. This can be called multiple times
  /// on the same client because this is triggered both when a client
  /// disconnects and when the node manager fails to write a message to the
  /// client.
  ///
  /// \param client The client that sent the message.
  /// \param intentional_disconnect Whether the client was intentionally disconnected.
  /// \return Void.
  void ProcessDisconnectClientMessage(
      const std::shared_ptr<LocalClientConnection> &client,
      bool intentional_disconnect = false);
  /// Process client message of FetchOrReconstruct
  ///
  /// \param client The client that sent the message.
  /// \param message_data A pointer to the message data.
  /// \return Void.
  void ProcessFetchOrReconstructMessage(
      const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data);
  /// Process client message of WaitRequest
  ///
  /// \param client The client that sent the message.
  /// \param message_data A pointer to the message data.
  /// \return Void.
  void ProcessWaitRequestMessage(const std::shared_ptr<LocalClientConnection> &client,
                                 const uint8_t *message_data);
  /// Process client message of WaitForDirectActorCallArgsRequest
  ///
  /// \param client The client that sent the message.
  /// \param message_data A pointer to the message data.
  /// \return Void.
  void ProcessWaitForDirectActorCallArgsRequestMessage(
      const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data);
  /// Process client message of PushErrorRequest
  ///
  /// \param message_data A pointer to the message data.
  /// \return Void.
  void ProcessPushErrorRequestMessage(const uint8_t *message_data);
  /// Process client message of PrepareActorCheckpointRequest.
  ///
  /// \param client The client that sent the message.
  /// \param message_data A pointer to the message data.
  void ProcessPrepareActorCheckpointRequest(
      const std::shared_ptr<LocalClientConnection> &client, const uint8_t *message_data);
  /// Process client message of NotifyActorResumedFromCheckpoint.
  ///
  /// \param message_data A pointer to the message data.
  void ProcessNotifyActorResumedFromCheckpoint(const uint8_t *message_data);
  /// Process client message of ReportActiveObjectIDs.
  ///
  /// \param client The client that sent the message.
  /// \param message_data A pointer to the message data.
  void ProcessReportActiveObjectIDs(const std::shared_ptr<LocalClientConnection> &client,
                                    const uint8_t *message_data);
  /// Update actor frontier when a task finishes.
  /// If the task is an actor creation task and the actor was resumed from a checkpoint,
  /// restore the frontier from the checkpoint. Otherwise, just extend actor frontier.
  ///
  /// \param task The task that just finished.
  void UpdateActorFrontier(const Task &task);
  /// Process client message of SetResourceRequest
  /// \param client The client that sent the message.
  /// \param message_data A pointer to the message data.
  /// \return Void.
  void ProcessSetResourceRequest(const std::shared_ptr<LocalClientConnection> &client,
                                 const uint8_t *message_data);
  /// Handle the case where an actor is disconnected, determine whether this
  /// actor needs to be reconstructed and then update actor table.
  /// This function needs to be called either when actor process dies or when
  /// a node dies.
  ///
  /// \param actor_id Id of this actor.
  /// \param was_local Whether the disconnected was on this local node.
  /// \param intentional_disconnect Whether the client was intentionally disconnected.
  /// \return Void.
  void HandleDisconnectedActor(const ActorID &actor_id, bool was_local,
                               bool intentional_disconnect);
  /// Finish assigning a task to a worker.
  ///
  /// \param worker Worker that the task is assigned to.
  /// \param task_id Id of the task.
  /// \param success Whether or not assigning the task was successful.
  /// \return void.
  void FinishAssignTask(const std::shared_ptr<Worker> &worker, const TaskID &task_id,
                        bool success);
  /// Handle a `WorkerLease` request.
  void HandleRequestWorkerLease(const rpc::RequestWorkerLeaseRequest &request,
                                rpc::RequestWorkerLeaseReply *reply,
                                rpc::SendReplyCallback send_reply_callback) override;
  /// Handle a `ReturnWorker` request.
  void HandleReturnWorker(const rpc::ReturnWorkerRequest &request,
                          rpc::ReturnWorkerReply *reply,
                          rpc::SendReplyCallback send_reply_callback) override;
  /// Handle a `ForwardTask` request.
  void HandleForwardTask(const rpc::ForwardTaskRequest &request,
                         rpc::ForwardTaskReply *reply,
                         rpc::SendReplyCallback send_reply_callback) override;
  /// Handle a `PinObjectIDs` request.
  void HandlePinObjectIDs(const rpc::PinObjectIDsRequest &request,
                          rpc::PinObjectIDsReply *reply,
                          rpc::SendReplyCallback send_reply_callback) override;
  /// Handle a `NodeStats` request.
  void HandleGetNodeStats(const rpc::GetNodeStatsRequest &request,
                          rpc::GetNodeStatsReply *reply,
                          rpc::SendReplyCallback send_reply_callback) override;
  /// Push an error to the driver if this node is full of actors and so we are
  /// unable to schedule new tasks or actors at all.
  void WarnResourceDeadlock();
  /// Dispatch tasks to available workers.
  void DispatchScheduledTasksToWorkers();
  /// For the pending task at the head of tasks_to_schedule_, return a node
  /// in the system (local or remote) that has enough resources available to
  /// run the task, if any such node exist.
  /// Repeat the process as long as we can schedule a task.
  void NewSchedulerSchedulePendingTasks();
  /// Whether a task is a direct actor creation task.
  bool IsDirectActorCreationTask(const TaskID &task_id);
  /// ID of this node.
  ClientID self_node_id_;
  boost::asio::io_service &io_service_;
  ObjectManager &object_manager_;
  /// A Plasma object store client. This is used for creating new objects in
  /// the object store (e.g., for actor tasks that can't be run because the
  /// actor died) and to pin objects that are in scope in the cluster.
  plasma::PlasmaClient store_client_;
  /// A client connection to the GCS.
  std::shared_ptr<gcs::GcsClient> gcs_client_;
  /// The object table. This is shared with the object manager.
  std::shared_ptr<ObjectDirectoryInterface> object_directory_;
  /// The timer used to send heartbeats.
  boost::asio::steady_timer heartbeat_timer_;
  /// The period used for the heartbeat timer.
  std::chrono::milliseconds heartbeat_period_;
  /// The period between debug state dumps.
  int64_t debug_dump_period_;
  /// Whether to enable fair queueing between task classes in raylet.
  bool fair_queueing_enabled_;
  /// Whether to enable pinning for plasma objects.
  bool object_pinning_enabled_;
  /// Whether we have printed out a resource deadlock warning.
  bool resource_deadlock_warned_ = false;
  /// Whether we have recorded any metrics yet.
  bool recorded_metrics_ = false;
  /// The path to the ray temp dir.
  std::string temp_dir_;
  /// The timer used to get profiling information from the object manager and
  /// push it to the GCS.
  boost::asio::steady_timer object_manager_profile_timer_;
  /// The time that the last heartbeat was sent at. Used to make sure we are
  /// keeping up with heartbeats.
  uint64_t last_heartbeat_at_ms_;
  /// The time that the last debug string was logged to the console.
  uint64_t last_debug_dump_at_ms_;
  /// Initial node manager configuration.
  const NodeManagerConfig initial_config_;
  /// The resources (and specific resource IDs) that are currently available.
  ResourceIdSet local_available_resources_;
  std::unordered_map<ClientID, SchedulingResources> cluster_resource_map_;
  /// A pool of workers.
  WorkerPool worker_pool_;
  /// A set of queues to maintain tasks.
  SchedulingQueue local_queues_;
  /// The scheduling policy in effect for this raylet.
  SchedulingPolicy scheduling_policy_;
  /// The reconstruction policy for deciding when to re-execute a task.
  ReconstructionPolicy reconstruction_policy_;
  /// A manager to make waiting tasks's missing object dependencies available.
  TaskDependencyManager task_dependency_manager_;
  /// The lineage cache for the GCS object and task tables.
  LineageCache lineage_cache_;
  /// A mapping from actor ID to registration information about that actor
  /// (including which node manager owns it).
  std::unordered_map<ActorID, ActorRegistration> actor_registry_;
  /// This map stores actor ID to the ID of the checkpoint that will be used to
  /// restore the actor.
  std::unordered_map<ActorID, ActorCheckpointID> checkpoint_id_to_restore_;
  /// The RPC server.
  rpc::GrpcServer node_manager_server_;
  /// The node manager RPC service.
  rpc::NodeManagerGrpcService node_manager_service_;
  /// The `ClientCallManager` object that is shared by all `NodeManagerClient`s
  /// as well as all `CoreWorkerClient`s.
  rpc::ClientCallManager client_call_manager_;
  /// Map from node ids to clients of the remote node managers.
  std::unordered_map<ClientID, std::unique_ptr<rpc::NodeManagerClient>>
      remote_node_manager_clients_;
  /// Map of workers leased out to direct call clients.
  std::unordered_map<WorkerID, std::shared_ptr<Worker>> leased_workers_;
  /// Whether new schedule is enabled.
  const bool new_scheduler_enabled_;
  /// The new resource scheduler for direct task calls.
  std::shared_ptr<ClusterResourceScheduler> new_resource_scheduler_;
  /// Map of leased workers to their current resource usage.
  /// TODO(ion): Check whether we can track these resources in the worker.
  std::unordered_map<WorkerID, ResourceSet> leased_worker_resources_;
  typedef std::function<void(std::shared_ptr<Worker>, ClientID spillback_to,
                             std::string address, int port)>
      ScheduleFn;
  /// Queue of lease requests that are waiting for resources to become available.
  /// TODO this should be a queue for each SchedulingClass
  std::deque<std::pair<ScheduleFn, Task>> tasks_to_schedule_;
  /// Queue of lease requests that should be scheduled onto workers.
  std::deque<std::pair<ScheduleFn, Task>> tasks_to_dispatch_;
  /// Cache of gRPC clients to workers (not necessarily running on this node).
  /// Also includes the number of inflight requests to each worker - when this
  /// reaches zero, the client will be deleted and a new one will need to be created
  /// for any subsequent requests.
  absl::flat_hash_map<WorkerID, std::pair<std::unique_ptr<rpc::CoreWorkerClient>, size_t>>
      worker_rpc_clients_;
  absl::flat_hash_map<ObjectID, std::unique_ptr<RayObject>> pinned_objects_;
  /// Start resolving the object dependencies of a queued lease request before
  /// it is dispatched. NOTE(review): inferred from the name and the
  /// tasks_to_schedule_/tasks_to_dispatch_ queues above — confirm against the
  /// definition in node_manager.cc.
  void WaitForTaskArgsRequests(std::pair<ScheduleFn, Task> &work);
};
} // namespace raylet
} // end namespace ray
#endif // RAY_RAYLET_NODE_MANAGER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/object_manager_integration_test.cc
|
C++
|
#include <iostream>
#include <thread>
#include "gtest/gtest.h"
#include "ray/common/status.h"
#include "ray/raylet/raylet.h"
namespace ray {
namespace raylet {
// Absolute path of this test binary (set from argv[0] in main()); TearDown()
// uses its directory to locate and remove the raylet socket files.
std::string test_executable;
// Path of the plasma store binary (set from argv[1] in main()); StartStore()
// shells out to it.
std::string store_executable;
// TODO(hme): Get this working once the dust settles.
/// Shared fixture for object manager integration tests: starts two plasma
/// store processes and two raylet servers on a single io_service, and
/// connects one plasma client to each store.
class TestObjectManagerBase : public ::testing::Test {
 public:
  TestObjectManagerBase() { RAY_LOG(INFO) << "TestObjectManagerBase: started."; }

  /// Launch a plasma store process in the background.
  ///
  /// \param id Suffix appended to "/tmp/store" to form the socket path.
  /// \return The store socket path.
  std::string StartStore(const std::string &id) {
    std::string store_id = "/tmp/store";
    store_id = store_id + id;
    // 1 GB store; stdout/stderr discarded, process backgrounded with '&'.
    std::string plasma_command = store_executable + " -m 1000000000 -s " + store_id +
                                 " 1> /dev/null 2> /dev/null &";
    RAY_LOG(INFO) << plasma_command;
    int ec = system(plasma_command.c_str());
    RAY_CHECK(ec == 0);
    return store_id;
  }

  /// Build a minimal node manager configuration: 1 CPU / 1 GPU, no initial
  /// workers, with a Python worker command pointing at the default worker.
  NodeManagerConfig GetNodeManagerConfig(std::string raylet_socket_name,
                                         std::string store_socket_name) {
    // Configuration for the node manager.
    ray::raylet::NodeManagerConfig node_manager_config;
    std::unordered_map<std::string, double> static_resource_conf;
    static_resource_conf = {{"CPU", 1}, {"GPU", 1}};
    node_manager_config.resource_config =
        ray::raylet::ResourceSet(std::move(static_resource_conf));
    node_manager_config.num_initial_workers = 0;
    // Use a default worker that can execute empty tasks with dependencies.
    std::vector<std::string> py_worker_command;
    py_worker_command.push_back("python");
    py_worker_command.push_back("../python/ray/workers/default_worker.py");
    py_worker_command.push_back(raylet_socket_name.c_str());
    py_worker_command.push_back(store_socket_name.c_str());
    node_manager_config.worker_commands[Language::PYTHON] = py_worker_command;
    return node_manager_config;
  };

  /// Start both stores and both raylet servers, then connect a plasma client
  /// to each store. Assumes a redis instance is reachable at 127.0.0.1:6379.
  void SetUp() {
    // start store
    std::string store_sock_1 = StartStore("1");
    std::string store_sock_2 = StartStore("2");
    // start first server
    gcs::GcsClientOptions client_options("127.0.0.1", 6379, /*password*/ "", true);
    gcs_client_1 =
        std::shared_ptr<gcs::RedisGcsClient>(new gcs::RedisGcsClient(client_options));
    ObjectManagerConfig om_config_1;
    om_config_1.store_socket_name = store_sock_1;
    om_config_1.push_timeout_ms = 10000;
    server1.reset(new ray::raylet::Raylet(
        main_service, "raylet_1", "0.0.0.0", "127.0.0.1", 6379, "",
        GetNodeManagerConfig("raylet_1", store_sock_1), om_config_1, gcs_client_1));
    // start second server
    gcs_client_2 =
        std::shared_ptr<gcs::RedisGcsClient>(new gcs::RedisGcsClient(client_options));
    ObjectManagerConfig om_config_2;
    om_config_2.store_socket_name = store_sock_2;
    om_config_2.push_timeout_ms = 10000;
    server2.reset(new ray::raylet::Raylet(
        main_service, "raylet_2", "0.0.0.0", "127.0.0.1", 6379, "",
        GetNodeManagerConfig("raylet_2", store_sock_2), om_config_2, gcs_client_2));
    // connect to stores.
    RAY_ARROW_CHECK_OK(client1.Connect(store_sock_1));
    RAY_ARROW_CHECK_OK(client2.Connect(store_sock_2));
  }

  /// Disconnect the plasma clients, tear down both raylets, kill the plasma
  /// store processes, and remove the raylet socket files.
  void TearDown() {
    arrow::Status client1_status = client1.Disconnect();
    arrow::Status client2_status = client2.Disconnect();
    ASSERT_TRUE(client1_status.ok() && client2_status.ok());
    this->server1.reset();
    this->server2.reset();
    int s = system("killall plasma_store_server &");
    ASSERT_TRUE(!s);
    // Socket files were created in the test binary's own directory.
    std::string cmd_str = test_executable.substr(0, test_executable.find_last_of("/"));
    s = system(("rm " + cmd_str + "/raylet_1").c_str());
    ASSERT_TRUE(!s);
    s = system(("rm " + cmd_str + "/raylet_2").c_str());
    ASSERT_TRUE(!s);
  }

  /// Create and seal a randomly-named object of `data_size` bytes in the
  /// given store.
  ///
  /// \return The ID of the newly created object.
  ObjectID WriteDataToClient(plasma::PlasmaClient &client, int64_t data_size) {
    ObjectID object_id = ObjectID::FromRandom();
    RAY_LOG(DEBUG) << "ObjectID Created: " << object_id;
    uint8_t metadata[] = {5};
    int64_t metadata_size = sizeof(metadata);
    std::shared_ptr<Buffer> data;
    RAY_ARROW_CHECK_OK(
        client.Create(object_id.ToPlasmaId(), data_size, metadata, metadata_size, &data));
    RAY_ARROW_CHECK_OK(client.Seal(object_id.ToPlasmaId()));
    return object_id;
  }

 protected:
  std::thread p;
  boost::asio::io_service main_service;
  std::shared_ptr<gcs::RedisGcsClient> gcs_client_1;
  std::shared_ptr<gcs::RedisGcsClient> gcs_client_2;
  std::unique_ptr<ray::raylet::Raylet> server1;
  std::unique_ptr<ray::raylet::Raylet> server2;
  plasma::PlasmaClient client1;
  plasma::PlasmaClient client2;
  // Object IDs observed as "added" on server 1 / server 2, respectively.
  std::vector<ObjectID> v1;
  std::vector<ObjectID> v2;
};
class TestObjectManagerIntegration : public TestObjectManagerBase {
public:
size_t num_expected_objects;
int num_connected_clients = 0;
ClientID node_id_1;
ClientID node_id_2;
void WaitConnections() {
node_id_1 = gcs_client_1->Nodes().GetSelfId();
node_id_2 = gcs_client_2->Nodes().GetSelfId();
gcs_client_1->Nodes().AsyncSubscribeToNodeChange(
[this](const ClientID &node_id, const rpc::GcsNodeInfo &data) {
if (node_id == node_id_1 || node_id == node_id_2) {
num_connected_clients += 1;
}
if (num_connected_clients == 2) {
StartTests();
}
},
nullptr);
}
void StartTests() {
TestConnections();
AddTransferTestHandlers();
TestPush(100);
}
void AddTransferTestHandlers() {
ray::Status status = ray::Status::OK();
status = server1->object_manager_.SubscribeObjAdded(
[this](const object_manager::protocol::ObjectInfoT &object_info) {
v1.push_back(ObjectID::FromBinary(object_info.object_id));
if (v1.size() == num_expected_objects && v1.size() == v2.size()) {
TestPushComplete();
}
});
RAY_CHECK_OK(status);
status = server2->object_manager_.SubscribeObjAdded(
[this](const object_manager::protocol::ObjectInfoT &object_info) {
v2.push_back(ObjectID::FromBinary(object_info.object_id));
if (v2.size() == num_expected_objects && v1.size() == v2.size()) {
TestPushComplete();
}
});
RAY_CHECK_OK(status);
}
void TestPush(int64_t data_size) {
ray::Status status = ray::Status::OK();
num_expected_objects = (size_t)1;
ObjectID oid1 = WriteDataToClient(client1, data_size);
server1->object_manager_.Push(oid1, node_id_2);
}
void TestPushComplete() {
RAY_LOG(INFO) << "TestPushComplete: "
<< " " << v1.size() << " " << v2.size();
ASSERT_TRUE(v1.size() == v2.size());
for (int i = -1; ++i < (int)v1.size();) {
ASSERT_TRUE(std::find(v1.begin(), v1.end(), v2[i]) != v1.end());
}
v1.clear();
v2.clear();
main_service.stop();
}
void TestConnections() {
RAY_LOG(INFO) << "\n"
<< "Server client ids:"
<< "\n";
ClientID node_id_1 = gcs_client_1->Nodes().GetSelfId();
ClientID node_id_2 = gcs_client_2->Nodes().GetSelfId();
RAY_LOG(INFO) << "Server 1: " << node_id_1;
RAY_LOG(INFO) << "Server 2: " << node_id_2;
RAY_LOG(INFO) << "\n"
<< "All connected clients:"
<< "\n";
auto data = gcs_client_2->Nodes().Get(node_id_1);
RAY_LOG(INFO) << (ClientID::FromBinary(data->node_id()).IsNil());
RAY_LOG(INFO) << "ClientID=" << ClientID::FromBinary(data->node_id());
RAY_LOG(INFO) << "ClientIp=" << data->node_manager_address();
RAY_LOG(INFO) << "ClientPort=" << data->node_manager_port();
rpc::GcsNodeInfo data2;
gcs_client_1->Nodes().Get(node_id_2);
RAY_LOG(INFO) << "ClientID=" << ClientID::FromBinary(data2->node_id());
RAY_LOG(INFO) << "ClientIp=" << data2->node_manager_address();
RAY_LOG(INFO) << "ClientPort=" << data2->node_manager_port();
}
};
TEST_F(TestObjectManagerIntegration, StartTestObjectManagerPush) {
  // Defer WaitConnections() onto the io_service so it runs inside the event
  // loop; run() then blocks until TestPushComplete() calls stop().
  auto AsyncStartTests = main_service.wrap([this]() { WaitConnections(); });
  AsyncStartTests();
  main_service.run();
}
} // namespace raylet
} // namespace ray
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
ray::raylet::test_executable = std::string(argv[0]);
ray::raylet::store_executable = std::string(argv[1]);
return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/raylet.cc
|
C++
|
#include "raylet.h"
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <iostream>
#include "ray/common/status.h"
namespace {

/// Build a human-readable name table for the node manager message enum.
///
/// \param enum_names_ptr Null-terminated array of enum name strings (as
///        generated by flatbuffers).
/// \param start_index Index of the first real message type; earlier slots are
///        filled with a placeholder so indexing by enum value works.
/// \param end_index Expected index of the last message type; used to verify
///        that the generated table and the enum are in sync.
/// \return Vector mapping message-type ordinal to message name.
std::vector<std::string> GenerateEnumNames(const char *const *enum_names_ptr,
                                           int start_index, int end_index) {
  // NOTE: the previous version returned `const std::vector<...>`; top-level
  // const on a by-value return disables move semantics, so it was dropped.
  std::vector<std::string> enum_names;
  for (int i = 0; i < start_index; ++i) {
    enum_names.push_back("EmptyMessageType");
  }
  // Append names until the flatbuffers-generated null terminator.
  for (size_t i = 0; enum_names_ptr[i] != nullptr; ++i) {
    enum_names.push_back(enum_names_ptr[i]);
  }
  RAY_CHECK(static_cast<size_t>(end_index) == enum_names.size() - 1)
      << "Message Type mismatch!";
  return enum_names;
}

static const std::vector<std::string> node_manager_message_enum =
    GenerateEnumNames(ray::protocol::EnumNamesMessageType(),
                      static_cast<int>(ray::protocol::MessageType::MIN),
                      static_cast<int>(ray::protocol::MessageType::MAX));

}  // namespace
namespace ray {
namespace raylet {
/// Construct the raylet: pick a random node ID, bring up the object manager
/// and node manager on `main_service`, and open the Unix-domain acceptor on
/// `socket_name`. GCS registration happens later, in Start().
/// NOTE(review): redis_address/redis_port/redis_password are not used in this
/// body — the GCS client is passed in already constructed; confirm before
/// removing them from the signature.
Raylet::Raylet(boost::asio::io_service &main_service, const std::string &socket_name,
               const std::string &node_ip_address, const std::string &redis_address,
               int redis_port, const std::string &redis_password,
               const NodeManagerConfig &node_manager_config,
               const ObjectManagerConfig &object_manager_config,
               std::shared_ptr<gcs::GcsClient> gcs_client)
    : self_node_id_(ClientID::FromRandom()),
      gcs_client_(gcs_client),
      object_directory_(std::make_shared<ObjectDirectory>(main_service, gcs_client_)),
      object_manager_(main_service, self_node_id_, object_manager_config,
                      object_directory_),
      node_manager_(main_service, self_node_id_, node_manager_config, object_manager_,
                    gcs_client_, object_directory_),
      socket_name_(socket_name),
      acceptor_(main_service, local_stream_protocol::endpoint(
#if defined(BOOST_ASIO_HAS_LOCAL_SOCKETS)
                                  socket_name
#else  // TODO(mehrdadn): HACK: FIXME: This is just to get things compiling!
                                  socket_name.data(), socket_name.size()
#endif
                                  )),
      socket_(main_service) {
  // Fill in the GcsNodeInfo record that RegisterGcs() will publish.
  self_node_info_.set_node_id(self_node_id_.Binary());
  self_node_info_.set_state(GcsNodeInfo::ALIVE);
  self_node_info_.set_node_manager_address(node_ip_address);
  self_node_info_.set_raylet_socket_name(socket_name);
  self_node_info_.set_object_store_socket_name(object_manager_config.store_socket_name);
  self_node_info_.set_object_manager_port(object_manager_.GetServerPort());
  self_node_info_.set_node_manager_port(node_manager_.GetServerPort());
  self_node_info_.set_node_manager_hostname(boost::asio::ip::host_name());
}
// Nothing to do beyond member destruction.
Raylet::~Raylet() = default;
/// Register this node with the GCS, then begin accepting local client
/// connections on the Unix-domain socket.
void Raylet::Start() {
  RAY_CHECK_OK(RegisterGcs());
  // Start listening for clients.
  DoAccept();
}
/// Unregister this node from the GCS and close the acceptor so no further
/// local clients are admitted.
void Raylet::Stop() {
  RAY_CHECK_OK(gcs_client_->Nodes().UnregisterSelf());
  acceptor_.close();
}
/// Publish this node's presence and resource capacities to the GCS, then let
/// the node manager perform its own GCS registration.
ray::Status Raylet::RegisterGcs() {
  RAY_RETURN_NOT_OK(gcs_client_->Nodes().RegisterSelf(self_node_info_));
  RAY_LOG(DEBUG) << "Node manager " << self_node_id_ << " started on "
                 << self_node_info_.node_manager_address() << ":"
                 << self_node_info_.node_manager_port() << " object manager at "
                 << self_node_info_.node_manager_address() << ":"
                 << self_node_info_.object_manager_port() << ", hostname "
                 << self_node_info_.node_manager_hostname();
  // Translate the static resource configuration into the GCS table format.
  const NodeManagerConfig &config = node_manager_.GetInitialConfig();
  std::unordered_map<std::string, std::shared_ptr<gcs::ResourceTableData>>
      resource_table;
  for (const auto &entry : config.resource_config.GetResourceMap()) {
    auto data = std::make_shared<gcs::ResourceTableData>();
    data->set_resource_capacity(entry.second);
    resource_table.emplace(entry.first, data);
  }
  RAY_RETURN_NOT_OK(
      gcs_client_->Nodes().AsyncUpdateResources(self_node_id_, resource_table, nullptr));
  RAY_RETURN_NOT_OK(node_manager_.RegisterGcs());
  return Status::OK();
}
void Raylet::DoAccept() {
acceptor_.async_accept(socket_, boost::bind(&Raylet::HandleAccept, this,
boost::asio::placeholders::error));
}
/// Completion handler for async_accept.
///
/// On success, wraps the accepted socket in a LocalClientConnection whose
/// registration and message events are dispatched to the node manager, then
/// re-arms the acceptor for the next client.
///
/// \param error Result of the accept operation.
void Raylet::HandleAccept(const boost::system::error_code &error) {
  if (!error) {
    // TODO: typedef these handlers.
    ClientHandler<local_stream_protocol> client_handler =
        [this](LocalClientConnection &client) { node_manager_.ProcessNewClient(client); };
    MessageHandler<local_stream_protocol> message_handler =
        [this](std::shared_ptr<LocalClientConnection> client, int64_t message_type,
               const uint8_t *message) {
          node_manager_.ProcessClientMessage(client, message_type, message);
        };
    // Accept a new local client and dispatch it to the node manager.
    // NOTE(review): the returned handle is dropped at the end of this scope;
    // presumably Create() keeps the connection alive internally — confirm.
    auto new_connection = LocalClientConnection::Create(
        client_handler, message_handler, std::move(socket_), "worker",
        node_manager_message_enum,
        static_cast<int64_t>(protocol::MessageType::DisconnectClient));
  }
  // We're ready to accept another client.
  DoAccept();
}
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/raylet.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_RAYLET_H
#define RAY_RAYLET_RAYLET_H
#include <list>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
// clang-format off
#include "ray/raylet/node_manager.h"
#include "ray/object_manager/object_manager.h"
#include "ray/common/task/scheduling_resources.h"
// clang-format on
namespace ray {
namespace raylet {
using rpc::GcsNodeInfo;
class NodeManager;
/// A raylet server: owns the object manager and node manager for one node,
/// accepts local client connections over a Unix-domain socket, and registers
/// the node with the GCS.
class Raylet {
 public:
  /// Create a raylet server and listen for local clients.
  ///
  /// \param main_service The event loop to run the server on.
  /// \param socket_name The Unix domain socket to listen on for local clients.
  /// \param node_ip_address The IP address of this node.
  /// \param redis_address The IP address of the redis instance we are connecting to.
  /// \param redis_port The port of the redis instance we are connecting to.
  /// \param redis_password The password of the redis instance we are connecting to.
  /// \param node_manager_config Configuration to initialize the node manager.
  /// \param object_manager_config Configuration to initialize the object manager.
  /// \param gcs_client A client connection to the GCS.
  Raylet(boost::asio::io_service &main_service, const std::string &socket_name,
         const std::string &node_ip_address, const std::string &redis_address,
         int redis_port, const std::string &redis_password,
         const NodeManagerConfig &node_manager_config,
         const ObjectManagerConfig &object_manager_config,
         std::shared_ptr<gcs::GcsClient> gcs_client);
  /// Start this raylet: register with the GCS and begin accepting clients.
  void Start();
  /// Stop this raylet: unregister from the GCS and close the acceptor.
  void Stop();
  /// Destroy the NodeServer.
  ~Raylet();

 private:
  /// Register GCS client.
  ray::Status RegisterGcs();
  /// Accept a client connection.
  void DoAccept();
  /// Handle an accepted client connection.
  void HandleAccept(const boost::system::error_code &error);

  friend class TestObjectManagerIntegration;

  /// ID of this node.
  ClientID self_node_id_;
  /// Information of this node.
  GcsNodeInfo self_node_info_;
  /// A client connection to the GCS.
  std::shared_ptr<gcs::GcsClient> gcs_client_;
  /// The object table. This is shared between the object manager and node
  /// manager.
  std::shared_ptr<ObjectDirectoryInterface> object_directory_;
  /// Manages client requests for object transfers and availability.
  ObjectManager object_manager_;
  /// Manages client requests for task submission and execution.
  NodeManager node_manager_;
  /// The name of the socket this raylet listens on.
  std::string socket_name_;
  /// An acceptor for new clients.
  boost::asio::basic_socket_acceptor<local_stream_protocol> acceptor_;
  /// The socket to listen on for new clients.
  local_stream_protocol::socket socket_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_RAYLET_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/raylet_client.cc
|
C++
|
#include "raylet_client.h"
#include <inttypes.h>
#include <netdb.h>
#include <netinet/in.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include "ray/common/common_protocol.h"
#include "ray/common/ray_config.h"
#include "ray/common/task/task_spec.h"
#include "ray/raylet/format/node_manager_generated.h"
#include "ray/util/logging.h"
using MessageType = ray::protocol::MessageType;
// TODO(rkn): The io methods below should be removed.
/// Connect a Unix-domain stream socket to the given pathname.
///
/// \param sock Holder that receives the connected file descriptor.
/// \param socket_pathname Filesystem path of the raylet's listening socket.
/// \return true on success; false if socket creation, the pathname length
///         check, or connect() fails.
bool connect_ipc_sock(Socket &sock, const std::string &socket_pathname) {
  struct sockaddr_un socket_address;
  sock.reset(socket(AF_UNIX, SOCK_STREAM, 0));
  if (sock.get() < 0) {
    RAY_LOG(ERROR) << "socket() failed for pathname " << socket_pathname;
    return false;
  }
  memset(&socket_address, 0, sizeof(socket_address));
  socket_address.sun_family = AF_UNIX;
  // sun_path is a fixed-size buffer; reject names that cannot fit with NUL.
  if (socket_pathname.length() + 1 > sizeof(socket_address.sun_path)) {
    RAY_LOG(ERROR) << "Socket pathname is too long.";
    return false;
  }
  strncpy(socket_address.sun_path, socket_pathname.c_str(), socket_pathname.length() + 1);
  if (connect(sock.get(), (struct sockaddr *)&socket_address, sizeof(socket_address)) !=
      0) {
    return false;
  }
  return true;
}
/// Read exactly `length` bytes from `conn` into `cursor`.
///
/// Retries on EINTR/EAGAIN/EWOULDBLOCK. Returns 0 on success, -1 on error or
/// if EOF is reached before `length` bytes arrive (errno may be set).
int read_bytes(Socket &conn, uint8_t *cursor, size_t length) {
  size_t total_read = 0;
  while (total_read < length) {
    const ssize_t count = read(conn.get(), cursor + total_read, length - total_read);
    if (count < 0) {
      // Transient conditions: retry the read.
      if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
        continue;
      }
      return -1;  // Errno will be set.
    }
    if (count == 0) {
      // Encountered early EOF.
      return -1;
    }
    total_read += static_cast<size_t>(count);
  }
  return 0;
}
/// Write exactly `length` bytes from `cursor` to `conn`.
///
/// Retries on EINTR/EAGAIN/EWOULDBLOCK. Returns 0 on success, -1 on error or
/// if the peer stops accepting data (errno may be set).
int write_bytes(Socket &conn, uint8_t *cursor, size_t length) {
  size_t total_written = 0;
  while (total_written < length) {
    const ssize_t count =
        write(conn.get(), cursor + total_written, length - total_written);
    if (count < 0) {
      // Transient conditions: retry the write.
      if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
        continue;
      }
      return -1;  // Errno will be set.
    }
    if (count == 0) {
      // Encountered early EOF.
      return -1;
    }
    total_written += static_cast<size_t>(count);
  }
  return 0;
}
namespace ray {
/// Connect to the raylet over its Unix-domain socket, retrying with a fixed
/// sleep between attempts. Logs FATAL (terminating the process) if every
/// attempt fails.
///
/// \param raylet_socket Path of the raylet's listening socket; must be
///        non-empty.
/// \param num_retries Number of connection attempts; negative means use the
///        configured default.
/// \param timeout Milliseconds to sleep after a failed attempt; negative
///        means use the configured default.
raylet::RayletConnection::RayletConnection(const std::string &raylet_socket,
                                           int num_retries, int64_t timeout) {
  // Pick the default values if the user did not specify.
  if (num_retries < 0) {
    num_retries = RayConfig::instance().num_connect_attempts();
  }
  if (timeout < 0) {
    timeout = RayConfig::instance().connect_timeout_milliseconds();
  }
  RAY_CHECK(!raylet_socket.empty());
  bool connected = false;
  for (int num_attempts = 0; num_attempts < num_retries; ++num_attempts) {
    connected = connect_ipc_sock(conn_, raylet_socket);
    if (connected) {
      break;
    }
    // Only log from the second failed attempt onwards.
    if (num_attempts > 0) {
      RAY_LOG(ERROR) << "Retrying to connect to socket for pathname " << raylet_socket
                     << " (num_attempts = " << num_attempts
                     << ", num_retries = " << num_retries << ")";
    }
    // Sleep for timeout milliseconds.
    usleep(timeout * 1000);
  }
  // If we could not connect to the socket, exit.
  if (!connected) {
    RAY_LOG(FATAL) << "Could not connect to socket " << raylet_socket;
  }
}
/// Tell the raylet this client is leaving on purpose, so the disconnect is
/// not treated as a failure. Write errors are logged and swallowed; this call
/// always reports success.
Status raylet::RayletConnection::Disconnect() {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreateDisconnectClient(builder));
  auto write_status = WriteMessage(MessageType::IntentionalDisconnectClient, &builder);
  // Don't be too strict for disconnection errors.
  // Just create logs and prevent it from crash.
  if (!write_status.ok()) {
    RAY_LOG(ERROR) << write_status.ToString()
                   << " [RayletClient] Failed to disconnect from raylet.";
  }
  return Status::OK();
}
/// Read one framed message of the expected type from the raylet.
///
/// Frame layout: [cookie][type][length][payload]. Any failed read is
/// normalized into a synthetic DisconnectClient frame (via the goto), which
/// is then reported as an IOError; a type mismatch yields a TypeError.
///
/// \param type The message type the caller expects.
/// \param message Out: buffer holding the payload on success; null on error.
Status raylet::RayletConnection::ReadMessage(MessageType type,
                                             std::unique_ptr<uint8_t[]> &message) {
  int64_t cookie;
  int64_t type_field;
  int64_t length;
  int closed = read_bytes(conn_, (uint8_t *)&cookie, sizeof(cookie));
  if (closed) goto disconnected;
  RAY_CHECK(cookie == RayConfig::instance().ray_cookie());
  closed = read_bytes(conn_, (uint8_t *)&type_field, sizeof(type_field));
  if (closed) goto disconnected;
  closed = read_bytes(conn_, (uint8_t *)&length, sizeof(length));
  if (closed) goto disconnected;
  message = std::unique_ptr<uint8_t[]>(new uint8_t[length]);
  closed = read_bytes(conn_, message.get(), length);
  if (closed) {
    // Handle the case in which the socket is closed.
    message.reset(nullptr);
  disconnected:
    // Treat every failed read as if the raylet had sent DisconnectClient.
    message = nullptr;
    type_field = static_cast<int64_t>(MessageType::DisconnectClient);
    length = 0;
  }
  if (type_field == static_cast<int64_t>(MessageType::DisconnectClient)) {
    return Status::IOError("[RayletClient] Raylet connection closed.");
  }
  if (type_field != static_cast<int64_t>(type)) {
    return Status::TypeError(
        std::string("[RayletClient] Raylet connection corrupted. ") +
        "Expected message type: " + std::to_string(static_cast<int64_t>(type)) +
        "; got message type: " + std::to_string(type_field) +
        ". Check logs or dmesg for previous errors.");
  }
  return Status::OK();
}
/// Write one framed message ([cookie][type][length][payload]) to the raylet.
/// `write_mutex_` serializes concurrent writers so frames cannot interleave.
///
/// \param type The message type to send.
/// \param fbb Optional payload; null sends a frame with an empty body.
Status raylet::RayletConnection::WriteMessage(MessageType type,
                                              flatbuffers::FlatBufferBuilder *fbb) {
  std::unique_lock<std::mutex> guard(write_mutex_);
  int64_t cookie = RayConfig::instance().ray_cookie();
  int64_t length = fbb ? fbb->GetSize() : 0;
  uint8_t *bytes = fbb ? fbb->GetBufferPointer() : nullptr;
  int64_t type_field = static_cast<int64_t>(type);
  auto io_error = Status::IOError("[RayletClient] Connection closed unexpectedly.");
  int closed;
  closed = write_bytes(conn_, (uint8_t *)&cookie, sizeof(cookie));
  if (closed) return io_error;
  closed = write_bytes(conn_, (uint8_t *)&type_field, sizeof(type_field));
  if (closed) return io_error;
  closed = write_bytes(conn_, (uint8_t *)&length, sizeof(length));
  if (closed) return io_error;
  closed = write_bytes(conn_, bytes, length * sizeof(char));
  if (closed) return io_error;
  return Status::OK();
}
/// Perform a blocking request/reply exchange, serialized by `mutex_` so that
/// concurrent callers cannot interleave requests with each other's replies.
Status raylet::RayletConnection::AtomicRequestReply(
    MessageType request_type, MessageType reply_type,
    std::unique_ptr<uint8_t[]> &reply_message, flatbuffers::FlatBufferBuilder *fbb) {
  std::unique_lock<std::mutex> guard(mutex_);
  auto write_status = WriteMessage(request_type, fbb);
  if (!write_status.ok()) {
    return write_status;
  }
  return ReadMessage(reply_type, reply_message);
}
/// Construct a lease-only client that talks to the raylet exclusively over
/// gRPC; no local socket connection is established.
raylet::RayletClient::RayletClient(
    std::shared_ptr<rpc::NodeManagerWorkerClient> grpc_client)
    : grpc_client_(std::move(grpc_client)) {}
/// Construct a full raylet client: connect the local socket, register this
/// process with the raylet, and record the raylet's own client ID.
///
/// \param grpc_client gRPC channel to the node manager.
/// \param raylet_socket Path of the raylet's Unix-domain socket.
/// \param worker_id Unique ID of this worker or driver.
/// \param is_worker True for workers, false for drivers.
/// \param job_id The job this process belongs to.
/// \param language The language of this worker.
/// \param raylet_id Out: the connected raylet's client ID.
/// \param port Port this process listens on for direct calls.
raylet::RayletClient::RayletClient(
    std::shared_ptr<rpc::NodeManagerWorkerClient> grpc_client,
    const std::string &raylet_socket, const WorkerID &worker_id, bool is_worker,
    const JobID &job_id, const Language &language, ClientID *raylet_id, int port)
    : grpc_client_(std::move(grpc_client)), worker_id_(worker_id), job_id_(job_id) {
  // For C++14, we could use std::make_unique
  conn_ = std::unique_ptr<raylet::RayletConnection>(
      new raylet::RayletConnection(raylet_socket, -1, -1));
  flatbuffers::FlatBufferBuilder fbb;
  auto message = protocol::CreateRegisterClientRequest(
      fbb, is_worker, to_flatbuf(fbb, worker_id), getpid(), to_flatbuf(fbb, job_id),
      language, port);
  fbb.Finish(message);
  // Register the process ID with the raylet.
  // NOTE(swang): If raylet exits and we are registered as a worker, we will get killed.
  std::unique_ptr<uint8_t[]> reply;
  auto status = conn_->AtomicRequestReply(MessageType::RegisterClientRequest,
                                          MessageType::RegisterClientReply, reply, &fbb);
  RAY_CHECK_OK_PREPEND(status, "[RayletClient] Unable to register worker with raylet.");
  auto reply_message = flatbuffers::GetRoot<protocol::RegisterClientReply>(reply.get());
  *raylet_id = ClientID::FromBinary(reply_message->raylet_id()->str());
}
/// Submit a task for execution through the raylet.
///
/// Direct-call ObjectIDs may not be passed by reference to non-direct tasks;
/// this is enforced with a fatal check before the spec is serialized.
Status raylet::RayletClient::SubmitTask(const TaskSpecification &task_spec) {
  for (size_t arg_idx = 0; arg_idx < task_spec.NumArgs(); arg_idx++) {
    if (!task_spec.ArgByRef(arg_idx)) {
      continue;
    }
    for (size_t id_idx = 0; id_idx < task_spec.ArgIdCount(arg_idx); id_idx++) {
      RAY_CHECK(!task_spec.ArgId(arg_idx, id_idx).IsDirectCallType())
          << "Passing direct call objects to non-direct tasks is not allowed.";
    }
  }
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreateSubmitTaskRequest(
      builder, builder.CreateString(task_spec.Serialize())));
  return conn_->WriteMessage(MessageType::SubmitTask, &builder);
}
/// Notify the raylet that the current task has finished executing. Sends a
/// bodiless TaskDone frame.
Status raylet::RayletClient::TaskDone() {
  return conn_->WriteMessage(MessageType::TaskDone);
}
/// Ask the raylet to fetch (and possibly reconstruct) a set of objects.
///
/// \param object_ids Objects to make available locally.
/// \param fetch_only If true, only fetch; do not trigger reconstruction.
/// \param mark_worker_blocked Whether to mark this worker as blocked.
/// \param current_task_id The task requesting the objects.
Status raylet::RayletClient::FetchOrReconstruct(const std::vector<ObjectID> &object_ids,
                                                bool fetch_only, bool mark_worker_blocked,
                                                const TaskID &current_task_id) {
  flatbuffers::FlatBufferBuilder builder;
  auto request = protocol::CreateFetchOrReconstruct(
      builder, to_flatbuf(builder, object_ids), fetch_only, mark_worker_blocked,
      to_flatbuf(builder, current_task_id));
  builder.Finish(request);
  return conn_->WriteMessage(MessageType::FetchOrReconstruct, &builder);
}
/// Tell the raylet that the given task is no longer blocked.
Status raylet::RayletClient::NotifyUnblocked(const TaskID &current_task_id) {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(
      protocol::CreateNotifyUnblocked(builder, to_flatbuf(builder, current_task_id)));
  return conn_->WriteMessage(MessageType::NotifyUnblocked, &builder);
}
/// Tell the raylet that the current direct-call task is blocked.
Status raylet::RayletClient::NotifyDirectCallTaskBlocked() {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreateNotifyDirectCallTaskBlocked(builder));
  return conn_->WriteMessage(MessageType::NotifyDirectCallTaskBlocked, &builder);
}
/// Tell the raylet that the current direct-call task is no longer blocked.
Status raylet::RayletClient::NotifyDirectCallTaskUnblocked() {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreateNotifyDirectCallTaskUnblocked(builder));
  return conn_->WriteMessage(MessageType::NotifyDirectCallTaskUnblocked, &builder);
}
/// Block until enough of the given objects are ready, or the timeout elapses.
///
/// \param object_ids Objects to wait on.
/// \param num_returns Number of ready objects to wait for.
/// \param timeout_milliseconds Maximum time to wait.
/// \param wait_local Whether objects must be local to count as ready.
/// \param mark_worker_blocked Whether to mark this worker blocked meanwhile.
/// \param current_task_id The task performing the wait.
/// \param result Out: (found, remaining) object ID lists, appended to.
Status raylet::RayletClient::Wait(const std::vector<ObjectID> &object_ids,
                                  int num_returns, int64_t timeout_milliseconds,
                                  bool wait_local, bool mark_worker_blocked,
                                  const TaskID &current_task_id, WaitResultPair *result) {
  // Write request.
  flatbuffers::FlatBufferBuilder fbb;
  auto message = protocol::CreateWaitRequest(
      fbb, to_flatbuf(fbb, object_ids), num_returns, timeout_milliseconds, wait_local,
      mark_worker_blocked, to_flatbuf(fbb, current_task_id));
  fbb.Finish(message);
  std::unique_ptr<uint8_t[]> reply;
  // Blocking round trip: the raylet replies when the wait resolves.
  auto status = conn_->AtomicRequestReply(MessageType::WaitRequest,
                                          MessageType::WaitReply, reply, &fbb);
  if (!status.ok()) return status;
  // Parse the flatbuffer object.
  auto reply_message = flatbuffers::GetRoot<protocol::WaitReply>(reply.get());
  auto found = reply_message->found();
  for (size_t i = 0; i < found->size(); i++) {
    ObjectID object_id = ObjectID::FromBinary(found->Get(i)->str());
    result->first.push_back(object_id);
  }
  auto remaining = reply_message->remaining();
  for (size_t i = 0; i < remaining->size(); i++) {
    ObjectID object_id = ObjectID::FromBinary(remaining->Get(i)->str());
    result->second.push_back(object_id);
  }
  return Status::OK();
}
/// Ask the raylet to notify us (identified by `tag`) when the given direct
/// actor call arguments become available.
Status raylet::RayletClient::WaitForDirectActorCallArgs(
    const std::vector<ObjectID> &object_ids, int64_t tag) {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreateWaitForDirectActorCallArgsRequest(
      builder, to_flatbuf(builder, object_ids), tag));
  return conn_->WriteMessage(MessageType::WaitForDirectActorCallArgsRequest, &builder);
}
/// Report a job-level error to the raylet for publication.
///
/// \param job_id The job the error belongs to.
/// \param type Short error-type string.
/// \param error_message Human-readable error text.
/// \param timestamp UNIX timestamp of the error.
Status raylet::RayletClient::PushError(const JobID &job_id, const std::string &type,
                                       const std::string &error_message,
                                       double timestamp) {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreatePushErrorRequest(
      builder, to_flatbuf(builder, job_id), builder.CreateString(type),
      builder.CreateString(error_message), timestamp));
  return conn_->WriteMessage(MessageType::PushErrorRequest, &builder);
}
/// Forward serialized profiling events to the raylet. Failures are logged and
/// swallowed so profiling can never crash the worker.
Status raylet::RayletClient::PushProfileEvents(const ProfileTableData &profile_events) {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(builder.CreateString(profile_events.SerializeAsString()));
  auto write_status = conn_->WriteMessage(MessageType::PushProfileEventsRequest, &builder);
  // Don't be too strict for profile errors. Just create logs and prevent it from crash.
  if (!write_status.ok()) {
    RAY_LOG(ERROR) << write_status.ToString()
                   << " [RayletClient] Failed to push profile events.";
  }
  return Status::OK();
}
/// Ask the raylet to delete objects from the object store.
///
/// \param object_ids Objects to free.
/// \param local_only If true, only free them on this node.
/// \param delete_creating_tasks Whether to also delete the creating tasks.
Status raylet::RayletClient::FreeObjects(const std::vector<ObjectID> &object_ids,
                                         bool local_only, bool delete_creating_tasks) {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreateFreeObjectsRequest(
      builder, local_only, delete_creating_tasks, to_flatbuf(builder, object_ids)));
  return conn_->WriteMessage(MessageType::FreeObjectsInObjectStoreRequest, &builder);
}
/// Synchronously ask the raylet to prepare a checkpoint for an actor.
///
/// \param actor_id The actor to checkpoint.
/// \param checkpoint_id Out: ID of the newly prepared checkpoint.
Status raylet::RayletClient::PrepareActorCheckpoint(const ActorID &actor_id,
                                                    ActorCheckpointID &checkpoint_id) {
  flatbuffers::FlatBufferBuilder fbb;
  auto message =
      protocol::CreatePrepareActorCheckpointRequest(fbb, to_flatbuf(fbb, actor_id));
  fbb.Finish(message);
  std::unique_ptr<uint8_t[]> reply;
  // Blocking round trip; the reply carries the checkpoint ID.
  auto status =
      conn_->AtomicRequestReply(MessageType::PrepareActorCheckpointRequest,
                                MessageType::PrepareActorCheckpointReply, reply, &fbb);
  if (!status.ok()) return status;
  auto reply_message =
      flatbuffers::GetRoot<protocol::PrepareActorCheckpointReply>(reply.get());
  checkpoint_id = ActorCheckpointID::FromBinary(reply_message->checkpoint_id()->str());
  return Status::OK();
}
/// Tell the raylet that an actor was restored from the given checkpoint.
Status raylet::RayletClient::NotifyActorResumedFromCheckpoint(
    const ActorID &actor_id, const ActorCheckpointID &checkpoint_id) {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreateNotifyActorResumedFromCheckpoint(
      builder, to_flatbuf(builder, actor_id), to_flatbuf(builder, checkpoint_id)));
  return conn_->WriteMessage(MessageType::NotifyActorResumedFromCheckpoint, &builder);
}
/// Create or update a resource capacity on the given node.
///
/// \param resource_name Name of the resource.
/// \param capacity New capacity value.
/// \param client_Id Node on which to set the resource.
Status raylet::RayletClient::SetResource(const std::string &resource_name,
                                         const double capacity,
                                         const ClientID &client_Id) {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(protocol::CreateSetResourceRequest(
      builder, builder.CreateString(resource_name), capacity,
      to_flatbuf(builder, client_Id)));
  return conn_->WriteMessage(MessageType::SetResourceRequest, &builder);
}
/// Report the set of object IDs this worker currently holds references to.
Status raylet::RayletClient::ReportActiveObjectIDs(
    const std::unordered_set<ObjectID> &object_ids) {
  flatbuffers::FlatBufferBuilder builder;
  builder.Finish(
      protocol::CreateReportActiveObjectIDs(builder, to_flatbuf(builder, object_ids)));
  return conn_->WriteMessage(MessageType::ReportActiveObjectIDs, &builder);
}
/// Request a leased worker for the given task's resources; the reply arrives
/// asynchronously on `callback`.
Status raylet::RayletClient::RequestWorkerLease(
    const TaskSpecification &resource_spec,
    const rpc::ClientCallback<rpc::RequestWorkerLeaseReply> &callback) {
  rpc::RequestWorkerLeaseRequest lease_request;
  lease_request.mutable_resource_spec()->CopyFrom(resource_spec.GetMessage());
  return grpc_client_->RequestWorkerLease(lease_request, callback);
}
/// Return a leased worker to the raylet.
///
/// \param worker_port The local port of the worker on the raylet node.
/// \param worker_id The unique worker id of the worker on the raylet node.
/// \param disconnect_worker Whether the raylet should disconnect the worker.
Status raylet::RayletClient::ReturnWorker(int worker_port, const WorkerID &worker_id,
                                          bool disconnect_worker) {
  rpc::ReturnWorkerRequest request;
  request.set_worker_port(worker_port);
  request.set_worker_id(worker_id.Binary());
  request.set_disconnect_worker(disconnect_worker);
  return grpc_client_->ReturnWorker(
      request, [](const Status &status, const rpc::ReturnWorkerReply &reply) {
        // Best effort: a failed return is only logged.
        if (!status.ok()) {
          RAY_LOG(INFO) << "Error returning worker: " << status;
        }
      });
}
/// Ask the raylet to pin the given objects on behalf of the owner so the
/// object store does not evict them.
Status raylet::RayletClient::PinObjectIDs(const rpc::Address &caller_address,
                                          const std::vector<ObjectID> &object_ids) {
  rpc::PinObjectIDsRequest pin_request;
  pin_request.mutable_owner_address()->CopyFrom(caller_address);
  for (const auto &id : object_ids) {
    pin_request.add_object_ids(id.Binary());
  }
  return grpc_client_->PinObjectIDs(pin_request, nullptr);
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/raylet_client.h
|
C/C++ Header
|
#ifndef RAYLET_CLIENT_H
#define RAYLET_CLIENT_H
#include <ray/protobuf/gcs.pb.h>
#include <unistd.h>
#include <boost/asio/detail/socket_holder.hpp>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "ray/common/status.h"
#include "ray/common/task/task_spec.h"
#include "ray/rpc/node_manager/node_manager_client.h"
using ray::ActorCheckpointID;
using ray::ActorID;
using ray::ClientID;
using ray::JobID;
using ray::ObjectID;
using ray::TaskID;
using ray::WorkerID;
using ray::Language;
using ray::rpc::ProfileTableData;
using MessageType = ray::protocol::MessageType;
using ResourceMappingType =
std::unordered_map<std::string, std::vector<std::pair<int64_t, double>>>;
using Socket = boost::asio::detail::socket_holder;
using WaitResultPair = std::pair<std::vector<ObjectID>, std::vector<ObjectID>>;
namespace ray {
/// Interface for leasing workers. Abstract for testing.
class WorkerLeaseInterface {
 public:
  /// Requests a worker from the raylet. The callback will be sent via gRPC.
  /// \param resource_spec Resources that should be allocated for the worker.
  /// \param callback Invoked with the raylet's reply (or an error status).
  /// \return ray::Status
  virtual ray::Status RequestWorkerLease(
      const ray::TaskSpecification &resource_spec,
      const ray::rpc::ClientCallback<ray::rpc::RequestWorkerLeaseReply> &callback) = 0;
  /// Returns a worker to the raylet.
  /// \param worker_port The local port of the worker on the raylet node.
  /// \param worker_id The unique worker id of the worker on the raylet node.
  /// \param disconnect_worker Whether the raylet should disconnect the worker.
  /// \return ray::Status
  virtual ray::Status ReturnWorker(int worker_port, const WorkerID &worker_id,
                                   bool disconnect_worker) = 0;
  // Virtual destructor so implementations can be deleted through this base.
  virtual ~WorkerLeaseInterface(){};
};
namespace raylet {
class RayletConnection {
 public:
  /// Connect to the raylet over its Unix domain socket.
  ///
  /// \param raylet_socket The name of the socket to use to connect to the raylet.
  /// \param num_retries Number of connection attempts before giving up.
  /// \param timeout Timeout applied to the connection attempts.
  ///   NOTE(review): units not stated here — presumably milliseconds; confirm
  ///   against the definition in the .cc file.
  RayletConnection(const std::string &raylet_socket, int num_retries, int64_t timeout);
  /// Notify the raylet that this client is disconnecting gracefully. This
  /// is used by actors to exit gracefully so that the raylet doesn't
  /// propagate an error message to the driver.
  ///
  /// \return ray::Status.
  ray::Status Disconnect();
  /// Read a single flatbuffer message of the given type into `message`.
  ray::Status ReadMessage(MessageType type, std::unique_ptr<uint8_t[]> &message);
  /// Write a single flatbuffer message; `fbb` may be null for bodyless messages.
  ray::Status WriteMessage(MessageType type,
                           flatbuffers::FlatBufferBuilder *fbb = nullptr);
  /// Send a request and synchronously wait for the matching reply type.
  ray::Status AtomicRequestReply(MessageType request_type, MessageType reply_type,
                                 std::unique_ptr<uint8_t[]> &reply_message,
                                 flatbuffers::FlatBufferBuilder *fbb = nullptr);

 private:
  /// The Unix domain socket that connects to raylet.
  Socket conn_;
  /// A mutex to protect stateful operations of the raylet client.
  std::mutex mutex_;
  /// A mutex to protect write operations of the raylet client.
  std::mutex write_mutex_;
};
/// Client to the local raylet. Combines the Unix-domain-socket connection
/// (flatbuffer messages) with a gRPC client, and implements
/// WorkerLeaseInterface for leasing/returning workers.
///
/// Fixes in this revision: three parameter declarations were garbled by an
/// HTML-entity mis-decode ("&curren;" -> "¤"), turning `&current_task_id`
/// into `¤t_task_id`; these are restored. The `deleteCreatingTasks` and
/// `client_Id` parameter names are normalized to snake_case to match their
/// own documentation (declaration-only names; callers are unaffected).
class RayletClient : public WorkerLeaseInterface {
 public:
  /// Connect to the raylet.
  ///
  /// \param grpc_client gRPC client to the raylet.
  /// \param raylet_socket The name of the socket to use to connect to the raylet.
  /// \param worker_id A unique ID to represent the worker.
  /// \param is_worker Whether this client is a worker. If it is a worker, an
  /// additional message will be sent to register as one.
  /// \param job_id The ID of the driver. This is non-nil if the client is a driver.
  /// \param language Language of the worker.
  /// \param raylet_id This will be populated with the local raylet's ClientID.
  /// \param port The port that the worker will listen on for gRPC requests, if
  /// any.
  RayletClient(std::shared_ptr<ray::rpc::NodeManagerWorkerClient> grpc_client,
               const std::string &raylet_socket, const WorkerID &worker_id,
               bool is_worker, const JobID &job_id, const Language &language,
               ClientID *raylet_id, int port = -1);
  /// Connect to the raylet via grpc only.
  ///
  /// \param grpc_client gRPC client to the raylet.
  RayletClient(std::shared_ptr<ray::rpc::NodeManagerWorkerClient> grpc_client);
  /// Notify the raylet that this client is disconnecting gracefully.
  ray::Status Disconnect() { return conn_->Disconnect(); };
  /// Submit a task using the raylet code path.
  ///
  /// \param task_spec The task specification.
  /// \return ray::Status.
  ray::Status SubmitTask(const ray::TaskSpecification &task_spec);
  /// Tell the raylet that the client has finished executing a task.
  ///
  /// \return ray::Status.
  ray::Status TaskDone();
  /// Tell the raylet to reconstruct or fetch objects.
  ///
  /// \param object_ids The IDs of the objects to reconstruct.
  /// \param fetch_only Only fetch objects, do not reconstruct them.
  /// \param mark_worker_blocked Set to false if current task is a direct call task.
  /// \param current_task_id The task that needs the objects.
  /// \return ray::Status.
  ray::Status FetchOrReconstruct(const std::vector<ObjectID> &object_ids, bool fetch_only,
                                 bool mark_worker_blocked,
                                 const TaskID &current_task_id);
  /// Notify the raylet that this client (worker) is no longer blocked.
  ///
  /// \param current_task_id The task that is no longer blocked.
  /// \return ray::Status.
  ray::Status NotifyUnblocked(const TaskID &current_task_id);
  /// Notify the raylet that this client is blocked. This is only used for direct task
  /// calls. Note that ordering of this with respect to Unblock calls is important.
  ///
  /// \return ray::Status.
  ray::Status NotifyDirectCallTaskBlocked();
  /// Notify the raylet that this client is unblocked. This is only used for direct task
  /// calls. Note that ordering of this with respect to Block calls is important.
  ///
  /// \return ray::Status.
  ray::Status NotifyDirectCallTaskUnblocked();
  /// Wait for the given objects until timeout expires or num_return objects are
  /// found.
  ///
  /// \param object_ids The objects to wait for.
  /// \param num_returns The number of objects to wait for.
  /// \param timeout_milliseconds Duration, in milliseconds, to wait before returning.
  /// \param wait_local Whether to wait for objects to appear on this node.
  /// \param mark_worker_blocked Set to false if current task is a direct call task.
  /// \param current_task_id The task that called wait.
  /// \param result A pair with the first element containing the object ids that were
  /// found, and the second element the objects that were not found.
  /// \return ray::Status.
  ray::Status Wait(const std::vector<ObjectID> &object_ids, int num_returns,
                   int64_t timeout_milliseconds, bool wait_local,
                   bool mark_worker_blocked, const TaskID &current_task_id,
                   WaitResultPair *result);
  /// Wait for the given objects, asynchronously. The core worker is notified when
  /// the wait completes.
  ///
  /// \param object_ids The objects to wait for.
  /// \param tag Value that will be sent to the core worker via gRPC on completion.
  /// \return ray::Status.
  ray::Status WaitForDirectActorCallArgs(const std::vector<ObjectID> &object_ids,
                                         int64_t tag);
  /// Push an error to the relevant driver.
  ///
  /// \param job_id The ID of the job that the error is for.
  /// \param type The type of the error.
  /// \param error_message The error message.
  /// \param timestamp The timestamp of the error.
  /// \return ray::Status.
  ray::Status PushError(const ray::JobID &job_id, const std::string &type,
                        const std::string &error_message, double timestamp);
  /// Store some profile events in the GCS.
  ///
  /// \param profile_events A batch of profiling event information.
  /// \return ray::Status.
  ray::Status PushProfileEvents(const ProfileTableData &profile_events);
  /// Free a list of objects from object stores.
  ///
  /// \param object_ids A list of ObjectsIDs to be deleted.
  /// \param local_only Whether keep this request with local object store
  /// or send it to all the object stores.
  /// \param delete_creating_tasks Whether also delete objects' creating tasks from GCS.
  /// \return ray::Status.
  ray::Status FreeObjects(const std::vector<ray::ObjectID> &object_ids, bool local_only,
                          bool delete_creating_tasks);
  /// Request raylet backend to prepare a checkpoint for an actor.
  ///
  /// \param actor_id ID of the actor.
  /// \param checkpoint_id ID of the new checkpoint (output parameter).
  /// \return ray::Status.
  ray::Status PrepareActorCheckpoint(const ActorID &actor_id,
                                     ActorCheckpointID &checkpoint_id);
  /// Notify raylet backend that an actor was resumed from a checkpoint.
  ///
  /// \param actor_id ID of the actor.
  /// \param checkpoint_id ID of the checkpoint from which the actor was resumed.
  /// \return ray::Status.
  ray::Status NotifyActorResumedFromCheckpoint(const ActorID &actor_id,
                                               const ActorCheckpointID &checkpoint_id);
  /// Sets a resource with the specified capacity and client id
  /// \param resource_name Name of the resource to be set
  /// \param capacity Capacity of the resource
  /// \param client_id ClientID where the resource is to be set
  /// \return ray::Status
  ray::Status SetResource(const std::string &resource_name, const double capacity,
                          const ray::ClientID &client_id);
  /// Notifies the raylet of the object IDs currently in use on this worker.
  /// \param object_ids The set of object IDs currently in use.
  /// \return ray::Status
  ray::Status ReportActiveObjectIDs(const std::unordered_set<ObjectID> &object_ids);
  /// Implements WorkerLeaseInterface.
  ray::Status RequestWorkerLease(
      const ray::TaskSpecification &resource_spec,
      const ray::rpc::ClientCallback<ray::rpc::RequestWorkerLeaseReply> &callback)
      override;
  /// Implements WorkerLeaseInterface.
  ray::Status ReturnWorker(int worker_port, const WorkerID &worker_id,
                           bool disconnect_worker) override;
  /// Pin objects on the raylet on behalf of the given owner.
  ray::Status PinObjectIDs(const rpc::Address &caller_address,
                           const std::vector<ObjectID> &object_ids);
  WorkerID GetWorkerID() const { return worker_id_; }
  JobID GetJobID() const { return job_id_; }
  const ResourceMappingType &GetResourceIDs() const { return resource_ids_; }

 private:
  /// gRPC client to the raylet. Right now, this is only used for a couple
  /// request types.
  std::shared_ptr<ray::rpc::NodeManagerWorkerClient> grpc_client_;
  const WorkerID worker_id_;
  const JobID job_id_;
  /// A map from resource name to the resource IDs that are currently reserved
  /// for this worker. Each pair consists of the resource ID and the fraction
  /// of that resource allocated for this worker.
  ResourceMappingType resource_ids_;
  /// The connection to the raylet server.
  std::unique_ptr<RayletConnection> conn_;
};
} // namespace raylet
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/reconstruction_policy.cc
|
C++
|
#include "reconstruction_policy.h"
#include "ray/stats/stats.h"
namespace ray {
namespace raylet {
/// Construct the policy. No work is started here; timers are armed lazily by
/// ListenAndMaybeReconstruct() on the provided io_service.
ReconstructionPolicy::ReconstructionPolicy(
    boost::asio::io_service &io_service,
    std::function<void(const TaskID &, const ObjectID &)> reconstruction_handler,
    int64_t initial_reconstruction_timeout_ms, const ClientID &client_id,
    std::shared_ptr<gcs::GcsClient> gcs_client,
    std::shared_ptr<ObjectDirectoryInterface> object_directory)
    : io_service_(io_service),
      reconstruction_handler_(reconstruction_handler),
      initial_reconstruction_timeout_ms_(initial_reconstruction_timeout_ms),
      client_id_(client_id),
      gcs_client_(gcs_client),
      object_directory_(std::move(object_directory)) {}
/// (Re)arm the reconstruction timer for one tracked task. If the timer fires
/// without being reset by a lease notification, the lease is treated as
/// expired; on the first expiration we instead subscribe to GCS lease
/// notifications and wait one more period.
void ReconstructionPolicy::SetTaskTimeout(
    std::unordered_map<TaskID, ReconstructionTask>::iterator task_it,
    int64_t timeout_ms) {
  task_it->second.expires_at = current_time_ms() + timeout_ms;
  auto timeout = boost::posix_time::milliseconds(timeout_ms);
  // Resetting the expiry cancels any wait already pending on this timer; the
  // canceled handler sees operation_aborted (checked below).
  task_it->second.reconstruction_timer->expires_from_now(timeout);
  // Capture the TaskID by value: the iterator may be invalidated before the
  // handler runs (e.g. if Cancel() erases the entry).
  const TaskID task_id = task_it->first;
  task_it->second.reconstruction_timer->async_wait(
      [this, task_id](const boost::system::error_code &error) {
        if (!error) {
          // Re-look-up the task; it may have been erased while waiting.
          auto it = listening_tasks_.find(task_id);
          if (it == listening_tasks_.end()) {
            return;
          }
          if (it->second.subscribed) {
            // If the timer expired and we were subscribed to notifications,
            // then this means that we did not receive a task lease
            // notification within the lease period. Otherwise, the timer
            // would have been reset when the most recent notification was
            // received. The current lease is now considered expired.
            HandleTaskLeaseExpired(task_id);
          } else {
            const auto task_lease_notification_callback =
                [this](const TaskID &task_id,
                       const boost::optional<rpc::TaskLeaseData> &task_lease) {
                  OnTaskLeaseNotification(task_id, task_lease);
                };
            // This task is still required, so subscribe to task lease
            // notifications. Reconstruction will be triggered if the current
            // task lease expires, or if no one has acquired the task lease.
            // NOTE(swang): When reconstruction for a task is first requested,
            // we do not initially subscribe to task lease notifications, which
            // requires at least one GCS operation. This is in case the objects
            // required by the task are no longer needed soon after. If the
            // task is still required after this initial period, then we now
            // subscribe to task lease notifications.
            RAY_CHECK_OK(gcs_client_->Tasks().AsyncSubscribeTaskLease(
                task_id, task_lease_notification_callback, /*done*/ nullptr));
            it->second.subscribed = true;
          }
        } else {
          // Check that the error was due to the timer being canceled.
          RAY_CHECK(error == boost::asio::error::operation_aborted);
        }
      });
}
/// Translate a GCS lease notification into a call to
/// HandleTaskLeaseNotification with an effective timeout: 0 when there is no
/// lease or the lease-holding node is gone, otherwise the lease's timeout.
void ReconstructionPolicy::OnTaskLeaseNotification(
    const TaskID &task_id, const boost::optional<rpc::TaskLeaseData> &task_lease) {
  int64_t effective_timeout_ms = 0;
  if (task_lease) {
    const ClientID lease_holder = ClientID::FromBinary(task_lease->node_manager_id());
    if (!gcs_client_->Nodes().IsRemoved(lease_holder)) {
      // NOTE(swang): The task_lease.timeout is an overestimate of the
      // lease's expiration period since the entry may have been in the GCS
      // for some time already. For a more accurate estimate, the age of the
      // entry in the GCS should be subtracted from task_lease.timeout.
      effective_timeout_ms = task_lease->timeout();
    }
    // Otherwise the node manager that added the lease was removed, so the
    // lease is considered inactive (timeout stays 0).
  }
  HandleTaskLeaseNotification(task_id, effective_timeout_ms);
}
/// Handle the outcome of an AttemptTaskReconstruction log append. On any
/// outcome the task's timer is re-armed; only a successful append actually
/// triggers the reconstruction handler.
void ReconstructionPolicy::HandleReconstructionLogAppend(
    const TaskID &task_id, const ObjectID &required_object_id, bool success) {
  auto entry = listening_tasks_.find(task_id);
  if (entry == listening_tasks_.end()) {
    // We stopped tracking this task while the append was in flight.
    return;
  }
  // Reset the timer to wait for task lease notifications again. The timer
  // should already be set here, but extending it gives the reconstructed
  // task time to propagate notifications.
  SetTaskTimeout(entry, initial_reconstruction_timeout_ms_);
  if (!success) {
    return;  // Another node won the race to reconstruct this task.
  }
  reconstruction_handler_(task_id, required_object_id);
}
/// Attempt to re-execute a task by appending an entry to the task's
/// reconstruction log in the GCS; the append acts as a distributed
/// compare-and-set so only one node triggers each reconstruction attempt.
void ReconstructionPolicy::AttemptReconstruction(const TaskID &task_id,
                                                 const ObjectID &required_object_id,
                                                 int reconstruction_attempt) {
  // If we are no longer listening for objects created by this task, give up.
  auto it = listening_tasks_.find(task_id);
  if (it == listening_tasks_.end()) {
    return;
  }
  // If the object is no longer required, give up.
  if (it->second.created_objects.count(required_object_id) == 0) {
    return;
  }
  // Suppress duplicate reconstructions of the same task. This can happen if,
  // for example, a task creates two different objects that both require
  // reconstruction.
  if (reconstruction_attempt != it->second.reconstruction_attempt) {
    // Through some other path, reconstruction was already attempted more than
    // reconstruction_attempt many times.
    return;
  }
  // Attempt to reconstruct the task by inserting an entry into the task
  // reconstruction log. This will fail if another node has already inserted
  // an entry for this reconstruction.
  auto reconstruction_entry = std::make_shared<TaskReconstructionData>();
  reconstruction_entry->set_task_id(task_id.Binary());
  reconstruction_entry->set_num_reconstructions(reconstruction_attempt);
  reconstruction_entry->set_node_manager_id(client_id_.Binary());
  // IDs are captured by value: the map entry may be erased before the GCS
  // replies, and HandleReconstructionLogAppend re-checks for that.
  RAY_CHECK_OK(gcs_client_->Tasks().AttemptTaskReconstruction(
      reconstruction_entry,
      /*done=*/
      [this, task_id, required_object_id](Status status) {
        if (status.ok()) {
          HandleReconstructionLogAppend(task_id, required_object_id, /*success=*/true);
        } else {
          HandleReconstructionLogAppend(task_id, required_object_id, /*success=*/false);
        }
      }));
  // Increment the number of times reconstruction has been attempted. This is
  // used to suppress duplicate reconstructions of the same task. If
  // reconstruction is attempted again, the next attempt will try to insert a
  // task reconstruction entry at the next index in the log.
  it->second.reconstruction_attempt++;
}
/// Handle expiration of a task's lease: for each object created by the task,
/// asynchronously check the object directory and attempt reconstruction for
/// any object that no longer exists on a live node.
void ReconstructionPolicy::HandleTaskLeaseExpired(const TaskID &task_id) {
  auto it = listening_tasks_.find(task_id);
  // Callers only invoke this for tasks we are still tracking.
  RAY_CHECK(it != listening_tasks_.end());
  // Snapshot the attempt counter so the async callbacks below can detect
  // (and suppress) attempts that were superseded in the meantime.
  int reconstruction_attempt = it->second.reconstruction_attempt;
  // Lookup the objects created by this task in the object directory. If any
  // objects no longer exist on any live nodes, then reconstruction will be
  // attempted asynchronously.
  for (const auto &created_object_id : it->second.created_objects) {
    RAY_CHECK_OK(object_directory_->LookupLocations(
        created_object_id, [this, task_id, reconstruction_attempt](
                               const ray::ObjectID &object_id,
                               const std::unordered_set<ray::ClientID> &clients) {
          if (clients.empty()) {
            // The required object no longer exists on any live nodes. Attempt
            // reconstruction.
            AttemptReconstruction(task_id, object_id, reconstruction_attempt);
          }
        }));
  }
  // Reset the timer to wait for task lease notifications again.
  SetTaskTimeout(it, initial_reconstruction_timeout_ms_);
}
void ReconstructionPolicy::HandleTaskLeaseNotification(const TaskID &task_id,
int64_t lease_timeout_ms) {
auto it = listening_tasks_.find(task_id);
if (it == listening_tasks_.end()) {
// We are no longer listening for this task, so ignore the notification.
return;
}
if (lease_timeout_ms == 0) {
HandleTaskLeaseExpired(task_id);
} else if ((current_time_ms() + lease_timeout_ms) > it->second.expires_at) {
// The current lease is longer than the timer's current expiration time.
// Reset the timer according to the current lease.
SetTaskTimeout(it, lease_timeout_ms);
}
}
void ReconstructionPolicy::ListenAndMaybeReconstruct(const ObjectID &object_id) {
RAY_LOG(DEBUG) << "Listening and maybe reconstructing object " << object_id;
TaskID task_id = object_id.TaskId();
auto it = listening_tasks_.find(task_id);
// Add this object to the list of objects created by the same task.
if (it == listening_tasks_.end()) {
auto inserted = listening_tasks_.emplace(task_id, ReconstructionTask(io_service_));
it = inserted.first;
// Set a timer for the task that created the object. If the lease for that
// task expires, then reconstruction of that task will be triggered.
SetTaskTimeout(it, initial_reconstruction_timeout_ms_);
}
it->second.created_objects.insert(object_id);
}
void ReconstructionPolicy::Cancel(const ObjectID &object_id) {
RAY_LOG(DEBUG) << "Reconstruction for object " << object_id << " canceled";
TaskID task_id = object_id.TaskId();
auto it = listening_tasks_.find(task_id);
if (it == listening_tasks_.end()) {
// We already stopped listening for this task.
return;
}
it->second.created_objects.erase(object_id);
// If there are no more needed objects created by this task, stop listening
// for notifications.
if (it->second.created_objects.empty()) {
// Cancel notifications for the task lease if we were subscribed to them.
if (it->second.subscribed) {
RAY_CHECK_OK(
gcs_client_->Tasks().AsyncUnsubscribeTaskLease(task_id, /*done*/ nullptr));
}
listening_tasks_.erase(it);
}
}
/// Render a one-line summary of the policy's state for debug output.
std::string ReconstructionPolicy::DebugString() const {
  std::stringstream out;
  out << "ReconstructionPolicy:"
      << "\n- num reconstructing: " << listening_tasks_.size();
  return out.str();
}
/// Export the number of tasks currently tracked for reconstruction to the
/// stats subsystem, tagged as "num_reconstructing_tasks".
void ReconstructionPolicy::RecordMetrics() const {
  stats::ReconstructionPolicyStats().Record(
      listening_tasks_.size(), {{stats::ValueTypeKey, "num_reconstructing_tasks"}});
}
} // namespace raylet
} // end namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/reconstruction_policy.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_RECONSTRUCTION_POLICY_H
#define RAY_RAYLET_RECONSTRUCTION_POLICY_H
#include <functional>
#include <unordered_map>
#include <unordered_set>
#include <boost/asio.hpp>
#include "ray/common/id.h"
#include "ray/gcs/tables.h"
#include "ray/object_manager/object_directory.h"
namespace ray {
namespace raylet {
using rpc::TaskReconstructionData;
/// Abstract interface for the reconstruction policy, used so that tests can
/// substitute a mock implementation.
class ReconstructionPolicyInterface {
 public:
  /// Start tracking an object and reconstruct its creating task if needed.
  virtual void ListenAndMaybeReconstruct(const ObjectID &object_id) = 0;
  /// Stop tracking an object.
  virtual void Cancel(const ObjectID &object_id) = 0;
  virtual ~ReconstructionPolicyInterface(){};
};
class ReconstructionPolicy : public ReconstructionPolicyInterface {
 public:
  /// Create the reconstruction policy.
  ///
  /// \param io_service The event loop to attach reconstruction timers to.
  /// \param reconstruction_handler The handler to call if a task needs to be
  /// re-executed.
  /// \param initial_reconstruction_timeout_ms The initial timeout within which
  /// a task lease notification must be received. Otherwise, reconstruction
  /// will be triggered.
  /// \param client_id The client ID to use when requesting notifications from
  /// the GCS.
  /// \param gcs_client The Client of GCS.
  /// \param object_directory The directory used to look up object locations.
  ReconstructionPolicy(
      boost::asio::io_service &io_service,
      std::function<void(const TaskID &, const ObjectID &)> reconstruction_handler,
      int64_t initial_reconstruction_timeout_ms, const ClientID &client_id,
      std::shared_ptr<gcs::GcsClient> gcs_client,
      std::shared_ptr<ObjectDirectoryInterface> object_directory);
  /// Listen for task lease notifications about an object that may require
  /// reconstruction. If no notifications are received within the initial
  /// timeout, then the registered task reconstruction handler will be called
  /// for the task that created the object.
  ///
  /// \param object_id The object to check for reconstruction.
  void ListenAndMaybeReconstruct(const ObjectID &object_id);
  /// Cancel listening for an object. Notifications for the object will be
  /// ignored. This does not cancel a reconstruction attempt that is already in
  /// progress.
  ///
  /// \param object_id The object to cancel.
  void Cancel(const ObjectID &object_id);
  /// Handle a notification for a task lease. This handler should be called to
  /// indicate that a task is currently being executed, so any objects that it
  /// creates should not be reconstructed.
  ///
  /// \param task_id The task ID of the task being executed.
  /// \param lease_timeout_ms After this timeout, the task's lease is
  /// guaranteed to be expired. If a second notification is not received within
  /// this timeout, then objects that the task creates may be reconstructed.
  void HandleTaskLeaseNotification(const TaskID &task_id, int64_t lease_timeout_ms);
  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;
  /// Record metrics.
  void RecordMetrics() const;

 private:
  /// Per-task bookkeeping for all objects created by one task.
  struct ReconstructionTask {
    ReconstructionTask(boost::asio::io_service &io_service)
        : expires_at(INT64_MAX),
          subscribed(false),
          reconstruction_attempt(0),
          reconstruction_timer(new boost::asio::deadline_timer(io_service)) {}
    // The objects created by this task that we are listening for notifications for.
    std::unordered_set<ObjectID> created_objects;
    // The time at which the timer for this task expires, according to this
    // node's steady clock.
    int64_t expires_at;
    // Whether we are subscribed to lease notifications for this task.
    bool subscribed;
    // The number of times we've attempted reconstructing this task so far.
    int reconstruction_attempt;
    // The task's reconstruction timer. If this expires before a lease
    // notification is received, then the task will be reconstructed.
    std::unique_ptr<boost::asio::deadline_timer> reconstruction_timer;
  };
  /// Set the reconstruction timer for a task. If no task lease notifications
  /// are received within the timeout, then reconstruction will be triggered.
  /// If the timer was previously set, this method will cancel it and reset the
  /// timer to the new timeout.
  void SetTaskTimeout(std::unordered_map<TaskID, ReconstructionTask>::iterator task_it,
                      int64_t timeout_ms);
  /// Handle task lease notification from GCS.
  void OnTaskLeaseNotification(const TaskID &task_id,
                               const boost::optional<rpc::TaskLeaseData> &task_lease);
  /// Attempt to re-execute a task to reconstruct the required object.
  ///
  /// \param task_id The task to attempt to re-execute.
  /// \param required_object_id The object created by the task that requires
  /// reconstruction.
  /// \param reconstruction_attempt What number attempt this is at
  /// reconstructing the task. This is used to suppress duplicate
  /// reconstructions of the same task (e.g., if a task creates two objects
  /// that both require reconstruction).
  void AttemptReconstruction(const TaskID &task_id, const ObjectID &required_object_id,
                             int reconstruction_attempt);
  /// Handle expiration of a task lease.
  void HandleTaskLeaseExpired(const TaskID &task_id);
  /// Handle the response for an attempt at adding an entry to the task
  /// reconstruction log.
  void HandleReconstructionLogAppend(const TaskID &task_id, const ObjectID &object_id,
                                     bool success);
  /// The event loop.
  boost::asio::io_service &io_service_;
  /// The handler to call for tasks that require reconstruction.
  const std::function<void(const TaskID &, const ObjectID &)> reconstruction_handler_;
  /// The initial timeout within which a task lease notification must be
  /// received. Otherwise, reconstruction will be triggered.
  const int64_t initial_reconstruction_timeout_ms_;
  /// The client ID to use when requesting notifications from the GCS.
  const ClientID client_id_;
  /// A client connection to the GCS.
  std::shared_ptr<gcs::GcsClient> gcs_client_;
  /// The object directory used to lookup object locations.
  std::shared_ptr<ObjectDirectoryInterface> object_directory_;
  /// The tasks that we are currently subscribed to in the GCS.
  std::unordered_map<TaskID, ReconstructionTask> listening_tasks_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_RECONSTRUCTION_POLICY_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/reconstruction_policy_test.cc
|
C++
|
#include <list>
#include "absl/time/clock.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <boost/asio.hpp>
#include "ray/gcs/callback.h"
#include "ray/gcs/redis_accessor.h"
#include "ray/raylet/format/node_manager_generated.h"
#include "ray/raylet/reconstruction_policy.h"
#include "ray/object_manager/object_directory.h"
namespace ray {
namespace raylet {
using rpc::TaskLeaseData;
// A helper function to get a normal task id.
// A helper that returns one fixed, deterministic TaskID for a normal
// (non-driver) task, built lazily on first use.
inline TaskID ForNormalTask() {
  static const TaskID cached_task_id = []() {
    const JobID job = JobID::FromInt(1);
    const TaskID driver_task = TaskID::ForDriverTask(job);
    return TaskID::ForNormalTask(job, driver_task, /*parent_task_counter=*/1);
  }();
  return cached_task_id;
}
/// Test double for the object directory: location lookups are queued and only
/// answered when the test calls FlushCallbacks(), letting tests control when
/// "object lost" signals reach the reconstruction policy.
class MockObjectDirectory : public ObjectDirectoryInterface {
 public:
  MockObjectDirectory() {}
  /// Queue the lookup instead of answering immediately.
  ray::Status LookupLocations(const ObjectID &object_id,
                              const OnLocationsFound &callback) override {
    callbacks_.push_back({object_id, callback});
    return ray::Status::OK();
  }
  /// Answer every queued lookup with the locations registered via
  /// SetObjectLocations (empty set if none were registered).
  void FlushCallbacks() {
    for (const auto &callback : callbacks_) {
      const ObjectID object_id = callback.first;
      auto it = locations_.find(object_id);
      if (it == locations_.end()) {
        callback.second(object_id, std::unordered_set<ray::ClientID>());
      } else {
        callback.second(object_id, it->second);
      }
    }
    callbacks_.clear();
  }
  /// Register the set of nodes that hold `object_id`.
  void SetObjectLocations(const ObjectID &object_id,
                          const std::unordered_set<ClientID> &locations) {
    locations_[object_id] = locations;
  }
  /// Simulate a node death by removing it from every object's location set.
  void HandleClientRemoved(const ClientID &client_id) override {
    for (auto &locations : locations_) {
      locations.second.erase(client_id);
    }
  }
  std::string DebugString() const override { return ""; }
  // Remaining interface methods are uninteresting for these tests; gmock
  // stubs satisfy the pure-virtual contract.
  MOCK_METHOD0(GetLocalClientID, ray::ClientID());
  MOCK_CONST_METHOD1(LookupRemoteConnectionInfo, void(RemoteConnectionInfo &));
  MOCK_CONST_METHOD0(LookupAllRemoteConnections, std::vector<RemoteConnectionInfo>());
  MOCK_METHOD3(SubscribeObjectLocations,
               ray::Status(const ray::UniqueID &, const ObjectID &,
                           const OnLocationsFound &));
  MOCK_METHOD2(UnsubscribeObjectLocations,
               ray::Status(const ray::UniqueID &, const ObjectID &));
  MOCK_METHOD3(ReportObjectAdded,
               ray::Status(const ObjectID &, const ClientID &,
                           const object_manager::protocol::ObjectInfoT &));
  MOCK_METHOD3(ReportObjectRemoved,
               ray::Status(const ObjectID &, const ClientID &,
                           const object_manager::protocol::ObjectInfoT &));

 private:
  // Lookups queued by LookupLocations, answered by FlushCallbacks.
  std::vector<std::pair<ObjectID, OnLocationsFound>> callbacks_;
  // Object ID -> set of nodes currently holding that object.
  std::unordered_map<ObjectID, std::unordered_set<ClientID>> locations_;
};
/// Node accessor stub: every node is always considered alive, so lease
/// notifications are never invalidated by simulated node removal.
class MockNodeInfoAccessor : public gcs::RedisNodeInfoAccessor {
 public:
  MockNodeInfoAccessor(gcs::RedisGcsClient *client)
      : gcs::RedisNodeInfoAccessor(client) {}
  bool IsRemoved(const ClientID &node_id) const override { return false; }
};
/// Task accessor stub: lease and reconstruction-log state live in local maps,
/// so tests can drive GCS notifications synchronously with no Redis backend.
class MockTaskInfoAccessor : public gcs::RedisTaskInfoAccessor {
 public:
  MockTaskInfoAccessor(gcs::RedisGcsClient *client) : RedisTaskInfoAccessor(client) {}
  /// Record the subscription and immediately invoke `subscribe` with the
  /// current lease entry (empty optional if no lease was added yet).
  Status AsyncSubscribeTaskLease(
      const TaskID &task_id,
      const gcs::SubscribeCallback<TaskID, boost::optional<TaskLeaseData>> &subscribe,
      const gcs::StatusCallback &done) override {
    subscribe_callback_ = subscribe;
    subscribed_tasks_.insert(task_id);
    auto entry = task_lease_table_.find(task_id);
    if (entry == task_lease_table_.end()) {
      boost::optional<TaskLeaseData> result;
      subscribe(task_id, result);
    } else {
      boost::optional<TaskLeaseData> result(*entry->second);
      subscribe(task_id, result);
    }
    return ray::Status::OK();
  }
  /// Drop the subscription; `done` is ignored by this mock.
  Status AsyncUnsubscribeTaskLease(const TaskID &task_id,
                                   const gcs::StatusCallback &done) override {
    subscribed_tasks_.erase(task_id);
    return ray::Status::OK();
  }
  /// Store the lease and, if the task is subscribed, push the notification
  /// through the most recently registered subscribe callback.
  Status AsyncAddTaskLease(const std::shared_ptr<TaskLeaseData> &task_lease_data,
                           const gcs::StatusCallback &done) override {
    TaskID task_id = TaskID::FromBinary(task_lease_data->task_id());
    task_lease_table_[task_id] = task_lease_data;
    if (subscribed_tasks_.count(task_id) == 1) {
      boost::optional<TaskLeaseData> result(*task_lease_data);
      subscribe_callback_(task_id, result);
    }
    return Status::OK();
  }
  /// Emulate the append-only reconstruction log: the append succeeds only if
  /// the requested index is exactly the next free slot, mirroring the
  /// compare-and-set semantics the real GCS provides.
  Status AttemptTaskReconstruction(
      const std::shared_ptr<TaskReconstructionData> &task_data,
      const gcs::StatusCallback &done) override {
    int log_index = task_data->num_reconstructions();
    TaskID task_id = TaskID::FromBinary(task_data->task_id());
    if (task_reconstruction_log_[task_id].size() == static_cast<size_t>(log_index)) {
      task_reconstruction_log_[task_id].push_back(*task_data);
      if (done != nullptr) {
        done(Status::OK());
      }
    } else {
      if (done != nullptr) {
        done(Status::Invalid("Updating task reconstruction failed."));
      }
    }
    return Status::OK();
  }

 private:
  // NOTE: a single callback is kept, so the last subscriber wins; fine for
  // these tests, which use one policy instance.
  gcs::SubscribeCallback<TaskID, boost::optional<TaskLeaseData>> subscribe_callback_;
  // Task ID -> most recently added lease.
  std::unordered_map<TaskID, std::shared_ptr<TaskLeaseData>> task_lease_table_;
  // Tasks with an active lease subscription.
  std::unordered_set<TaskID> subscribed_tasks_;
  // Task ID -> append-only log of reconstruction attempts.
  std::unordered_map<TaskID, std::vector<TaskReconstructionData>>
      task_reconstruction_log_;
};
/// GCS client stub wired to dummy connection options (no real Redis).
class MockGcs : public gcs::RedisGcsClient {
 public:
  MockGcs() : gcs::RedisGcsClient(gcs::GcsClientOptions("", 0, "")){};
  /// Install the mock accessors. Takes ownership of both raw pointers: they
  /// are reset into the base class's smart-pointer members.
  void Init(gcs::TaskInfoAccessor *task_accessor, gcs::NodeInfoAccessor *node_accessor) {
    task_accessor_.reset(task_accessor);
    node_accessor_.reset(node_accessor);
  }
};
/// Fixture wiring a real ReconstructionPolicy to the mocks above, with helpers
/// to run the io_service for a bounded time and to drive periodic lease
/// heartbeats via asio timers.
class ReconstructionPolicyTest : public ::testing::Test {
 public:
  ReconstructionPolicyTest()
      : io_service_(),
        mock_gcs_(new MockGcs()),
        task_accessor_(new MockTaskInfoAccessor(mock_gcs_.get())),
        node_accessor_(new MockNodeInfoAccessor(mock_gcs_.get())),
        mock_object_directory_(std::make_shared<MockObjectDirectory>()),
        reconstruction_timeout_ms_(50),
        reconstruction_policy_(std::make_shared<ReconstructionPolicy>(
            io_service_,
            [this](const TaskID &task_id, const ObjectID &obj) {
              TriggerReconstruction(task_id);
            },
            reconstruction_timeout_ms_, ClientID::FromRandom(), mock_gcs_,
            mock_object_directory_)),
        timer_canceled_(false) {
    // Forward mock lease notifications into the policy under test.
    subscribe_callback_ = [this](const TaskID &task_id,
                                 const boost::optional<TaskLeaseData> &task_lease) {
      if (task_lease) {
        reconstruction_policy_->HandleTaskLeaseNotification(task_id,
                                                            task_lease->timeout());
      } else {
        reconstruction_policy_->HandleTaskLeaseNotification(task_id, 0);
      }
    };
    // Transfers ownership of the accessors; the fixture keeps raw observers.
    mock_gcs_->Init(task_accessor_, node_accessor_);
  }
  /// Count a reconstruction request per task; tests assert on these counts.
  void TriggerReconstruction(const TaskID &task_id) { reconstructed_tasks_[task_id]++; }
  /// One firing of the periodic timer: run `handler` then re-arm, unless the
  /// test canceled the timer.
  void Tick(const std::function<void(void)> &handler,
            std::shared_ptr<boost::asio::deadline_timer> timer,
            boost::posix_time::milliseconds timer_period,
            const boost::system::error_code &error) {
    if (timer_canceled_) {
      return;
    }
    ASSERT_FALSE(error);
    handler();
    // Fire the timer again after another period.
    timer->expires_from_now(timer_period);
    timer->async_wait(
        [this, handler, timer, timer_period](const boost::system::error_code &error) {
          Tick(handler, timer, timer_period, error);
        });
  }
  /// Start invoking `handler` every `period_ms` on the io_service. The timer
  /// is kept alive by capturing its shared_ptr in the wait handler.
  void SetPeriodicTimer(uint64_t period_ms, const std::function<void(void)> &handler) {
    timer_canceled_ = false;
    auto timer_period = boost::posix_time::milliseconds(period_ms);
    auto timer = std::make_shared<boost::asio::deadline_timer>(io_service_, timer_period);
    timer->async_wait(
        [this, handler, timer, timer_period](const boost::system::error_code &error) {
          Tick(handler, timer, timer_period, error);
        });
  }
  /// Stop the periodic timer at its next firing.
  void CancelPeriodicTimer() { timer_canceled_ = true; }
  /// Run the event loop for `reconstruction_timeout_ms` milliseconds, then
  /// flush any pending object-directory lookups so their callbacks fire.
  void Run(uint64_t reconstruction_timeout_ms) {
    auto timer_period = boost::posix_time::milliseconds(reconstruction_timeout_ms);
    auto timer = std::make_shared<boost::asio::deadline_timer>(io_service_, timer_period);
    timer->async_wait([this, timer](const boost::system::error_code &error) {
      ASSERT_FALSE(error);
      io_service_.stop();
    });
    io_service_.run();
    io_service_.reset();
    mock_object_directory_->FlushCallbacks();
  }

 protected:
  boost::asio::io_service io_service_;
  std::shared_ptr<MockGcs> mock_gcs_;
  // Raw observers; owned by mock_gcs_ after Init() (see constructor).
  MockTaskInfoAccessor *task_accessor_;
  MockNodeInfoAccessor *node_accessor_;
  gcs::SubscribeCallback<TaskID, boost::optional<TaskLeaseData>> subscribe_callback_;
  std::shared_ptr<MockObjectDirectory> mock_object_directory_;
  // Policy timeout used throughout the tests (50 ms).
  uint64_t reconstruction_timeout_ms_;
  std::shared_ptr<ReconstructionPolicy> reconstruction_policy_;
  bool timer_canceled_;
  // Task ID -> number of reconstructions triggered for it.
  std::unordered_map<TaskID, int> reconstructed_tasks_;
};
TEST_F(ReconstructionPolicyTest, TestReconstructionSimple) {
  // A single object with no task lease and no known location should be
  // reconstructed once per reconstruction period.
  const TaskID producer_id = ForNormalTask();
  const ObjectID return_id =
      ObjectID::ForTaskReturn(producer_id, /*index=*/1, /*transport_type=*/0);
  // Start listening for the object.
  reconstruction_policy_->ListenAndMaybeReconstruct(return_id);
  // Let a little more than one reconstruction period elapse.
  Run(reconstruction_timeout_ms_ * 1.1);
  // The producing task should have been reconstructed exactly once.
  ASSERT_EQ(reconstructed_tasks_[producer_id], 1);
  // A second period triggers a second reconstruction attempt.
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 2);
}
TEST_F(ReconstructionPolicyTest, TestReconstructionEvicted) {
  const TaskID producer_id = ForNormalTask();
  const ObjectID return_id =
      ObjectID::ForTaskReturn(producer_id, /*index=*/1, /*transport_type=*/0);
  // The object starts with a live location, so it should not be rebuilt.
  mock_object_directory_->SetObjectLocations(return_id, {ClientID::FromRandom()});
  // Start listening for the object.
  reconstruction_policy_->ListenAndMaybeReconstruct(return_id);
  // A full reconstruction period passes without any reconstruction, since
  // the object still exists on a live node.
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 0);
  // Drop all locations to simulate eviction of the object.
  mock_object_directory_->SetObjectLocations(return_id,
                                             std::unordered_set<ray::ClientID>());
  // Now one more period should trigger reconstruction of the producer.
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 1);
}
TEST_F(ReconstructionPolicyTest, TestReconstructionObjectLost) {
  const TaskID producer_id = ForNormalTask();
  const ObjectID return_id =
      ObjectID::ForTaskReturn(producer_id, /*index=*/1, /*transport_type=*/0);
  // Place the object on a single node that we will later kill.
  const ClientID holder_node = ClientID::FromRandom();
  mock_object_directory_->SetObjectLocations(return_id, {holder_node});
  // Start listening for the object.
  reconstruction_policy_->ListenAndMaybeReconstruct(return_id);
  // While the holder node is alive, no reconstruction should happen.
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 0);
  // Simulate the holder node dying, losing the only copy of the object.
  mock_object_directory_->HandleClientRemoved(holder_node);
  // One more period should now trigger reconstruction of the producer.
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 1);
}
TEST_F(ReconstructionPolicyTest, TestDuplicateReconstruction) {
  // Two return objects of the same task must collapse into a single
  // reconstruction of that task per period.
  const TaskID producer_id = ForNormalTask();
  const ObjectID first_return =
      ObjectID::ForTaskReturn(producer_id, /*index=*/1, /*transport_type=*/0);
  const ObjectID second_return =
      ObjectID::ForTaskReturn(producer_id, /*index=*/2, /*transport_type=*/0);
  // Listen for both objects.
  reconstruction_policy_->ListenAndMaybeReconstruct(first_return);
  reconstruction_policy_->ListenAndMaybeReconstruct(second_return);
  // After one period the shared producer is reconstructed exactly once.
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 1);
  // After another period, exactly once more.
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 2);
}
TEST_F(ReconstructionPolicyTest, TestReconstructionSuppressed) {
  TaskID task_id = ForNormalTask();
  ObjectID object_id =
      ObjectID::ForTaskReturn(task_id, /*index=*/1, /*transport_type=*/0);
  // Run the test for much longer than the reconstruction timeout.
  int64_t test_period = 2 * reconstruction_timeout_ms_;
  // Acquire the task lease for a period longer than the test period, so the
  // lease remains valid for the entire first Run() below.
  auto task_lease_data = std::make_shared<TaskLeaseData>();
  task_lease_data->set_node_manager_id(ClientID::FromRandom().Binary());
  // acquired_at is in milliseconds (nanos / 1e6).
  task_lease_data->set_acquired_at(absl::GetCurrentTimeNanos() / 1000000);
  task_lease_data->set_timeout(2 * test_period);
  task_lease_data->set_task_id(task_id.Binary());
  RAY_CHECK_OK(mock_gcs_->Tasks().AsyncAddTaskLease(task_lease_data, nullptr));
  // Listen for an object.
  reconstruction_policy_->ListenAndMaybeReconstruct(object_id);
  // Run the test.
  Run(test_period);
  // Check that reconstruction is suppressed by the active task lease.
  ASSERT_TRUE(reconstructed_tasks_.empty());
  // Run the test again past the expiration time of the lease.
  Run(task_lease_data->timeout() * 1.1);
  // Check that this time, reconstruction is triggered.
  ASSERT_EQ(reconstructed_tasks_[task_id], 1);
}
TEST_F(ReconstructionPolicyTest, TestReconstructionContinuallySuppressed) {
  const TaskID producer_id = ForNormalTask();
  const ObjectID return_id =
      ObjectID::ForTaskReturn(producer_id, /*index=*/1, /*transport_type=*/0);
  // Start listening for the object.
  reconstruction_policy_->ListenAndMaybeReconstruct(return_id);
  // Publish a fresh task lease twice per reconstruction period; this should
  // keep suppressing reconstruction for as long as the timer runs.
  SetPeriodicTimer(reconstruction_timeout_ms_ / 2, [this, producer_id]() {
    auto lease = std::make_shared<TaskLeaseData>();
    lease->set_node_manager_id(ClientID::FromRandom().Binary());
    lease->set_acquired_at(absl::GetCurrentTimeNanos() / 1000000);
    lease->set_timeout(reconstruction_timeout_ms_);
    lease->set_task_id(producer_id.Binary());
    RAY_CHECK_OK(mock_gcs_->Tasks().AsyncAddTaskLease(lease, nullptr));
  });
  // Even after twice the reconstruction period, nothing was rebuilt.
  Run(reconstruction_timeout_ms_ * 2);
  ASSERT_TRUE(reconstructed_tasks_.empty());
  // Stop renewing the lease; reconstruction fires after one more period.
  CancelPeriodicTimer();
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 1);
}
TEST_F(ReconstructionPolicyTest, TestReconstructionCanceled) {
  const TaskID producer_id = ForNormalTask();
  const ObjectID return_id =
      ObjectID::ForTaskReturn(producer_id, /*index=*/1, /*transport_type=*/0);
  // Start listening for the object.
  reconstruction_policy_->ListenAndMaybeReconstruct(return_id);
  // One reconstruction period into the two-period run below, cancel the
  // subscription before reconstruction can be triggered.
  auto cancel_delay = boost::posix_time::milliseconds(reconstruction_timeout_ms_);
  auto cancel_timer =
      std::make_shared<boost::asio::deadline_timer>(io_service_, cancel_delay);
  cancel_timer->async_wait(
      [this, cancel_timer, return_id](const boost::system::error_code &error) {
        ASSERT_FALSE(error);
        reconstruction_policy_->Cancel(return_id);
      });
  Run(reconstruction_timeout_ms_ * 2);
  // Nothing should have been reconstructed while canceled.
  ASSERT_TRUE(reconstructed_tasks_.empty());
  // Re-subscribe to the object; reconstruction resumes after one period.
  reconstruction_policy_->ListenAndMaybeReconstruct(return_id);
  Run(reconstruction_timeout_ms_ * 1.1);
  ASSERT_EQ(reconstructed_tasks_[producer_id], 1);
}
TEST_F(ReconstructionPolicyTest, TestSimultaneousReconstructionSuppressed) {
  TaskID task_id = ForNormalTask();
  ObjectID object_id =
      ObjectID::ForTaskReturn(task_id, /*index=*/1, /*transport_type=*/0);
  // Log a reconstruction attempt to simulate a different node attempting the
  // reconstruction first. This should suppress this node's first attempt at
  // reconstruction.
  auto task_reconstruction_data = std::make_shared<TaskReconstructionData>();
  task_reconstruction_data->set_task_id(task_id.Binary());
  task_reconstruction_data->set_node_manager_id(ClientID::FromRandom().Binary());
  // Attempt index 0: the slot our node would otherwise have claimed.
  task_reconstruction_data->set_num_reconstructions(0);
  RAY_CHECK_OK(mock_gcs_->Tasks().AttemptTaskReconstruction(
      task_reconstruction_data,
      /*done=*/
      [](Status status) { ASSERT_TRUE(status.ok()); }));
  // Listen for an object.
  reconstruction_policy_->ListenAndMaybeReconstruct(object_id);
  // Run the test for longer than the reconstruction timeout.
  Run(reconstruction_timeout_ms_ * 1.1);
  // Check that reconstruction is suppressed by the reconstruction attempt
  // logged by the other node.
  ASSERT_TRUE(reconstructed_tasks_.empty());
  // Run the test for longer than the reconstruction timeout again.
  Run(reconstruction_timeout_ms_ * 1.1);
  // Check that this time, reconstruction is triggered, since we did not
  // receive a task lease notification from the other node yet and our next
  // attempt to reconstruct adds an entry at the next index in the
  // TaskReconstructionLog.
  ASSERT_EQ(reconstructed_tasks_[task_id], 1);
}
} // namespace raylet
} // namespace ray
// Standard GoogleTest entry point: parse gtest command-line flags, then run
// every test registered in this binary.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/scheduling_policy.cc
|
C++
|
#include <algorithm>
#include <chrono>
#include <random>
#include "scheduling_policy.h"
#include "ray/util/logging.h"
namespace ray {
namespace raylet {
SchedulingPolicy::SchedulingPolicy(const SchedulingQueue &scheduling_queue)
    : scheduling_queue_(scheduling_queue),
      // Seed the RNG from the wall clock so each raylet instance makes
      // different random placement choices.
      gen_(std::chrono::high_resolution_clock::now().time_since_epoch().count()) {}
std::unordered_map<TaskID, ClientID> SchedulingPolicy::Schedule(
std::unordered_map<ClientID, SchedulingResources> &cluster_resources,
const ClientID &local_client_id) {
// The policy decision to be returned.
std::unordered_map<TaskID, ClientID> decision;
#ifndef NDEBUG
RAY_LOG(DEBUG) << "Cluster resource map: ";
for (const auto &client_resource_pair : cluster_resources) {
// pair = ClientID, SchedulingResources
const ClientID &client_id = client_resource_pair.first;
const SchedulingResources &resources = client_resource_pair.second;
RAY_LOG(DEBUG) << "client_id: " << client_id << " "
<< resources.GetAvailableResources().ToString();
}
#endif
// We expect all placeable tasks to be placed on exit from this policy method.
RAY_CHECK(scheduling_queue_.GetTasks(TaskState::PLACEABLE).size() <= 1);
// Iterate over running tasks, get their resource demand and try to schedule.
for (const auto &t : scheduling_queue_.GetTasks(TaskState::PLACEABLE)) {
// Get task's resource demand
const auto &spec = t.GetTaskSpecification();
const auto &resource_demand = spec.GetRequiredPlacementResources();
const TaskID &task_id = spec.TaskId();
// TODO(atumanov): try to place tasks locally first.
// Construct a set of viable node candidates and randomly pick between them.
// Get all the client id keys and randomly pick.
std::vector<ClientID> client_keys;
for (const auto &client_resource_pair : cluster_resources) {
// pair = ClientID, SchedulingResources
ClientID node_client_id = client_resource_pair.first;
const auto &node_resources = client_resource_pair.second;
ResourceSet available_node_resources =
ResourceSet(node_resources.GetAvailableResources());
// We have to subtract the current "load" because we set the current "load"
// to be the resources used by tasks that are in the
// `SchedulingQueue::ready_queue_` in NodeManager::HandleWorkerAvailable's
// call to SchedulingQueue::GetResourceLoad.
available_node_resources.SubtractResources(node_resources.GetLoadResources());
RAY_LOG(DEBUG) << "client_id " << node_client_id
<< " avail: " << node_resources.GetAvailableResources().ToString()
<< " load: " << node_resources.GetLoadResources().ToString();
if (resource_demand.IsSubset(available_node_resources)) {
// This node is a feasible candidate.
client_keys.push_back(node_client_id);
}
}
if (!client_keys.empty()) {
// Choose index at random.
// Initialize a uniform integer distribution over the key space.
// TODO(atumanov): change uniform random to discrete, weighted by resource capacity.
std::uniform_int_distribution<int> distribution(0, client_keys.size() - 1);
int client_key_index = distribution(gen_);
const ClientID &dst_client_id = client_keys[client_key_index];
decision[task_id] = dst_client_id;
// Update dst_client_id's load to keep track of remote task load until
// the next heartbeat.
ResourceSet new_load(cluster_resources[dst_client_id].GetLoadResources());
new_load.AddResources(resource_demand);
cluster_resources[dst_client_id].SetLoadResources(std::move(new_load));
} else {
// If the task doesn't fit, place randomly subject to hard constraints.
for (const auto &client_resource_pair2 : cluster_resources) {
// pair = ClientID, SchedulingResources
ClientID node_client_id = client_resource_pair2.first;
const auto &node_resources = client_resource_pair2.second;
if (resource_demand.IsSubset(node_resources.GetTotalResources())) {
// This node is a feasible candidate.
client_keys.push_back(node_client_id);
}
}
// client candidate list constructed, pick randomly.
if (!client_keys.empty()) {
// Choose index at random.
// Initialize a uniform integer distribution over the key space.
// TODO(atumanov): change uniform random to discrete, weighted by resource
// capacity.
std::uniform_int_distribution<int> distribution(0, client_keys.size() - 1);
int client_key_index = distribution(gen_);
const ClientID &dst_client_id = client_keys[client_key_index];
decision[task_id] = dst_client_id;
// Update dst_client_id's load to keep track of remote task load until
// the next heartbeat.
ResourceSet new_load(cluster_resources[dst_client_id].GetLoadResources());
new_load.AddResources(resource_demand);
cluster_resources[dst_client_id].SetLoadResources(std::move(new_load));
} else {
// There are no nodes that can feasibly execute this task. The task remains
// placeable until cluster capacity becomes available.
// TODO(rkn): Propagate a warning to the user.
RAY_LOG(INFO) << "The task with ID " << task_id << " requires "
<< spec.GetRequiredResources().ToString() << " for execution and "
<< spec.GetRequiredPlacementResources().ToString()
<< " for placement, but no nodes have the necessary resources. "
<< "Check the client table to view node resources.";
}
}
}
return decision;
}
std::vector<TaskID> SchedulingPolicy::SpillOver(
    SchedulingResources &remote_scheduling_resources) const {
  // Task IDs chosen to be forwarded to the remote node.
  std::vector<TaskID> decision;
  // Projected load of the remote node once the spilled tasks land there.
  ResourceSet projected_load(remote_scheduling_resources.GetLoadResources());
  // Spill every infeasible task whose placement constraints the remote node
  // could ever satisfy (checked against its total, not available, capacity).
  for (const auto &task : scheduling_queue_.GetTasks(TaskState::INFEASIBLE)) {
    const auto &task_spec = task.GetTaskSpecification();
    if (task_spec.GetRequiredPlacementResources().IsSubset(
            remote_scheduling_resources.GetTotalResources())) {
      decision.push_back(task_spec.TaskId());
      projected_load.AddResources(task_spec.GetRequiredResources());
    }
  }
  // Additionally spill at most one ready, non-actor task, and only when the
  // remote node can fit it in its *available* resources right now — this
  // prevents tasks from being forwarded back and forth in cycles.
  for (const auto &task : scheduling_queue_.GetTasks(TaskState::READY)) {
    const auto &task_spec = task.GetTaskSpecification();
    if (task_spec.IsActorTask()) {
      continue;
    }
    if (task_spec.GetRequiredPlacementResources().IsSubset(
            remote_scheduling_resources.GetAvailableResources())) {
      decision.push_back(task_spec.TaskId());
      projected_load.AddResources(task_spec.GetRequiredResources());
      break;
    }
  }
  remote_scheduling_resources.SetLoadResources(std::move(projected_load));
  return decision;
}
// Out-of-line defaulted destructor (declared virtual in the header).
SchedulingPolicy::~SchedulingPolicy() = default;
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/scheduling_policy.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_SCHEDULING_POLICY_H
#define RAY_RAYLET_SCHEDULING_POLICY_H
#include <random>
#include <unordered_map>
#include "ray/common/task/scheduling_resources.h"
#include "ray/raylet/scheduling_queue.h"
namespace ray {
namespace raylet {
/// \class SchedulingPolicy
/// \brief Implements a scheduling policy for the node manager: maps placeable
/// tasks to nodes, and selects tasks to spill over to a remote node.
class SchedulingPolicy {
 public:
  /// \brief SchedulingPolicy constructor.
  ///
  /// \param scheduling_queue: reference to a scheduler queues object for access to
  /// tasks. The queue must outlive this policy object (only a reference is kept).
  /// \return Void.
  SchedulingPolicy(const SchedulingQueue &scheduling_queue);
  /// \brief Perform a scheduling operation, given a set of cluster resources and
  /// producing a mapping of tasks to raylets.
  ///
  /// \param cluster_resources: a set of cluster resources containing resource and load
  /// information for some subset of the cluster. For all client IDs in the returned
  /// placement map, the corresponding SchedulingResources::resources_load_ is
  /// incremented by the aggregate resource demand of the tasks assigned to it.
  /// \param local_client_id The ID of the node manager that owns this
  /// SchedulingPolicy object.
  /// \return Scheduling decision, mapping tasks to raylets for placement.
  std::unordered_map<TaskID, ClientID> Schedule(
      std::unordered_map<ClientID, SchedulingResources> &cluster_resources,
      const ClientID &local_client_id);
  /// \brief Given a set of cluster resources perform a spill-over scheduling operation.
  ///
  /// \param remote_scheduling_resources: resource and load information for the
  /// candidate remote node. Its SchedulingResources::resources_load_ is
  /// incremented by the aggregate resource demand of the tasks selected for it.
  /// \return Scheduling decision: the IDs of the tasks to forward to the remote node.
  std::vector<TaskID> SpillOver(SchedulingResources &remote_scheduling_resources) const;
  /// \brief SchedulingPolicy destructor.
  virtual ~SchedulingPolicy();
 private:
  /// An immutable reference to the scheduling task queues.
  const SchedulingQueue &scheduling_queue_;
  /// Internally maintained random number generator (used for random placement).
  std::mt19937_64 gen_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_SCHEDULING_POLICY_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/scheduling_queue.cc
|
C++
|
#include "scheduling_queue.h"
#include <sstream>
#include "ray/common/status.h"
#include "ray/stats/stats.h"
namespace {
// Human-readable names for each TaskState, indexed by the enum's integer
// value; the order must match the TaskState declaration.
static constexpr const char *task_state_strings[] = {
    "placeable", "waiting", "ready",
    "running", "infeasible", "waiting for actor creation",
    "swap"};
// Compile-time guard: adding a TaskState without a matching name here fails
// the build instead of reading out of bounds at runtime.
static_assert(sizeof(task_state_strings) / sizeof(const char *) ==
                  static_cast<int>(ray::raylet::TaskState::kNumTaskQueues),
              "Must specify a TaskState name for every task queue");
// Maps a TaskState to its display name (used in logs and debug strings).
inline const char *GetTaskStateString(ray::raylet::TaskState task_state) {
  return task_state_strings[static_cast<int>(task_state)];
}
// Collects into `task_ids` the IDs of every task in `queue` that belongs to
// the job `job_id`.
template <typename TaskQueue>
inline void GetTasksForJobFromQueue(const TaskQueue &queue, const ray::JobID &job_id,
                                    std::unordered_set<ray::TaskID> &task_ids) {
  for (const auto &queued_task : queue.GetTasks()) {
    const auto &task_spec = queued_task.GetTaskSpecification();
    if (task_spec.JobId() == job_id) {
      task_ids.insert(task_spec.TaskId());
    }
  }
}
// Collects into `task_ids` the IDs of every queued actor task in `queue`
// that targets the actor `actor_id`.
template <typename TaskQueue>
inline void GetActorTasksFromQueue(const TaskQueue &queue, const ray::ActorID &actor_id,
                                   std::unordered_set<ray::TaskID> &task_ids) {
  for (const auto &queued_task : queue.GetTasks()) {
    const auto &task_spec = queued_task.GetTaskSpecification();
    if (task_spec.IsActorTask() && task_spec.ActorId() == actor_id) {
      task_ids.insert(task_spec.TaskId());
    }
  }
}
} // namespace
namespace ray {
namespace raylet {
bool TaskQueue::AppendTask(const TaskID &task_id, const Task &task) {
  // A task may only be queued once.
  RAY_CHECK(task_map_.find(task_id) == task_map_.end());
  // Append to the FIFO list and index the new element by task ID.
  task_map_[task_id] = task_list_.insert(task_list_.end(), task);
  // Keep the aggregate resource load in sync with the queue contents.
  current_resource_load_.AddResources(task.GetTaskSpecification().GetRequiredResources());
  return true;
}
bool TaskQueue::RemoveTask(const TaskID &task_id, std::vector<Task> *removed_tasks) {
  const auto map_entry = task_map_.find(task_id);
  if (map_entry == task_map_.end()) {
    // Not queued here.
    return false;
  }
  const auto task_entry = map_entry->second;
  // Release the task's contribution to the aggregate resource load.
  current_resource_load_.SubtractResourcesStrict(
      task_entry->GetTaskSpecification().GetRequiredResources());
  if (removed_tasks != nullptr) {
    // Hand the task object back to the caller before erasing it.
    removed_tasks->push_back(std::move(*task_entry));
  }
  task_map_.erase(map_entry);
  task_list_.erase(task_entry);
  return true;
}
bool TaskQueue::HasTask(const TaskID &task_id) const {
  // Membership is tracked by the id -> list-iterator index.
  return task_map_.count(task_id) > 0;
}
const std::list<Task> &TaskQueue::GetTasks() const {
  // Expose the underlying list, in insertion order, without copying.
  return task_list_;
}
const Task &TaskQueue::GetTask(const TaskID &task_id) const {
  const auto entry = task_map_.find(task_id);
  // Callers must only ask for tasks that are actually in this queue.
  RAY_CHECK(entry != task_map_.end());
  return *entry->second;
}
const ResourceSet &TaskQueue::GetCurrentResourceLoad() const {
  // Aggregate resources required by all queued tasks; maintained
  // incrementally by AppendTask/RemoveTask.
  return current_resource_load_;
}
bool ReadyQueue::AppendTask(const TaskID &task_id, const Task &task) {
  // Index the task by its scheduling class, then delegate to the base queue.
  const auto &sched_class = task.GetTaskSpecification().GetSchedulingClass();
  tasks_by_class_[sched_class].push_back(task_id);
  return TaskQueue::AppendTask(task_id, task);
}
bool ReadyQueue::RemoveTask(const TaskID &task_id, std::vector<Task> *removed_tasks) {
  // Drop the task from the by-class index before delegating to the base
  // queue. Reuse the iterator from the single find() rather than doing a
  // second operator[] lookup to fetch the scheduling class.
  auto it = task_map_.find(task_id);
  if (it != task_map_.end()) {
    const auto &scheduling_class =
        it->second->GetTaskSpecification().GetSchedulingClass();
    // NOTE: an emptied per-class bucket is intentionally left in place; it is
    // harmless and matches the original behavior.
    tasks_by_class_[scheduling_class].erase(task_id);
  }
  return TaskQueue::RemoveTask(task_id, removed_tasks);
}
const std::unordered_map<SchedulingClass, ordered_set<TaskID>>
    &ReadyQueue::GetTasksByClass() const {
  // Per-scheduling-class view of the ready tasks, in insertion order.
  return tasks_by_class_;
}
const std::list<Task> &SchedulingQueue::GetTasks(TaskState task_state) const {
  // Delegate to the queue that backs the requested state.
  return GetTaskQueue(task_state)->GetTasks();
}
const std::unordered_map<SchedulingClass, ordered_set<TaskID>>
    &SchedulingQueue::GetReadyTasksByClass() const {
  // Only the ready queue maintains a by-class index.
  return ready_queue_->GetTasksByClass();
}
const Task &SchedulingQueue::GetTaskOfState(const TaskID &task_id,
                                            TaskState task_state) const {
  // Fails (via the RAY_CHECK inside GetTask) if the task is not present in
  // the queue for the requested state.
  return GetTaskQueue(task_state)->GetTask(task_id);
}
ResourceSet SchedulingQueue::GetResourceLoad() const {
  // Start from the ready queue's aggregate demand...
  ResourceSet load(ready_queue_->GetCurrentResourceLoad());
  // ...and fold in infeasible tasks so they show up for autoscaling.
  load.AddResources(
      task_queues_[static_cast<int>(TaskState::INFEASIBLE)]->GetCurrentResourceLoad());
  return load;
}
const std::unordered_set<TaskID> &SchedulingQueue::GetBlockedTaskIds() const {
  // Set of task IDs registered through AddBlockedTaskId, tracked separately
  // from the per-state queues.
  return blocked_task_ids_;
}
void SchedulingQueue::FilterStateFromQueue(std::unordered_set<ray::TaskID> &task_ids,
                                           TaskState task_state) const {
  // Erase from `task_ids` every ID that is present in the queue backing
  // `task_state`; IDs not in that queue are left untouched.
  const auto &queue = GetTaskQueue(task_state);
  auto it = task_ids.begin();
  while (it != task_ids.end()) {
    if (queue->HasTask(*it)) {
      it = task_ids.erase(it);
    } else {
      ++it;
    }
  }
}
void SchedulingQueue::FilterState(std::unordered_set<TaskID> &task_ids,
                                  TaskState filter_state) const {
  switch (filter_state) {
  // Every state backed by an actual task queue shares one implementation;
  // the previous per-state duplication added nothing.
  case TaskState::PLACEABLE:
  case TaskState::WAITING_FOR_ACTOR_CREATION:
  case TaskState::WAITING:
  case TaskState::READY:
  case TaskState::RUNNING:
  case TaskState::INFEASIBLE:
  case TaskState::SWAP:
    FilterStateFromQueue(task_ids, filter_state);
    break;
  case TaskState::BLOCKED: {
    // Bind by reference: GetBlockedTaskIds() returns a const ref, and the
    // previous `const auto` copied the entire set on every call.
    const auto &blocked_ids = GetBlockedTaskIds();
    for (auto it = task_ids.begin(); it != task_ids.end();) {
      if (blocked_ids.count(*it) == 1) {
        it = task_ids.erase(it);
      } else {
        it++;
      }
    }
  } break;
  case TaskState::DRIVER: {
    // Same fix as above: avoid copying the driver-task set.
    const auto &driver_task_ids = GetDriverTaskIds();
    for (auto it = task_ids.begin(); it != task_ids.end();) {
      if (driver_task_ids.count(*it) == 1) {
        it = task_ids.erase(it);
      } else {
        it++;
      }
    }
  } break;
  default:
    RAY_LOG(FATAL) << "Attempting to filter tasks on unrecognized state "
                   << static_cast<std::underlying_type<TaskState>::type>(filter_state);
  }
}
const std::shared_ptr<TaskQueue> &SchedulingQueue::GetTaskQueue(
    TaskState task_state) const {
  // Guard against states (e.g. BLOCKED, DRIVER) that have no backing queue.
  // The previous message streamed the state's integer value twice, producing
  // garbled output like "3Task state 3 does not ...".
  RAY_CHECK(task_state < TaskState::kNumTaskQueues)
      << "Task state " << static_cast<int>(task_state)
      << " does not correspond to a task queue";
  return task_queues_[static_cast<int>(task_state)];
}
// Helper function to remove tasks in the given set of task_ids from a
// queue, and append them to the given vector removed_tasks. IDs that are
// found are also erased from task_ids; the rest are left in the set.
void SchedulingQueue::RemoveTasksFromQueue(ray::raylet::TaskState task_state,
                                           std::unordered_set<ray::TaskID> &task_ids,
                                           std::vector<ray::Task> *removed_tasks) {
  auto &queue = GetTaskQueue(task_state);
  // Manual iteration so matched IDs can be erased from the set in place.
  for (auto it = task_ids.begin(); it != task_ids.end();) {
    const auto &task_id = *it;
    if (queue->RemoveTask(task_id, removed_tasks)) {
      RAY_LOG(DEBUG) << "Removed task " << task_id << " from "
                     << GetTaskStateString(task_state) << " queue";
      if (task_state == TaskState::RUNNING) {
        // Keep the per-class running counter in sync with the RUNNING queue.
        // NOTE(review): this dereferences removed_tasks->back(), so callers
        // removing from RUNNING must pass a non-null removed_tasks vector.
        num_running_tasks_
            [removed_tasks->back().GetTaskSpecification().GetSchedulingClass()] -= 1;
      }
      it = task_ids.erase(it);
    } else {
      it++;
    }
  }
}
std::vector<Task> SchedulingQueue::RemoveTasks(std::unordered_set<TaskID> &task_ids) {
// List of removed tasks to be returned.
std::vector<Task> removed_tasks;
// Try to find the tasks to remove from the queues.
for (const auto &task_state : {
TaskState::PLACEABLE,
TaskState::WAITING,
TaskState::READY,
TaskState::RUNNING,
TaskState::INFEASIBLE,
TaskState::WAITING_FOR_ACTOR_CREATION,
TaskState::SWAP,
}) {
RemoveTasksFromQueue(task_state, task_ids, &removed_tasks);
}
RAY_CHECK(task_ids.size() == 0);
return removed_tasks;
}
// Removes a single task from whichever state queue currently holds it.
// Returns true and fills `removed_task` (and optionally `removed_task_state`)
// on success; returns false if the task is in none of the queues.
bool SchedulingQueue::RemoveTask(const TaskID &task_id, Task *removed_task,
                                 TaskState *removed_task_state) {
  std::vector<Task> removed_tasks;
  // Reuse the set-based removal helper with a singleton set.
  std::unordered_set<TaskID> task_id_set = {task_id};
  // Try to find the task to remove in the queues.
  for (const auto &task_state : {
           TaskState::PLACEABLE,
           TaskState::WAITING,
           TaskState::READY,
           TaskState::RUNNING,
           TaskState::INFEASIBLE,
           TaskState::WAITING_FOR_ACTOR_CREATION,
           TaskState::SWAP,
       }) {
    RemoveTasksFromQueue(task_state, task_id_set, &removed_tasks);
    if (task_id_set.empty()) {
      // The task was removed from the current queue.
      if (removed_task_state != nullptr) {
        // If the state of the removed task was requested, then set it with the
        // current queue's state.
        *removed_task_state = task_state;
      }
      break;
    }
  }
  // Make sure we got the removed task.
  if (removed_tasks.size() == 1) {
    *removed_task = removed_tasks.front();
    RAY_CHECK(removed_task->GetTaskSpecification().TaskId() == task_id);
    return true;
  }
  // Not found in any queue: not an error — the task (e.g. its driver) may
  // have already been removed.
  RAY_LOG(DEBUG) << "Task " << task_id
                 << " that is to be removed could not be found any more."
                 << " Probably its driver was removed.";
  return false;
}
void SchedulingQueue::MoveTasks(std::unordered_set<TaskID> &task_ids, TaskState src_state,
                                TaskState dst_state) {
  // States tasks may be moved between. Deliberately excludes
  // WAITING_FOR_ACTOR_CREATION (and non-queue states like BLOCKED/DRIVER),
  // matching the cases of the two previous duplicated switches.
  auto is_movable_state = [](TaskState state) {
    switch (state) {
    case TaskState::PLACEABLE:
    case TaskState::WAITING:
    case TaskState::READY:
    case TaskState::RUNNING:
    case TaskState::INFEASIBLE:
    case TaskState::SWAP:
      return true;
    default:
      return false;
    }
  };
  std::vector<Task> removed_tasks;
  // Remove the tasks from the specified source queue.
  if (is_movable_state(src_state)) {
    RemoveTasksFromQueue(src_state, task_ids, &removed_tasks);
  } else {
    RAY_LOG(FATAL) << "Attempting to move tasks from unrecognized state "
                   << static_cast<std::underlying_type<TaskState>::type>(src_state);
  }
  // Make sure that all tasks were able to be moved.
  RAY_CHECK(task_ids.empty());
  // Add the tasks to the specified destination queue.
  if (is_movable_state(dst_state)) {
    QueueTasks(removed_tasks, dst_state);
  } else {
    RAY_LOG(FATAL) << "Attempting to move tasks to unrecognized state "
                   << static_cast<std::underlying_type<TaskState>::type>(dst_state);
  }
}
void SchedulingQueue::QueueTasks(const std::vector<Task> &tasks, TaskState task_state) {
auto &queue = GetTaskQueue(task_state);
for (const auto &task : tasks) {
RAY_LOG(DEBUG) << "Added task " << task.GetTaskSpecification().TaskId() << " to "
<< GetTaskStateString(task_state) << " queue";
if (task_state == TaskState::RUNNING) {
num_running_tasks_[task.GetTaskSpecification().GetSchedulingClass()] += 1;
}
queue->AppendTask(task.GetTaskSpecification().TaskId(), task);
}
}
bool SchedulingQueue::HasTask(const TaskID &task_id) const {
for (const auto &task_queue : task_queues_) {
if (task_queue->HasTask(task_id)) {
return true;
}
}
return false;
}
std::unordered_set<TaskID> SchedulingQueue::GetTaskIdsForJob(const JobID &job_id) const {
std::unordered_set<TaskID> task_ids;
for (const auto &task_queue : task_queues_) {
GetTasksForJobFromQueue(*task_queue, job_id, task_ids);
}
return task_ids;
}
std::unordered_set<TaskID> SchedulingQueue::GetTaskIdsForActor(
const ActorID &actor_id) const {
std::unordered_set<TaskID> task_ids;
int swap = static_cast<int>(TaskState::SWAP);
int i = 0;
for (const auto &task_queue : task_queues_) {
// This is a hack to make sure that we don't remove tasks from the SWAP
// queue, since these are always guaranteed to be removed and eventually
// resubmitted if necessary by the node manager.
if (i != swap) {
GetActorTasksFromQueue(*task_queue, actor_id, task_ids);
}
i++;
}
return task_ids;
}
void SchedulingQueue::AddBlockedTaskId(const TaskID &task_id) {
  RAY_LOG(DEBUG) << "Added blocked task " << task_id;
  // A task must not be marked blocked twice.
  const bool newly_inserted = blocked_task_ids_.insert(task_id).second;
  RAY_CHECK(newly_inserted);
}
void SchedulingQueue::RemoveBlockedTaskId(const TaskID &task_id) {
  RAY_LOG(DEBUG) << "Removed blocked task " << task_id;
  // The task must currently be marked blocked.
  const auto num_erased = blocked_task_ids_.erase(task_id);
  RAY_CHECK(num_erased == 1);
}
void SchedulingQueue::AddDriverTaskId(const TaskID &task_id) {
  RAY_LOG(DEBUG) << "Added driver task " << task_id;
  // A driver task must not be registered twice.
  const bool newly_inserted = driver_task_ids_.insert(task_id).second;
  RAY_CHECK(newly_inserted);
}
void SchedulingQueue::RemoveDriverTaskId(const TaskID &task_id) {
  RAY_LOG(DEBUG) << "Removed driver task " << task_id;
  // The driver task must currently be registered.
  const auto num_erased = driver_task_ids_.erase(task_id);
  RAY_CHECK(num_erased == 1);
}
const std::unordered_set<TaskID> &SchedulingQueue::GetDriverTaskIds() const {
  // Set of task IDs registered through AddDriverTaskId.
  return driver_task_ids_;
}
int SchedulingQueue::NumRunning(const SchedulingClass &cls) const {
  // An absent entry means no tasks of this class are running.
  const auto entry = num_running_tasks_.find(cls);
  return entry == num_running_tasks_.end() ? 0 : entry->second;
}
/// Produce a human-readable summary of every internal task queue, the
/// blocked-task count, and the per-scheduling-class running-task counts.
///
/// \return A multi-line debug string.
std::string SchedulingQueue::DebugString() const {
  std::stringstream result;
  result << "SchedulingQueue:";
  // One line per internal queue with its current size. Use an int counter so
  // the comparison against the int-cast enum bound is not a signed/unsigned
  // mismatch.
  for (int i = 0; i < static_cast<int>(ray::raylet::TaskState::kNumTaskQueues); i++) {
    TaskState task_state = static_cast<TaskState>(i);
    result << "\n- num " << GetTaskStateString(task_state)
           << " tasks: " << GetTaskQueue(task_state)->GetTasks().size();
  }
  result << "\n- num tasks blocked: " << blocked_task_ids_.size();
  result << "\nScheduledTaskCounts:";
  size_t total = 0;
  for (const auto &pair : num_running_tasks_) {
    result << "\n- ";
    auto desc = TaskSpecification::GetSchedulingClassDescriptor(pair.first);
    for (const auto &str : desc.second) {
      // Only print the ASCII parts of the function descriptor.
      bool ok = !str.empty();
      for (char c : str) {
        // Cast to unsigned char before calling isprint(): passing a plain
        // char that happens to be negative is undefined behavior.
        if (!isprint(static_cast<unsigned char>(c))) {
          ok = false;
        }
      }
      if (ok) {
        result << str;
        result << ".";
      }
    }
    result << desc.first.ToString();
    result << ": " << pair.second;
    total += pair.second;
  }
  // The per-class running counts must stay consistent with the RUNNING queue.
  RAY_CHECK(total == GetTaskQueue(TaskState::RUNNING)->GetTasks().size())
      << total << " vs " << GetTaskQueue(TaskState::RUNNING)->GetTasks().size();
  return result.str();
}
/// Export the size of each internal task queue as a stats gauge, tagged with
/// the queue's task-state name.
void SchedulingQueue::RecordMetrics() const {
  // Use an int counter to avoid the signed/unsigned comparison against the
  // int-cast enum bound.
  for (int i = 0; i < static_cast<int>(ray::raylet::TaskState::kNumTaskQueues); i++) {
    TaskState task_state = static_cast<TaskState>(i);
    stats::SchedulingQueueStats().Record(
        static_cast<double>(GetTaskQueue(task_state)->GetTasks().size()),
        {{stats::ValueTypeKey,
          std::string("num_") + GetTaskStateString(task_state) + "_tasks"}});
  }
}
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/scheduling_queue.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_SCHEDULING_QUEUE_H
#define RAY_RAYLET_SCHEDULING_QUEUE_H
#include <array>
#include <list>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "ray/common/task/task.h"
#include "ray/util/logging.h"
#include "ray/util/ordered_set.h"
namespace ray {
namespace raylet {
/// The scheduling state of a task. Every state with a value less than
/// kNumTaskQueues has a dedicated TaskQueue inside SchedulingQueue; the
/// states after kNumTaskQueues are tracked only as sets of task IDs.
enum class TaskState {
  // The task may be placed on a node.
  PLACEABLE,
  // The task has been placed on a node and is waiting for some object
  // dependencies to become local.
  WAITING,
  // The task has been placed on a node, all dependencies are satisfied, and is
  // waiting for resources to run.
  READY,
  // The task is running on a worker. The task may also be blocked in a ray.get
  // or ray.wait call, in which case it also has state BLOCKED.
  RUNNING,
  // The task has resources that cannot be satisfied by any node, as far as we
  // know.
  INFEASIBLE,
  // The task is an actor method and is waiting to learn where the actor was
  // created.
  WAITING_FOR_ACTOR_CREATION,
  // Swap queue for tasks that are in between states. This can happen when a
  // task is removed from one queue, and an async callback is responsible for
  // re-queuing the task. For example, a READY task that has just been assigned
  // to a worker will get moved to the SWAP queue while waiting for a response
  // from the worker. If the worker accepts the task, the task will be added to
  // the RUNNING queue, else it will be returned to READY.
  SWAP,
  // The number of task queues. All states that precede this enum must have an
  // associated TaskQueue in SchedulingQueue. All states that succeed
  // this enum do not have an associated TaskQueue, since the tasks
  // in those states may not have any associated task data.
  kNumTaskQueues,
  // The task is running but blocked in a ray.get or ray.wait call. Tasks that
  // were explicitly assigned by us may be both BLOCKED and RUNNING, while
  // tasks that were created out-of-band (e.g., the application created
  // multiple threads) are only BLOCKED.
  BLOCKED,
  // The task is a driver task.
  DRIVER,
};
/// \class TaskQueue
///
/// A FIFO queue of tasks indexed by task ID, maintaining a running aggregate
/// of the resources required by the queued tasks.
class TaskQueue {
 public:
  /// TaskQueue destructor.
  virtual ~TaskQueue() {}
  /// \brief Append a task to queue.
  ///
  /// \param task_id The task ID for the task to append.
  /// \param task The task to append to the queue.
  /// \return Whether the append operation succeeds.
  virtual bool AppendTask(const TaskID &task_id, const Task &task);
  /// \brief Remove a task from queue.
  ///
  /// \param task_id The task ID for the task to remove from the queue.
  /// \param removed_tasks If the task specified by task_id is successfully
  /// removed from the queue, the task data is appended to the vector. Can
  /// be a nullptr, in which case nothing is appended.
  /// \return Whether the removal succeeds.
  virtual bool RemoveTask(const TaskID &task_id,
                          std::vector<Task> *removed_tasks = nullptr);
  /// \brief Check if the queue contains a specific task id.
  ///
  /// \param task_id The task ID for the task.
  /// \return Whether the task_id exists in this queue.
  bool HasTask(const TaskID &task_id) const;
  /// \brief Return the task list of the queue.
  ///
  /// \return A list of tasks contained in this queue.
  const std::list<Task> &GetTasks() const;
  /// Get a task from the queue. The caller must ensure that the task is in
  /// the queue.
  ///
  /// \return The task.
  const Task &GetTask(const TaskID &task_id) const;
  /// \brief Get the total resources required by the tasks in the queue.
  ///
  /// \return Total resources required by the tasks in the queue.
  const ResourceSet &GetCurrentResourceLoad() const;

 protected:
  /// A list of tasks, in FIFO order of insertion.
  std::list<Task> task_list_;
  /// A hash to speed up looking up a task by its ID.
  std::unordered_map<TaskID, std::list<Task>::iterator> task_map_;
  /// Aggregate resources of all the tasks in this queue.
  ResourceSet current_resource_load_;
};
/// \class ReadyQueue
///
/// A TaskQueue that additionally indexes the queued tasks by their
/// scheduling class, for class-aware dispatch.
class ReadyQueue : public TaskQueue {
 public:
  ReadyQueue(){};
  ReadyQueue(const ReadyQueue &other) = delete;
  /// ReadyQueue destructor.
  virtual ~ReadyQueue() {}
  /// \brief Append a task to queue.
  ///
  /// \param task_id The task ID for the task to append.
  /// \param task The task to append to the queue.
  /// \return Whether the append operation succeeds.
  bool AppendTask(const TaskID &task_id, const Task &task) override;
  /// \brief Remove a task from queue.
  ///
  /// \param task_id The task ID for the task to remove from the queue.
  /// \return Whether the removal succeeds.
  bool RemoveTask(const TaskID &task_id, std::vector<Task> *removed_tasks) override;
  /// \brief Get a mapping from resource shape to tasks.
  ///
  /// \return Mapping from resource set to task IDs with these resource requirements.
  const std::unordered_map<SchedulingClass, ordered_set<TaskID>> &GetTasksByClass() const;

 private:
  /// Index from task description to tasks queued of that type.
  std::unordered_map<SchedulingClass, ordered_set<TaskID>> tasks_by_class_;
};
/// \class SchedulingQueue
///
/// Encapsulates task queues.
// (See design_docs/task_states.rst for the state transition diagram.)
class SchedulingQueue {
 public:
  /// Create a scheduling queue.
  SchedulingQueue() : ready_queue_(std::make_shared<ReadyQueue>()) {
    for (const auto &task_state : {
             TaskState::PLACEABLE,
             TaskState::WAITING,
             TaskState::READY,
             TaskState::RUNNING,
             TaskState::INFEASIBLE,
             TaskState::WAITING_FOR_ACTOR_CREATION,
             TaskState::SWAP,
         }) {
      // READY is backed by the class-indexed ReadyQueue; all other states
      // get a plain TaskQueue.
      if (task_state == TaskState::READY) {
        task_queues_[static_cast<int>(task_state)] = ready_queue_;
      } else {
        task_queues_[static_cast<int>(task_state)] = std::make_shared<TaskQueue>();
      }
    }
  }
  /// SchedulingQueue destructor.
  virtual ~SchedulingQueue() {}
  /// \brief Check if the queue contains a specific task id.
  ///
  /// \param task_id The task ID for the task.
  /// \return Whether the task_id exists in the queue.
  bool HasTask(const TaskID &task_id) const;
  /// \brief Get all tasks in the given state.
  ///
  /// \param task_state The requested task state. This must correspond to one
  /// of the task queues (has value < TaskState::kNumTaskQueues).
  const std::list<Task> &GetTasks(TaskState task_state) const;
  /// Get a reference to the queue of ready tasks.
  ///
  /// \return A reference to the queue of ready tasks.
  const std::unordered_map<SchedulingClass, ordered_set<TaskID>> &GetReadyTasksByClass()
      const;
  /// Get a task from the queue of a given state. The caller must ensure that
  /// the task has the given state.
  ///
  /// \param task_id The task to get.
  /// \param task_state The state that the requested task should be in.
  /// \return The task.
  const Task &GetTaskOfState(const TaskID &task_id, TaskState task_state) const;
  /// \brief Return an aggregate resource set for all tasks exerting load on this raylet.
  ///
  /// \return A resource set with aggregate resource information about resource load on
  /// this raylet.
  ResourceSet GetResourceLoad() const;
  /// Get the tasks in the blocked state.
  ///
  /// \return A const reference to the IDs of tasks that are blocked on a data
  /// dependency discovered to be missing at runtime. These include RUNNING
  /// tasks that were explicitly assigned to a worker by us, as well as tasks
  /// that were created out-of-band (e.g., by the application creating
  /// multiple threads).
  const std::unordered_set<TaskID> &GetBlockedTaskIds() const;
  /// Get the set of driver task IDs.
  ///
  /// \return A const reference to the set of driver task IDs. These are empty
  /// tasks used to represent drivers.
  const std::unordered_set<TaskID> &GetDriverTaskIds() const;
  /// Remove tasks from the task queue.
  ///
  /// \param task_ids The set of task IDs to remove from the queue. The
  /// corresponding tasks must be contained in the queue. The IDs of removed
  /// tasks will be erased from the set.
  /// \return A vector of the tasks that were removed.
  std::vector<Task> RemoveTasks(std::unordered_set<TaskID> &task_ids);
  /// Remove a task from the task queue.
  ///
  /// \param task_id The task ID to remove from the queue. The corresponding
  /// task must be contained in the queue.
  /// \param task The removed task will be written here, if any.
  /// \param task_state If this is not nullptr, then the state of the removed
  /// task will be written here.
  /// \return true if the task was removed, false if it is not in the queue.
  bool RemoveTask(const TaskID &task_id, Task *removed_task,
                  TaskState *removed_task_state = nullptr);
  /// Remove a driver task ID. This is an empty task used to represent a driver.
  ///
  /// \param The driver task ID to remove.
  void RemoveDriverTaskId(const TaskID &task_id);
  /// Add tasks to the given queue.
  ///
  /// \param tasks The tasks to queue.
  /// \param task_state The state of the tasks to queue. The requested task
  /// state must correspond to one of the task queues (has value <
  /// TaskState::kNumTaskQueues).
  void QueueTasks(const std::vector<Task> &tasks, TaskState task_state);
  /// Add a task ID in the blocked state. These are tasks that have been
  /// dispatched to a worker but are blocked on a data dependency that was
  /// discovered to be missing at runtime.
  ///
  /// \param task_id The task to mark as blocked.
  void AddBlockedTaskId(const TaskID &task_id);
  /// Remove a task ID in the blocked state. These are tasks that have been
  /// dispatched to a worker but were blocked on a data dependency that was
  /// discovered to be missing at runtime.
  ///
  /// \param task_id The task to mark as unblocked.
  void RemoveBlockedTaskId(const TaskID &task_id);
  /// Add a driver task ID. This is an empty task used to represent a driver.
  ///
  /// \param The driver task ID to add.
  void AddDriverTaskId(const TaskID &task_id);
  /// \brief Move the specified tasks from the source state to the destination
  /// state.
  ///
  /// \param tasks The set of task IDs to move. The IDs of successfully moved
  /// tasks will be erased from the set.
  /// \param src_state Source state, which corresponds to one of the internal
  /// task queues.
  /// \param dst_state Destination state, corresponding to one of the internal
  /// task queues.
  void MoveTasks(std::unordered_set<TaskID> &tasks, TaskState src_state,
                 TaskState dst_state);
  /// \brief Filter out task IDs based on their scheduling state.
  ///
  /// \param task_ids The set of task IDs to filter. All tasks that have the
  /// given filter_state will be removed from this set.
  /// \param filter_state The task state to filter out.
  void FilterState(std::unordered_set<TaskID> &task_ids, TaskState filter_state) const;
  /// \brief Get all the task IDs for a job.
  ///
  /// \param job_id All the tasks that have the given job_id are returned.
  /// \return All the tasks that have the given job ID.
  std::unordered_set<TaskID> GetTaskIdsForJob(const JobID &job_id) const;
  /// \brief Get all the task IDs for an actor.
  ///
  /// \param actor_id All the tasks that have the given actor_id are returned.
  /// \return All the tasks that have the given actor ID.
  std::unordered_set<TaskID> GetTaskIdsForActor(const ActorID &actor_id) const;
  /// \brief Return all resource demand associated with the ready queue.
  ///
  /// \return Aggregate resource demand from ready tasks.
  ResourceSet GetReadyQueueResources() const;
  /// Returns the number of running tasks in this class.
  ///
  /// \return int.
  int NumRunning(const SchedulingClass &cls) const;
  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;
  /// Record metrics.
  void RecordMetrics() const;

 private:
  /// Get the task queue in the given state. The requested task state must
  /// correspond to one of the task queues (has value <
  /// TaskState::kNumTaskQueues).
  const std::shared_ptr<TaskQueue> &GetTaskQueue(TaskState task_state) const;
  /// A helper function to remove tasks from a given queue. The requested task
  /// state must correspond to one of the task queues (has value <
  /// TaskState::kNumTaskQueues).
  void RemoveTasksFromQueue(ray::raylet::TaskState task_state,
                            std::unordered_set<ray::TaskID> &task_ids,
                            std::vector<ray::Task> *removed_tasks);
  /// A helper function to filter out tasks of a given state from the set of
  /// task IDs. The requested task state must correspond to one of the task
  /// queues (has value < TaskState::kNumTaskQueues).
  void FilterStateFromQueue(std::unordered_set<ray::TaskID> &task_ids,
                            TaskState task_state) const;
  // A pointer to the ready queue. Also stored in task_queues_ under
  // TaskState::READY; kept separately for typed (ReadyQueue) access.
  const std::shared_ptr<ReadyQueue> ready_queue_;
  /// Track the breakdown of tasks by class in the RUNNING queue.
  std::unordered_map<SchedulingClass, int32_t> num_running_tasks_;
  // A pointer to the task queues. These contain all tasks that have a task
  // state < TaskState::kNumTaskQueues.
  std::array<std::shared_ptr<TaskQueue>, static_cast<int>(TaskState::kNumTaskQueues)>
      task_queues_;
  /// Tasks that were dispatched to a worker but are blocked on a data
  /// dependency that was missing at runtime.
  std::unordered_set<TaskID> blocked_task_ids_;
  /// The set of currently running driver tasks. These are empty tasks that are
  /// started by a driver process on initialization.
  std::unordered_set<TaskID> driver_task_ids_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_SCHEDULING_QUEUE_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/task_dependency_manager.cc
|
C++
|
#include "task_dependency_manager.h"
#include "absl/time/clock.h"
#include "ray/stats/stats.h"
namespace ray {
namespace raylet {
/// Construct a task dependency manager.
///
/// \param object_manager Used to pull required objects from remote nodes.
/// \param reconstruction_policy Used to listen for and trigger
/// reconstruction of required objects.
/// \param io_service Event loop used for the task-lease renewal timers.
/// \param client_id The ID of this node, recorded in task lease entries.
/// \param initial_lease_period_ms The starting lease period for pending
/// tasks; it is doubled on each renewal up to a configured maximum.
/// \param gcs_client Used to write task lease entries to the GCS.
TaskDependencyManager::TaskDependencyManager(
    ObjectManagerInterface &object_manager,
    ReconstructionPolicyInterface &reconstruction_policy,
    boost::asio::io_service &io_service, const ClientID &client_id,
    int64_t initial_lease_period_ms, std::shared_ptr<gcs::GcsClient> gcs_client)
    : object_manager_(object_manager),
      reconstruction_policy_(reconstruction_policy),
      io_service_(io_service),
      client_id_(client_id),
      initial_lease_period_ms_(initial_lease_period_ms),
      gcs_client_(gcs_client) {}
/// Check whether the object is currently in the local object store.
bool TaskDependencyManager::CheckObjectLocal(const ObjectID &object_id) const {
  return local_objects_.find(object_id) != local_objects_.end();
}
bool TaskDependencyManager::CheckObjectRequired(const ObjectID &object_id) const {
const TaskID task_id = object_id.TaskId();
auto task_entry = required_tasks_.find(task_id);
// If there are no subscribed tasks that are dependent on the object, then do
// nothing.
if (task_entry == required_tasks_.end()) {
return false;
}
if (task_entry->second.count(object_id) == 0) {
return false;
}
// If the object is already local, then the dependency is fulfilled. Do
// nothing.
if (local_objects_.count(object_id) == 1) {
return false;
}
// If the task that creates the object is pending execution, then the
// dependency will be fulfilled locally. Do nothing.
if (pending_tasks_.count(task_id) == 1) {
return false;
}
return true;
}
/// If the object is required (see CheckObjectRequired), start making it
/// available locally: request a pull from the object manager and listen for
/// possible reconstruction. Idempotent for already-required objects.
void TaskDependencyManager::HandleRemoteDependencyRequired(const ObjectID &object_id) {
  if (!CheckObjectRequired(object_id)) {
    return;
  }
  // Only kick off the pull and reconstruction listener the first time the
  // object becomes required.
  if (required_objects_.insert(object_id).second) {
    RAY_CHECK_OK(object_manager_.Pull(object_id));
    reconstruction_policy_.ListenAndMaybeReconstruct(object_id);
  }
}
/// If the object is no longer required (see CheckObjectRequired), cancel any
/// in-progress pull and reconstruction for it and stop tracking it.
void TaskDependencyManager::HandleRemoteDependencyCanceled(const ObjectID &object_id) {
  if (CheckObjectRequired(object_id)) {
    return;
  }
  const auto it = required_objects_.find(object_id);
  if (it == required_objects_.end()) {
    // We were not fetching this object; nothing to cancel.
    return;
  }
  object_manager_.CancelPull(object_id);
  reconstruction_policy_.Cancel(object_id);
  required_objects_.erase(it);
}
/// Handle an object becoming available in the local object store.
///
/// Decrements the missing-dependency counters of all subscribed tasks that
/// were waiting on the object, releases any `ray.wait` subscriptions on it,
/// and cancels in-flight pull/reconstruction for it.
///
/// \param object_id The object that is now local. Must not already be
/// recorded as local.
/// \return The IDs of subscribed tasks whose get-dependencies are now all
/// local (i.e., tasks that became ready to run).
std::vector<TaskID> TaskDependencyManager::HandleObjectLocal(
    const ray::ObjectID &object_id) {
  // Add the object to the table of locally available objects.
  auto inserted = local_objects_.insert(object_id);
  RAY_CHECK(inserted.second);
  // Find all tasks and workers that depend on the newly available object.
  std::vector<TaskID> ready_task_ids;
  auto creating_task_entry = required_tasks_.find(object_id.TaskId());
  if (creating_task_entry != required_tasks_.end()) {
    auto object_entry = creating_task_entry->second.find(object_id);
    if (object_entry != creating_task_entry->second.end()) {
      // Loop through all tasks that depend on the newly available object.
      for (const auto &dependent_task_id : object_entry->second.dependent_tasks) {
        auto &task_entry = task_dependencies_[dependent_task_id];
        task_entry.num_missing_get_dependencies--;
        // If the dependent task now has all of its arguments ready, it's ready
        // to run.
        if (task_entry.num_missing_get_dependencies == 0) {
          ready_task_ids.push_back(dependent_task_id);
        }
      }
      // Remove the dependency from all workers that called `ray.wait` on the
      // newly available object.
      for (const auto &worker_id : object_entry->second.dependent_workers) {
        RAY_CHECK(worker_dependencies_[worker_id].erase(object_id) > 0);
      }
      // Clear all workers that called `ray.wait` on this object, since the
      // `ray.wait` calls can now return the object as ready.
      object_entry->second.dependent_workers.clear();
      // If there are no more tasks or workers dependent on the local object or
      // the task that created it, then remove the entry completely.
      if (object_entry->second.Empty()) {
        creating_task_entry->second.erase(object_entry);
        if (creating_task_entry->second.empty()) {
          required_tasks_.erase(creating_task_entry);
        }
      }
    }
  }
  // The object is now local, so cancel any in-progress operations to make the
  // object local.
  HandleRemoteDependencyCanceled(object_id);
  return ready_task_ids;
}
/// Handle an object being evicted from or lost to the local object store.
///
/// Increments the missing-dependency counters of all subscribed tasks that
/// depend on the object, and attempts to make the object local again if it
/// is still required.
///
/// \param object_id The object that is no longer local. Must currently be
/// recorded as local.
/// \return The IDs of subscribed tasks that previously had all dependencies
/// fulfilled and must now be switched back to waiting.
std::vector<TaskID> TaskDependencyManager::HandleObjectMissing(
    const ray::ObjectID &object_id) {
  // Remove the object from the table of locally available objects.
  auto erased = local_objects_.erase(object_id);
  RAY_CHECK(erased == 1);
  // Find any tasks that are dependent on the missing object.
  std::vector<TaskID> waiting_task_ids;
  TaskID creating_task_id = object_id.TaskId();
  auto creating_task_entry = required_tasks_.find(creating_task_id);
  if (creating_task_entry != required_tasks_.end()) {
    auto object_entry = creating_task_entry->second.find(object_id);
    if (object_entry != creating_task_entry->second.end()) {
      for (auto &dependent_task_id : object_entry->second.dependent_tasks) {
        auto &task_entry = task_dependencies_[dependent_task_id];
        // If the dependent task had all of its arguments ready, it was ready to
        // run but must be switched to waiting since one of its arguments is now
        // missing.
        if (task_entry.num_missing_get_dependencies == 0) {
          waiting_task_ids.push_back(dependent_task_id);
          // During normal execution we should be able to include the check
          // RAY_CHECK(pending_tasks_.count(dependent_task_id) == 1);
          // However, this invariant will not hold during unit test execution.
        }
        task_entry.num_missing_get_dependencies++;
      }
    }
  }
  // The object is no longer local. Try to make the object local if necessary.
  HandleRemoteDependencyRequired(object_id);
  // Process callbacks for all of the tasks dependent on the object that are
  // now ready to run.
  return waiting_task_ids;
}
/// Subscribe a task to its `ray.get` dependencies.
///
/// Records each new dependency, counts how many are not yet local, indexes
/// the task under each dependency's creating task, and tries to make any
/// non-local dependencies available.
///
/// \param task_id The task that depends on the objects.
/// \param required_objects The objects the task depends on.
/// \return Whether all of the given dependencies are currently local.
bool TaskDependencyManager::SubscribeGetDependencies(
    const TaskID &task_id, const std::vector<ObjectID> &required_objects) {
  auto &task_entry = task_dependencies_[task_id];
  // Record the task's dependencies.
  for (const auto &object_id : required_objects) {
    auto inserted = task_entry.get_dependencies.insert(object_id);
    // Only count and index dependencies we have not seen for this task yet.
    if (inserted.second) {
      RAY_LOG(DEBUG) << "Task " << task_id << " blocked on object " << object_id;
      // Get the ID of the task that creates the dependency.
      TaskID creating_task_id = object_id.TaskId();
      // Determine whether the dependency can be fulfilled by the local node.
      if (local_objects_.count(object_id) == 0) {
        // The object is not local.
        task_entry.num_missing_get_dependencies++;
      }
      // Add the subscribed task to the mapping from object ID to list of
      // dependent tasks.
      required_tasks_[creating_task_id][object_id].dependent_tasks.insert(task_id);
    }
  }
  // These dependencies are required by the given task. Try to make them local
  // if necessary.
  for (const auto &object_id : required_objects) {
    HandleRemoteDependencyRequired(object_id);
  }
  // Return whether all dependencies are local.
  return (task_entry.num_missing_get_dependencies == 0);
}
/// Subscribe a worker to objects it is blocked on in a `ray.wait` call.
///
/// Only non-local objects are recorded (a local object can already be
/// returned by `ray.wait`). Each recorded object is indexed under its
/// creating task, and we try to make it available locally.
///
/// \param worker_id The worker performing the `ray.wait`.
/// \param required_objects The objects passed to `ray.wait`.
void TaskDependencyManager::SubscribeWaitDependencies(
    const WorkerID &worker_id, const std::vector<ObjectID> &required_objects) {
  auto &worker_entry = worker_dependencies_[worker_id];
  // Record the worker's dependencies.
  for (const auto &object_id : required_objects) {
    if (local_objects_.count(object_id) == 0) {
      RAY_LOG(DEBUG) << "Worker " << worker_id << " called ray.wait on remote object "
                     << object_id;
      // Only add the dependency if the object is not local. If the object is
      // local, then the `ray.wait` call can already return it.
      auto inserted = worker_entry.insert(object_id);
      if (inserted.second) {
        // Get the ID of the task that creates the dependency.
        // TODO(qwang): Refine here to:
        // if (object_id.CreatedByTask()) {// ...}
        TaskID creating_task_id = object_id.TaskId();
        // Add the subscribed worker to the mapping from object ID to list of
        // dependent workers.
        required_tasks_[creating_task_id][object_id].dependent_workers.insert(worker_id);
      }
    }
  }
  // These dependencies are required by the given worker. Try to make them
  // local if necessary.
  for (const auto &object_id : required_objects) {
    HandleRemoteDependencyRequired(object_id);
  }
}
/// Unsubscribe a task from all of its `ray.get` dependencies.
///
/// Removes the task from the dependency index, cleans up now-empty object
/// and creating-task entries, and cancels any in-progress operations to make
/// the no-longer-needed objects local.
///
/// \param task_id The task to unsubscribe.
/// \return true if the task had a subscription, false if it was unknown.
bool TaskDependencyManager::UnsubscribeGetDependencies(const TaskID &task_id) {
  RAY_LOG(DEBUG) << "Task " << task_id << " no longer blocked";
  // Remove the task from the table of subscribed tasks.
  auto it = task_dependencies_.find(task_id);
  if (it == task_dependencies_.end()) {
    return false;
  }
  // Move the entry out before erasing so we can still walk its dependencies.
  const TaskDependencies task_entry = std::move(it->second);
  task_dependencies_.erase(it);
  // Remove the task's dependencies.
  for (const auto &object_id : task_entry.get_dependencies) {
    // Get the ID of the task that creates the dependency.
    TaskID creating_task_id = object_id.TaskId();
    // The index entry must exist because the task was subscribed to this
    // object.
    auto creating_task_entry = required_tasks_.find(creating_task_id);
    // Remove the task from the list of tasks that are dependent on this
    // object.
    auto &dependent_tasks = creating_task_entry->second[object_id].dependent_tasks;
    RAY_CHECK(dependent_tasks.erase(task_id) > 0);
    // If nothing else depends on the object, then erase the object entry.
    if (creating_task_entry->second[object_id].Empty()) {
      creating_task_entry->second.erase(object_id);
      // Remove the task that creates this object if there are no more object
      // dependencies created by the task.
      if (creating_task_entry->second.empty()) {
        required_tasks_.erase(creating_task_entry);
      }
    }
  }
  // These dependencies are no longer required by the given task. Cancel any
  // in-progress operations to make them local.
  for (const auto &object_id : task_entry.get_dependencies) {
    HandleRemoteDependencyCanceled(object_id);
  }
  return true;
}
/// Unsubscribe a worker from all objects it was blocked on in `ray.wait`.
///
/// Removes the worker from the dependency index, cleans up now-empty object
/// and creating-task entries, and cancels any in-progress operations to make
/// the no-longer-needed objects local. No-op for an unknown worker.
///
/// \param worker_id The worker to unsubscribe.
void TaskDependencyManager::UnsubscribeWaitDependencies(const WorkerID &worker_id) {
  RAY_LOG(DEBUG) << "Worker " << worker_id << " no longer blocked";
  // Remove the task from the table of subscribed tasks.
  auto it = worker_dependencies_.find(worker_id);
  if (it == worker_dependencies_.end()) {
    return;
  }
  // Move the entry out before erasing so we can still walk its dependencies.
  const WorkerDependencies worker_entry = std::move(it->second);
  worker_dependencies_.erase(it);
  // Remove the task's dependencies.
  for (const auto &object_id : worker_entry) {
    // Get the ID of the task that creates the dependency.
    TaskID creating_task_id = object_id.TaskId();
    // The index entry must exist because the worker was subscribed to this
    // object.
    auto creating_task_entry = required_tasks_.find(creating_task_id);
    // Remove the worker from the list of workers that are dependent on this
    // object.
    auto &dependent_workers = creating_task_entry->second[object_id].dependent_workers;
    RAY_CHECK(dependent_workers.erase(worker_id) > 0);
    // If nothing else depends on the object, then erase the object entry.
    if (creating_task_entry->second[object_id].Empty()) {
      creating_task_entry->second.erase(object_id);
      // Remove the task that creates this object if there are no more object
      // dependencies created by the task.
      if (creating_task_entry->second.empty()) {
        required_tasks_.erase(creating_task_entry);
      }
    }
  }
  // These dependencies are no longer required by the given task. Cancel any
  // in-progress operations to make them local.
  for (const auto &object_id : worker_entry) {
    HandleRemoteDependencyCanceled(object_id);
  }
}
std::vector<TaskID> TaskDependencyManager::GetPendingTasks() const {
std::vector<TaskID> keys;
keys.reserve(pending_tasks_.size());
for (const auto &id_task_pair : pending_tasks_) {
keys.push_back(id_task_pair.first);
}
return keys;
}
/// Record that a task is pending execution on this node.
///
/// Cancels fetches for objects the pending task will itself produce, and
/// starts acquiring/renewing the task's lease in the GCS. Direct-call tasks
/// are not tracked, with the exception of resubmitted direct actor creation
/// tasks (see the comments below).
///
/// \param task The task that is pending execution.
void TaskDependencyManager::TaskPending(const Task &task) {
  // Direct tasks are not tracked by the raylet.
  // NOTE(zhijunfu): Direct tasks are not tracked by the raylet,
  // but we still need raylet to reconstruct the actors.
  // For direct actor creation task:
  //   - Initially the caller leases a worker from raylet and
  //     then pushes actor creation task directly to the worker,
  //     thus it doesn't need task lease. And actually if we
  //     acquire a lease in this case and forget to cancel it,
  //     the lease would never expire which will prevent the
  //     actor from being reconstructed;
  //   - When a direct actor is reconstructed, raylet resubmits
  //     the task, and the task can be forwarded to another raylet,
  //     and eventually assigned to a worker. In this case we need
  //     the task lease to make sure there's only one raylet can
  //     resubmit the task.
  if (task.GetTaskSpecification().IsDirectCall()) {
    // We can use `OnDispatch` to differeniate whether this task is
    // a worker lease request.
    // For direct actor creation task:
    //   - when it's submitted by core worker, we guarantee that
    //     we always request a new worker lease, in that case
    //     `OnDispatch` is overriden to an actual callback.
    //   - when it's resubmitted by raylet because of reconstruction,
    //     `OnDispatch` will not be overriden and thus is nullptr.
    if (task.GetTaskSpecification().IsActorCreationTask() &&
        task.OnDispatch() == nullptr) {
      // This is an actor creation task, and it's being reconstructed,
      // in this case we still need the task lease. Note that we don't
      // require task lease for direct actor creation task.
    } else {
      return;
    }
  }
  TaskID task_id = task.GetTaskSpecification().TaskId();
  RAY_LOG(DEBUG) << "Task execution " << task_id << " pending";
  // Record that the task is pending execution.
  auto inserted =
      pending_tasks_.emplace(task_id, PendingTask(initial_lease_period_ms_, io_service_));
  if (inserted.second) {
    // This is the first time we've heard that this task is pending. Find any
    // subscribed tasks that are dependent on objects created by the pending
    // task.
    auto remote_task_entry = required_tasks_.find(task_id);
    if (remote_task_entry != required_tasks_.end()) {
      for (const auto &object_entry : remote_task_entry->second) {
        // This object created by the pending task will appear locally once the
        // task completes execution. Cancel any in-progress operations to make
        // the object local.
        HandleRemoteDependencyCanceled(object_entry.first);
      }
    }
    // Acquire the lease for the task's execution in the global lease table.
    AcquireTaskLease(task_id);
  }
}
/// Acquire or renew the lease for a pending task in the GCS task lease
/// table, then schedule the next renewal at half the lease period. The lease
/// period doubles on each renewal, capped at the configured maximum. No-op
/// if the task is no longer pending.
///
/// \param task_id The pending task whose lease to acquire or renew.
void TaskDependencyManager::AcquireTaskLease(const TaskID &task_id) {
  auto it = pending_tasks_.find(task_id);
  int64_t now_ms = current_time_ms();
  if (it == pending_tasks_.end()) {
    // The task was canceled or completed; stop renewing.
    return;
  }
  // Check that we were able to renew the task lease before the previous one
  // expired.
  if (now_ms > it->second.expires_at) {
    RAY_LOG(WARNING) << "Task " << task_id << " lease to renew has already expired by "
                     << (it->second.expires_at - now_ms) << "ms";
  }
  auto task_lease_data = std::make_shared<TaskLeaseData>();
  task_lease_data->set_task_id(task_id.Binary());
  task_lease_data->set_node_manager_id(client_id_.Hex());
  task_lease_data->set_acquired_at(absl::GetCurrentTimeNanos() / 1000000);
  task_lease_data->set_timeout(it->second.lease_period);
  RAY_CHECK_OK(gcs_client_->Tasks().AsyncAddTaskLease(task_lease_data, nullptr));
  // Renew at half the lease period so the lease never lapses under normal
  // operation.
  auto period = boost::posix_time::milliseconds(it->second.lease_period / 2);
  it->second.lease_timer->expires_from_now(period);
  it->second.lease_timer->async_wait(
      [this, task_id](const boost::system::error_code &error) {
        if (!error) {
          AcquireTaskLease(task_id);
        } else {
          // Check that the error was due to the timer being canceled.
          RAY_CHECK(error == boost::asio::error::operation_aborted);
        }
      });
  it->second.expires_at = now_ms + it->second.lease_period;
  // Exponential back-off of the renewal period, up to the configured cap.
  it->second.lease_period = std::min(it->second.lease_period * 2,
                                     RayConfig::instance().max_task_lease_timeout_ms());
}
/// Record that a task is no longer pending execution, and re-trigger fetches
/// for objects the task would have produced, since they will no longer
/// appear locally. No-op for a task that is not pending.
void TaskDependencyManager::TaskCanceled(const TaskID &task_id) {
  RAY_LOG(DEBUG) << "Task execution " << task_id << " canceled";
  // Record that the task is no longer pending execution.
  if (pending_tasks_.erase(task_id) == 0) {
    return;
  }
  // Find any subscribed tasks that are dependent on objects created by the
  // canceled task.
  const auto remote_task_entry = required_tasks_.find(task_id);
  if (remote_task_entry == required_tasks_.end()) {
    return;
  }
  for (const auto &object_entry : remote_task_entry->second) {
    // This object created by the task will no longer appear locally since
    // the task is canceled. Try to make the object local if necessary.
    HandleRemoteDependencyRequired(object_entry.first);
  }
}
/// Forget a set of tasks entirely: drop their dependency subscriptions and
/// pending-execution records, and cancel operations on all objects they
/// required. The caller must include every task that depends on a task in
/// the set (checked below).
///
/// \param task_ids The tasks to remove.
void TaskDependencyManager::RemoveTasksAndRelatedObjects(
    const std::unordered_set<TaskID> &task_ids) {
  // Collect a list of all the unique objects that these tasks were subscribed
  // to.
  std::unordered_set<ObjectID> required_objects;
  // Use a range-for instead of an explicit iterator loop; we only need the
  // task IDs, never the set iterator itself.
  for (const auto &task_id : task_ids) {
    auto task_it = task_dependencies_.find(task_id);
    if (task_it != task_dependencies_.end()) {
      // Add the objects that this task was subscribed to.
      required_objects.insert(task_it->second.get_dependencies.begin(),
                              task_it->second.get_dependencies.end());
    }
    // The task no longer depends on anything.
    task_dependencies_.erase(task_id);
    // The task is no longer pending execution.
    pending_tasks_.erase(task_id);
  }
  // Cancel all of the objects that were required by the removed tasks.
  for (const auto &object_id : required_objects) {
    TaskID creating_task_id = object_id.TaskId();
    required_tasks_.erase(creating_task_id);
    HandleRemoteDependencyCanceled(object_id);
  }
  // Make sure that the tasks in task_ids no longer have tasks dependent on
  // them.
  for (const auto &task_id : task_ids) {
    RAY_CHECK(required_tasks_.find(task_id) == required_tasks_.end())
        << "RemoveTasksAndRelatedObjects was called on " << task_id
        << ", but another task depends on it that was not included in the argument";
  }
}
/// Produce a human-readable summary of the manager's internal map sizes,
/// used for debug logging.
///
/// \return The formatted summary string.
std::string TaskDependencyManager::DebugString() const {
  std::stringstream out;
  out << "TaskDependencyManager:"
      << "\n- task dep map size: " << task_dependencies_.size()
      << "\n- task req map size: " << required_tasks_.size()
      << "\n- req objects map size: " << required_objects_.size()
      << "\n- local objects map size: " << local_objects_.size()
      << "\n- pending tasks map size: " << pending_tasks_.size();
  return out.str();
}
void TaskDependencyManager::RecordMetrics() const {
stats::TaskDependencyManagerStats().Record(
task_dependencies_.size(), {{stats::ValueTypeKey, "num_task_dependencies"}});
stats::TaskDependencyManagerStats().Record(
required_tasks_.size(), {{stats::ValueTypeKey, "num_required_tasks"}});
stats::TaskDependencyManagerStats().Record(
required_objects_.size(), {{stats::ValueTypeKey, "num_required_objects"}});
stats::TaskDependencyManagerStats().Record(
local_objects_.size(), {{stats::ValueTypeKey, "num_local_objects"}});
stats::TaskDependencyManagerStats().Record(
pending_tasks_.size(), {{stats::ValueTypeKey, "num_pending_tasks"}});
}
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/task_dependency_manager.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_TASK_DEPENDENCY_MANAGER_H
#define RAY_RAYLET_TASK_DEPENDENCY_MANAGER_H
// clang-format off
#include "ray/common/id.h"
#include "ray/common/task/task.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/object_manager/object_manager.h"
#include "ray/raylet/reconstruction_policy.h"
// clang-format on
namespace ray {
namespace raylet {
using rpc::TaskLeaseData;
class ReconstructionPolicy;
/// \class TaskDependencyManager
///
/// Responsible for managing object dependencies for tasks. The caller can
/// subscribe to object dependencies for a task. The task manager will
/// determine which object dependencies are remote. These are the objects that
/// are neither in the local object store, nor will they be created by a
/// locally queued task. The task manager will request that these objects be
/// made available locally, either by object transfer from a remote node or
/// reconstruction. The task manager will also cancel these objects if they are
/// no longer needed by any task.
class TaskDependencyManager {
public:
/// Create a task dependency manager.
///
/// \param object_manager Used to fetch required objects from remote nodes.
/// \param reconstruction_policy Used to reconstruct required objects that no
/// longer exist on any live node.
/// \param io_service Event loop used to set timers for renewing task leases.
/// \param client_id This node's GCS client ID, recorded in task lease entries.
/// \param initial_lease_period_ms Expiration period of the initial task
/// lease; the period doubles on every renewal.
/// \param gcs_client A client connection to the GCS.
TaskDependencyManager(ObjectManagerInterface &object_manager,
ReconstructionPolicyInterface &reconstruction_policy,
boost::asio::io_service &io_service, const ClientID &client_id,
int64_t initial_lease_period_ms,
std::shared_ptr<gcs::GcsClient> gcs_client);
/// Check whether an object is locally available.
///
/// \param object_id The object to check for.
/// \return Whether the object is local.
bool CheckObjectLocal(const ObjectID &object_id) const;
/// Subscribe to object dependencies required by the task and check whether
/// all dependencies are fulfilled. This should be called for task arguments and
/// `ray.get` calls during task execution.
///
/// The TaskDependencyManager will track the task's dependencies
/// until UnsubscribeGetDependencies is called on the same task ID. If any
/// dependencies are remote, then they will be requested. When the last
/// remote dependency later appears locally via a call to HandleObjectLocal,
/// the subscribed task will be returned by the HandleObjectLocal call,
/// signifying that it is ready to run. This method may be called multiple
/// times per task.
///
/// \param task_id The ID of the task whose dependencies to subscribe to.
/// \param required_objects The objects required by the task.
/// \return Whether all of the given dependencies for the given task are
/// local.
bool SubscribeGetDependencies(const TaskID &task_id,
const std::vector<ObjectID> &required_objects);
/// Subscribe to object dependencies required by the worker. This should be called for
/// ray.wait calls during task execution.
///
/// The TaskDependencyManager will track all remote dependencies until the
/// dependencies are local, or until UnsubscribeWaitDependencies is called
/// with the same worker ID, whichever occurs first. Remote dependencies will
/// be requested. This method may be called multiple times per worker on the
/// same objects.
///
/// \param worker_id The ID of the worker that called `ray.wait`.
/// \param required_objects The objects required by the worker.
/// \return Void.
void SubscribeWaitDependencies(const WorkerID &worker_id,
const std::vector<ObjectID> &required_objects);
/// Unsubscribe from the object dependencies required by this task through the task
/// arguments or `ray.get`. If the objects were remote and are no longer required by any
/// subscribed task, then they will be canceled.
///
/// \param task_id The ID of the task whose dependencies we should unsubscribe from.
/// \return Whether the task was subscribed before.
bool UnsubscribeGetDependencies(const TaskID &task_id);
/// Unsubscribe from the object dependencies required by this worker through `ray.wait`.
/// If the objects were remote and are no longer required by any subscribed task, then
/// they will be canceled.
///
/// \param worker_id The ID of the worker whose dependencies we should unsubscribe from.
/// \return Void.
void UnsubscribeWaitDependencies(const WorkerID &worker_id);
/// Mark that the given task is pending execution. Any objects that it creates
/// are now considered to be pending creation. If there are any subscribed
/// tasks that depend on these objects, then the objects will be canceled.
///
/// \param task The task that is pending execution.
void TaskPending(const Task &task);
/// Mark that the given task is no longer pending execution. Any objects that
/// it creates that are not already local are now considered to be remote. If
/// there are any subscribed tasks that depend on these objects, then the
/// objects will be requested.
///
/// \param task_id The ID of the task to cancel.
void TaskCanceled(const TaskID &task_id);
/// Handle an object becoming locally available. If there are any subscribed
/// tasks that depend on this object, then the object will be canceled.
///
/// \param object_id The object ID of the object to mark as locally
/// available.
/// \return A list of task IDs. This contains all subscribed tasks that now
/// have all of their dependencies fulfilled, once this object was made
/// local.
std::vector<TaskID> HandleObjectLocal(const ray::ObjectID &object_id);
/// Handle an object that is no longer locally available. If there are any
/// subscribed tasks that depend on this object, then the object will be
/// requested.
///
/// \param object_id The object ID of the object that was previously locally
/// available.
/// \return A list of task IDs. This contains all subscribed tasks that
/// previously had all of their dependencies fulfilled, but are now missing
/// this object dependency.
std::vector<TaskID> HandleObjectMissing(const ray::ObjectID &object_id);
/// Get a list of all Tasks currently marked as pending object dependencies in the task
/// dependency manager.
///
/// \return Return a vector of TaskIDs for tasks registered as pending.
std::vector<TaskID> GetPendingTasks() const;
/// Remove all of the tasks specified. These tasks will no longer be
/// considered pending and the objects they depend on will no longer be
/// required.
///
/// \param task_ids The collection of task IDs. For a given task in this set,
/// all tasks that depend on the task must also be included in the set.
void RemoveTasksAndRelatedObjects(const std::unordered_set<TaskID> &task_ids);
/// Returns debug string for class.
///
/// \return string.
std::string DebugString() const;
/// Record metrics.
void RecordMetrics() const;
private:
/// Tracks which tasks and workers depend on a given object.
struct ObjectDependencies {
/// The tasks that depend on this object, either because the object is a task argument
/// or because the task called `ray.get` on the object.
std::unordered_set<TaskID> dependent_tasks;
/// The workers that depend on this object because they called `ray.wait` on the
/// object.
std::unordered_set<WorkerID> dependent_workers;
/// Whether no task and no worker depends on this object anymore.
bool Empty() const { return dependent_tasks.empty() && dependent_workers.empty(); }
};
/// A struct to represent the object dependencies of a task.
struct TaskDependencies {
/// The objects that the task depends on. These are either the arguments to
/// the task or objects that the task calls `ray.get` on. These must be
/// local before the task is ready to execute. Objects are removed from
/// this set once UnsubscribeGetDependencies is called.
std::unordered_set<ObjectID> get_dependencies;
/// The number of object arguments that are not available locally. This
/// must be zero before the task is ready to execute.
int64_t num_missing_get_dependencies;
};
/// The objects that the worker is fetching. These are objects that a task that executed
/// or is executing on the worker called `ray.wait` on that are not yet local. An object
/// will be automatically removed from this set once it becomes local.
using WorkerDependencies = std::unordered_set<ObjectID>;
/// Lease-renewal state for a task that is pending execution on this node.
struct PendingTask {
PendingTask(int64_t initial_lease_period_ms, boost::asio::io_service &io_service)
: lease_period(initial_lease_period_ms),
expires_at(INT64_MAX),
lease_timer(new boost::asio::deadline_timer(io_service)) {}
/// The timeout within which the lease should be renewed.
int64_t lease_period;
/// The time at which the current lease will expire, according to this
/// node's steady clock.
int64_t expires_at;
/// A timer used to determine when to next renew the lease.
std::unique_ptr<boost::asio::deadline_timer> lease_timer;
};
/// Check whether the given object needs to be made available through object
/// transfer or reconstruction. These are objects for which: (1) there is a
/// subscribed task dependent on it, (2) the object is not local, and (3) the
/// task that creates the object is not pending execution locally.
bool CheckObjectRequired(const ObjectID &object_id) const;
/// If the given object is required, then request that the object be made
/// available through object transfer or reconstruction.
void HandleRemoteDependencyRequired(const ObjectID &object_id);
/// If the given object is no longer required, then cancel any in-progress
/// operations to make the object available through object transfer or
/// reconstruction.
void HandleRemoteDependencyCanceled(const ObjectID &object_id);
/// Acquire the task lease in the GCS for the given task. This is used to
/// indicate to other nodes that the task is currently pending on this node.
/// The task lease has an expiration time. If we do not renew the lease
/// before that time, then other nodes may choose to execute the task.
void AcquireTaskLease(const TaskID &task_id);
/// The object manager, used to fetch required objects from remote nodes.
ObjectManagerInterface &object_manager_;
/// The reconstruction policy, used to reconstruct required objects that no
/// longer exist on any live nodes.
ReconstructionPolicyInterface &reconstruction_policy_;
/// The event loop, used to set timers for renewing task leases. The task
/// leases are used to indicate which tasks are pending execution on this
/// node and must be periodically renewed.
boost::asio::io_service &io_service_;
/// This node's GCS client ID, used in the task lease information.
const ClientID client_id_;
/// For a given task, the expiration period of the initial task lease that is
/// added to the GCS. The lease expiration period is doubled every time the
/// lease is renewed.
const int64_t initial_lease_period_ms_;
/// A client connection to the GCS.
std::shared_ptr<gcs::GcsClient> gcs_client_;
/// A mapping from task ID of each subscribed task to its list of object
/// dependencies, either task arguments or objects passed into `ray.get`.
std::unordered_map<ray::TaskID, TaskDependencies> task_dependencies_;
/// A mapping from worker ID to each object that the worker called `ray.wait` on.
std::unordered_map<ray::WorkerID, WorkerDependencies> worker_dependencies_;
/// All tasks whose outputs are required by a subscribed task. This is a
/// mapping from task ID to information about the objects that the task
/// creates, either by return value or by `ray.put`. For each object, we
/// store the IDs of the subscribed tasks that are dependent on the object.
std::unordered_map<ray::TaskID, std::unordered_map<ObjectID, ObjectDependencies>>
required_tasks_;
/// Objects that are required by a subscribed task, are not local, and are
/// not created by a pending task. For these objects, there are pending
/// operations to make the object available.
std::unordered_set<ray::ObjectID> required_objects_;
/// The set of locally available objects.
std::unordered_set<ray::ObjectID> local_objects_;
/// The set of tasks that are pending execution. Any objects created by these
/// tasks that are not already local are pending creation.
std::unordered_map<ray::TaskID, PendingTask> pending_tasks_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_TASK_DEPENDENCY_MANAGER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/task_dependency_manager_test.cc
|
C++
|
#include <list>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <boost/asio.hpp>
#include "ray/common/task/task_util.h"
#include "ray/gcs/redis_accessor.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/raylet/task_dependency_manager.h"
#include "ray/util/test_util.h"
namespace ray {
namespace raylet {
using ::testing::_;
// Fixed job and driver-task IDs shared by tests that need deterministic
// default identifiers.
const static JobID kDefaultJobId = JobID::FromInt(1);
const static TaskID kDefaultDriverTaskId = TaskID::ForDriverTask(kDefaultJobId);
/// Mock object manager used to verify which objects the dependency manager
/// requests (Pull) and cancels (CancelPull).
class MockObjectManager : public ObjectManagerInterface {
public:
MOCK_METHOD1(Pull, ray::Status(const ObjectID &object_id));
MOCK_METHOD1(CancelPull, void(const ObjectID &object_id));
};
/// Mock reconstruction policy used to verify which objects the dependency
/// manager listens on for reconstruction and which listens it cancels.
class MockReconstructionPolicy : public ReconstructionPolicyInterface {
public:
MOCK_METHOD1(ListenAndMaybeReconstruct, void(const ObjectID &object_id));
MOCK_METHOD1(Cancel, void(const ObjectID &object_id));
};
/// Mock task-info accessor that intercepts task lease writes to the GCS so
/// tests can assert how often leases are added/renewed.
class MockTaskInfoAccessor : public gcs::RedisTaskInfoAccessor {
public:
MockTaskInfoAccessor(gcs::RedisGcsClient *client)
: gcs::RedisTaskInfoAccessor(client) {}
MOCK_METHOD2(AsyncAddTaskLease,
ray::Status(const std::shared_ptr<TaskLeaseData> &data_ptr,
const gcs::StatusCallback &callback));
};
/// GCS client whose task accessor can be swapped for a mock.
class MockGcsClient : public gcs::RedisGcsClient {
public:
MockGcsClient(const gcs::GcsClientOptions &options) : gcs::RedisGcsClient(options) {}
// Takes ownership of `task_accessor_mock`; the client's internal accessor
// pointer is replaced with it.
void Init(MockTaskInfoAccessor *task_accessor_mock) {
task_accessor_.reset(task_accessor_mock);
}
};
/// Test fixture that wires a TaskDependencyManager to mocked collaborators
/// (object manager, reconstruction policy, and GCS task accessor).
class TaskDependencyManagerTest : public ::testing::Test {
public:
TaskDependencyManagerTest()
: object_manager_mock_(),
reconstruction_policy_mock_(),
io_service_(),
options_("", 1, ""),
gcs_client_mock_(new MockGcsClient(options_)),
task_accessor_mock_(new MockTaskInfoAccessor(gcs_client_mock_.get())),
initial_lease_period_ms_(100),
task_dependency_manager_(object_manager_mock_, reconstruction_policy_mock_,
io_service_, ClientID::Nil(), initial_lease_period_ms_,
gcs_client_mock_) {
// Hands ownership of the accessor mock to the GCS client mock; the raw
// task_accessor_mock_ pointer is kept only for setting expectations.
gcs_client_mock_->Init(task_accessor_mock_);
}
// Run the event loop for `timeout_ms` milliseconds, then stop it so the
// test can continue. Used to let lease-renewal timers fire.
void Run(uint64_t timeout_ms) {
auto timer_period = boost::posix_time::milliseconds(timeout_ms);
auto timer = std::make_shared<boost::asio::deadline_timer>(io_service_, timer_period);
timer->async_wait([this](const boost::system::error_code &error) {
ASSERT_FALSE(error);
io_service_.stop();
});
io_service_.run();
// Reset so a subsequent Run() call can restart the loop.
io_service_.reset();
}
protected:
MockObjectManager object_manager_mock_;
MockReconstructionPolicy reconstruction_policy_mock_;
boost::asio::io_service io_service_;
gcs::GcsClientOptions options_;
std::shared_ptr<MockGcsClient> gcs_client_mock_;
// Owned by gcs_client_mock_ after Init(); kept raw for EXPECT_CALLs.
MockTaskInfoAccessor *task_accessor_mock_;
int64_t initial_lease_period_ms_;
TaskDependencyManager task_dependency_manager_;
};
/// Build a task that takes the given objects as by-reference arguments and
/// produces `num_returns` return values. All other spec fields are
/// randomized or defaulted.
static inline Task ExampleTask(const std::vector<ObjectID> &arguments,
uint64_t num_returns) {
TaskSpecBuilder builder;
rpc::Address address;
builder.SetCommonTaskSpec(RandomTaskId(), Language::PYTHON, {"", "", ""}, JobID::Nil(),
RandomTaskId(), 0, RandomTaskId(), address, num_returns,
false, {}, {});
for (const auto &arg : arguments) {
builder.AddByRefArg(arg);
}
rpc::TaskExecutionSpec execution_spec_message;
execution_spec_message.set_num_forwards(1);
return Task(builder.Build(), TaskExecutionSpecification(execution_spec_message));
}
/// Build a linear chain of `chain_size` tasks. The first task consumes
/// `initial_arguments`; every subsequent task consumes the return objects of
/// the task before it. Each task produces `num_returns` return objects.
///
/// \param chain_size Number of tasks in the chain.
/// \param initial_arguments Arguments for the first task in the chain.
/// \param num_returns Number of return values per task.
/// \return The tasks, in chain order.
std::vector<Task> MakeTaskChain(int chain_size,
                                const std::vector<ObjectID> &initial_arguments,
                                int64_t num_returns) {
  std::vector<Task> chain;
  std::vector<ObjectID> next_arguments = initial_arguments;
  for (int i = 0; i < chain_size; i++) {
    const Task task = ExampleTask(next_arguments, num_returns);
    chain.push_back(task);
    // The next task in the chain depends on this task's plasma returns.
    next_arguments.clear();
    for (size_t j = 0; j < task.GetTaskSpecification().NumReturns(); j++) {
      next_arguments.push_back(task.GetTaskSpecification().ReturnIdForPlasma(j));
    }
  }
  return chain;
}
/// A task with three remote arguments becomes ready only once all three
/// arguments have been made local; each argument triggers exactly one
/// request and one cancellation.
TEST_F(TaskDependencyManagerTest, TestSimpleTask) {
// Create a task with 3 arguments.
int num_arguments = 3;
std::vector<ObjectID> arguments;
for (int i = 0; i < num_arguments; i++) {
arguments.push_back(ObjectID::FromRandom());
}
TaskID task_id = RandomTaskId();
// No objects have been registered in the task dependency manager, so all
// arguments should be remote.
for (const auto &argument_id : arguments) {
EXPECT_CALL(object_manager_mock_, Pull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(argument_id));
}
// Subscribe to the task's dependencies.
bool ready = task_dependency_manager_.SubscribeGetDependencies(task_id, arguments);
ASSERT_FALSE(ready);
// All arguments should be canceled as they become available locally.
for (const auto &argument_id : arguments) {
EXPECT_CALL(object_manager_mock_, CancelPull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, Cancel(argument_id));
}
// For each argument except the last, tell the task dependency manager that
// the argument is local.
int i = 0;
for (; i < num_arguments - 1; i++) {
auto ready_task_ids = task_dependency_manager_.HandleObjectLocal(arguments[i]);
ASSERT_TRUE(ready_task_ids.empty());
}
// Tell the task dependency manager that the last argument is local. Now the
// task should be ready to run.
auto ready_task_ids = task_dependency_manager_.HandleObjectLocal(arguments[i]);
ASSERT_EQ(ready_task_ids.size(), 1);
ASSERT_EQ(ready_task_ids.front(), task_id);
}
/// Re-subscribing the same task to a growing dependency list only requests
/// each new object once; previously-subscribed objects are not re-requested.
TEST_F(TaskDependencyManagerTest, TestDuplicateSubscribeGetDependencies) {
// Create a task with 3 arguments.
TaskID task_id = RandomTaskId();
int num_arguments = 3;
std::vector<ObjectID> arguments;
for (int i = 0; i < num_arguments; i++) {
// Add the new argument to the list of dependencies to subscribe to.
ObjectID argument_id = ObjectID::FromRandom();
arguments.push_back(argument_id);
// Subscribe to the task's dependencies. All arguments except the last are
// duplicates of previous subscription calls. Each argument should only be
// requested from the node manager once.
EXPECT_CALL(object_manager_mock_, Pull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(argument_id));
bool ready = task_dependency_manager_.SubscribeGetDependencies(task_id, arguments);
ASSERT_FALSE(ready);
}
// All arguments should be canceled as they become available locally.
for (const auto &argument_id : arguments) {
EXPECT_CALL(object_manager_mock_, CancelPull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, Cancel(argument_id));
}
// For each argument except the last, tell the task dependency manager that
// the argument is local.
int i = 0;
for (; i < num_arguments - 1; i++) {
auto ready_task_ids = task_dependency_manager_.HandleObjectLocal(arguments[i]);
ASSERT_TRUE(ready_task_ids.empty());
}
// Tell the task dependency manager that the last argument is local. Now the
// task should be ready to run.
auto ready_task_ids = task_dependency_manager_.HandleObjectLocal(arguments[i]);
ASSERT_EQ(ready_task_ids.size(), 1);
ASSERT_EQ(ready_task_ids.front(), task_id);
}
/// Several tasks depending on the same object trigger a single request for
/// it, and all become ready together when it arrives.
TEST_F(TaskDependencyManagerTest, TestMultipleTasks) {
// Create 3 tasks that are dependent on the same object.
ObjectID argument_id = ObjectID::FromRandom();
std::vector<TaskID> dependent_tasks;
int num_dependent_tasks = 3;
// The object should only be requested from the object manager once for all
// three tasks.
EXPECT_CALL(object_manager_mock_, Pull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(argument_id));
for (int i = 0; i < num_dependent_tasks; i++) {
TaskID task_id = RandomTaskId();
dependent_tasks.push_back(task_id);
// Subscribe to each of the task's dependencies.
bool ready =
task_dependency_manager_.SubscribeGetDependencies(task_id, {argument_id});
ASSERT_FALSE(ready);
}
// Tell the task dependency manager that the object is local.
EXPECT_CALL(object_manager_mock_, CancelPull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, Cancel(argument_id));
auto ready_task_ids = task_dependency_manager_.HandleObjectLocal(argument_id);
// Check that all tasks are now ready to run.
ASSERT_EQ(ready_task_ids.size(), dependent_tasks.size());
// Ordering of the ready tasks is unspecified; check membership only.
for (const auto &task_id : ready_task_ids) {
ASSERT_NE(std::find(dependent_tasks.begin(), dependent_tasks.end(), task_id),
dependent_tasks.end());
}
}
/// In a chain of locally-queued tasks, no dependency is ever requested
/// remotely, and each task's completion makes exactly the next task ready.
TEST_F(TaskDependencyManagerTest, TestTaskChain) {
// Create 3 tasks, each dependent on the previous. The first task has no
// arguments.
int num_tasks = 3;
auto tasks = MakeTaskChain(num_tasks, {}, 1);
int num_ready_tasks = 1;
int i = 0;
// No objects should be remote or canceled since each task depends on a
// locally queued task.
EXPECT_CALL(object_manager_mock_, Pull(_)).Times(0);
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(_)).Times(0);
EXPECT_CALL(object_manager_mock_, CancelPull(_)).Times(0);
EXPECT_CALL(reconstruction_policy_mock_, Cancel(_)).Times(0);
for (const auto &task : tasks) {
// Subscribe to each of the tasks' arguments.
const auto &arguments = task.GetDependencies();
bool ready = task_dependency_manager_.SubscribeGetDependencies(
task.GetTaskSpecification().TaskId(), arguments);
if (i < num_ready_tasks) {
// The first task should be ready to run since it has no arguments.
ASSERT_TRUE(ready);
} else {
// All remaining tasks depend on the previous task.
ASSERT_FALSE(ready);
}
// Mark each task as pending. A lease entry should be added to the GCS for
// each task.
EXPECT_CALL(*task_accessor_mock_, AsyncAddTaskLease(_, _));
task_dependency_manager_.TaskPending(task);
i++;
}
// Simulate executing each task. Each task's completion should make the next
// task runnable.
while (!tasks.empty()) {
auto task = tasks.front();
tasks.erase(tasks.begin());
TaskID task_id = task.GetTaskSpecification().TaskId();
auto return_id = task.GetTaskSpecification().ReturnIdForPlasma(0);
task_dependency_manager_.UnsubscribeGetDependencies(task_id);
// Simulate the object notifications for the task's return values.
auto ready_tasks = task_dependency_manager_.HandleObjectLocal(return_id);
if (tasks.empty()) {
// If there are no more tasks, then there should be no more tasks that
// become ready to run.
ASSERT_TRUE(ready_tasks.empty());
} else {
// If there are more tasks to run, then the next task in the chain should
// now be ready to run.
ASSERT_EQ(ready_tasks.size(), 1);
ASSERT_EQ(ready_tasks.front(), tasks.front().GetTaskSpecification().TaskId());
}
// Simulate the task finishing execution.
task_dependency_manager_.TaskCanceled(task_id);
}
}
/// An object produced by `ray.put` inside a pending local task is treated as
/// local as soon as its creating task starts pending, canceling any earlier
/// remote requests for it.
TEST_F(TaskDependencyManagerTest, TestDependentPut) {
// Create a task with no arguments that puts an object, and a second task
// that depends on that put object.
auto task1 = ExampleTask({}, 0);
ObjectID put_id = ObjectID::ForPut(task1.GetTaskSpecification().TaskId(), /*index=*/1,
/*transport_type=*/0);
auto task2 = ExampleTask({put_id}, 0);
// No objects have been registered in the task dependency manager, so the put
// object should be remote.
EXPECT_CALL(object_manager_mock_, Pull(put_id));
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(put_id));
// Subscribe to the task's dependencies.
bool ready = task_dependency_manager_.SubscribeGetDependencies(
task2.GetTaskSpecification().TaskId(), {put_id});
ASSERT_FALSE(ready);
// The put object should be considered local as soon as the task that creates
// it is pending execution.
EXPECT_CALL(object_manager_mock_, CancelPull(put_id));
EXPECT_CALL(reconstruction_policy_mock_, Cancel(put_id));
EXPECT_CALL(*task_accessor_mock_, AsyncAddTaskLease(_, _));
task_dependency_manager_.TaskPending(task1);
}
/// When a locally-queued task is forwarded to a remote node, its return
/// object becomes a remote dependency for the task that stays behind, and
/// that task becomes ready once the return object appears locally.
TEST_F(TaskDependencyManagerTest, TestTaskForwarding) {
// Create 2 tasks, one dependent on the other. The first has no arguments.
int num_tasks = 2;
auto tasks = MakeTaskChain(num_tasks, {}, 1);
for (const auto &task : tasks) {
// Subscribe to each of the tasks' arguments.
const auto &arguments = task.GetDependencies();
static_cast<void>(task_dependency_manager_.SubscribeGetDependencies(
task.GetTaskSpecification().TaskId(), arguments));
EXPECT_CALL(*task_accessor_mock_, AsyncAddTaskLease(_, _));
task_dependency_manager_.TaskPending(task);
}
// Get the first task.
const auto task = tasks.front();
TaskID task_id = task.GetTaskSpecification().TaskId();
ObjectID return_id = task.GetTaskSpecification().ReturnIdForPlasma(0);
// Simulate forwarding the first task to a remote node.
task_dependency_manager_.UnsubscribeGetDependencies(task_id);
// The object returned by the first task should be considered remote once we
// cancel the forwarded task, since the second task depends on it.
EXPECT_CALL(object_manager_mock_, Pull(return_id));
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(return_id));
task_dependency_manager_.TaskCanceled(task_id);
// Simulate the task executing on a remote node and its return value
// appearing locally.
EXPECT_CALL(object_manager_mock_, CancelPull(return_id));
EXPECT_CALL(reconstruction_policy_mock_, Cancel(return_id));
auto ready_tasks = task_dependency_manager_.HandleObjectLocal(return_id);
// Check that the task that we kept is now ready to run.
ASSERT_EQ(ready_tasks.size(), 1);
ASSERT_EQ(ready_tasks.front(), tasks.back().GetTaskSpecification().TaskId());
}
/// Evicting a ready task's arguments sends it back to the waiting state
/// (only the first eviction does so), re-requests the evicted objects, and
/// the task becomes ready again once all arguments return.
TEST_F(TaskDependencyManagerTest, TestEviction) {
// Create a task with 3 arguments.
int num_arguments = 3;
std::vector<ObjectID> arguments;
for (int i = 0; i < num_arguments; i++) {
arguments.push_back(ObjectID::FromRandom());
}
TaskID task_id = RandomTaskId();
// No objects have been registered in the task dependency manager, so all
// arguments should be remote.
for (const auto &argument_id : arguments) {
EXPECT_CALL(object_manager_mock_, Pull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(argument_id));
}
// Subscribe to the task's dependencies.
bool ready = task_dependency_manager_.SubscribeGetDependencies(task_id, arguments);
ASSERT_FALSE(ready);
// Tell the task dependency manager that each of the arguments is now
// available.
for (const auto &argument_id : arguments) {
EXPECT_CALL(object_manager_mock_, CancelPull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, Cancel(argument_id));
}
for (size_t i = 0; i < arguments.size(); i++) {
std::vector<TaskID> ready_tasks;
ready_tasks = task_dependency_manager_.HandleObjectLocal(arguments[i]);
if (i == arguments.size() - 1) {
ASSERT_EQ(ready_tasks.size(), 1);
ASSERT_EQ(ready_tasks.front(), task_id);
} else {
ASSERT_TRUE(ready_tasks.empty());
}
}
// Simulate each of the arguments getting evicted. Each object should now be
// considered remote.
for (const auto &argument_id : arguments) {
EXPECT_CALL(object_manager_mock_, Pull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(argument_id));
}
for (size_t i = 0; i < arguments.size(); i++) {
std::vector<TaskID> waiting_tasks;
waiting_tasks = task_dependency_manager_.HandleObjectMissing(arguments[i]);
if (i == 0) {
// The first eviction should cause the task to go back to the waiting
// state.
ASSERT_EQ(waiting_tasks.size(), 1);
ASSERT_EQ(waiting_tasks.front(), task_id);
} else {
// The subsequent evictions shouldn't cause any more tasks to go back to
// the waiting state.
ASSERT_TRUE(waiting_tasks.empty());
}
}
// Tell the task dependency manager that each of the arguments is available
// again.
for (const auto &argument_id : arguments) {
EXPECT_CALL(object_manager_mock_, CancelPull(argument_id));
EXPECT_CALL(reconstruction_policy_mock_, Cancel(argument_id));
}
for (size_t i = 0; i < arguments.size(); i++) {
std::vector<TaskID> ready_tasks;
ready_tasks = task_dependency_manager_.HandleObjectLocal(arguments[i]);
if (i == arguments.size() - 1) {
ASSERT_EQ(ready_tasks.size(), 1);
ASSERT_EQ(ready_tasks.front(), task_id);
} else {
ASSERT_TRUE(ready_tasks.empty());
}
}
}
/// While a task stays pending, its GCS lease is renewed once per lease
/// period, with the period doubling after each renewal.
TEST_F(TaskDependencyManagerTest, TestTaskLeaseRenewal) {
// Mark a task as pending.
auto task = ExampleTask({}, 0);
// We expect an initial call to acquire the lease.
EXPECT_CALL(*task_accessor_mock_, AsyncAddTaskLease(_, _));
task_dependency_manager_.TaskPending(task);
// Check that while the task is still pending, there is one call to renew the
// lease for each lease period that passes. The lease period doubles with
// each renewal.
int num_expected_calls = 4;
int64_t sleep_time = 0;
// Total sleep covers lease periods of 1x, 2x, 3x, 4x the initial period.
for (int i = 1; i <= num_expected_calls; i++) {
sleep_time += i * initial_lease_period_ms_;
}
EXPECT_CALL(*task_accessor_mock_, AsyncAddTaskLease(_, _)).Times(num_expected_calls);
Run(sleep_time);
}
/// Tasks removed via RemoveTasksAndRelatedObjects no longer react to their
/// dependencies becoming local or missing.
TEST_F(TaskDependencyManagerTest, TestRemoveTasksAndRelatedObjects) {
// Create 3 tasks, each dependent on the previous. The first task has no
// arguments.
int num_tasks = 3;
auto tasks = MakeTaskChain(num_tasks, {}, 1);
// No objects should be remote or canceled since each task depends on a
// locally queued task.
EXPECT_CALL(object_manager_mock_, Pull(_)).Times(0);
EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(_)).Times(0);
EXPECT_CALL(object_manager_mock_, CancelPull(_)).Times(0);
EXPECT_CALL(reconstruction_policy_mock_, Cancel(_)).Times(0);
for (const auto &task : tasks) {
// Subscribe to each of the tasks' arguments.
const auto &arguments = task.GetDependencies();
task_dependency_manager_.SubscribeGetDependencies(
task.GetTaskSpecification().TaskId(), arguments);
// Mark each task as pending. A lease entry should be added to the GCS for
// each task.
EXPECT_CALL(*task_accessor_mock_, AsyncAddTaskLease(_, _));
task_dependency_manager_.TaskPending(task);
}
// Simulate executing the first task. This should make the second task
// runnable.
auto task = tasks.front();
TaskID task_id = task.GetTaskSpecification().TaskId();
auto return_id = task.GetTaskSpecification().ReturnIdForPlasma(0);
task_dependency_manager_.UnsubscribeGetDependencies(task_id);
// Simulate the object notifications for the task's return values.
auto ready_tasks = task_dependency_manager_.HandleObjectLocal(return_id);
// The second task should be ready to run.
ASSERT_EQ(ready_tasks.size(), 1);
// Simulate the task finishing execution.
task_dependency_manager_.TaskCanceled(task_id);
// Remove all tasks from the manager except the first task, which already
// finished executing.
std::unordered_set<TaskID> task_ids;
for (const auto &task : tasks) {
task_ids.insert(task.GetTaskSpecification().TaskId());
}
task_ids.erase(task_id);
task_dependency_manager_.RemoveTasksAndRelatedObjects(task_ids);
// Simulate evicting the return value of the first task. Make sure that this
// does not return the second task, which should have been removed.
auto waiting_tasks = task_dependency_manager_.HandleObjectMissing(return_id);
ASSERT_TRUE(waiting_tasks.empty());
// Simulate the object notifications for the second task's return values.
// Make sure that this does not return the third task, which should have been
// removed.
return_id = tasks[1].GetTaskSpecification().ReturnIdForPlasma(0);
ready_tasks = task_dependency_manager_.HandleObjectLocal(return_id);
ASSERT_TRUE(ready_tasks.empty());
}
/// Test that when no objects are locally available, a `ray.wait` call makes
/// the correct requests to remote nodes and correctly cancels the requests
/// when the `ray.wait` call is canceled.
TEST_F(TaskDependencyManagerTest, TestWaitDependencies) {
  // Generate a random worker and objects to wait on.
  WorkerID worker_id = WorkerID::FromRandom();
  int num_objects = 3;
  std::vector<ObjectID> wait_object_ids;
  for (int i = 0; i < num_objects; i++) {
    wait_object_ids.push_back(ObjectID::FromRandom());
  }
  // Simulate a worker calling `ray.wait` on some objects. Each non-local
  // object should trigger exactly one Pull and one reconstruction listen.
  EXPECT_CALL(object_manager_mock_, Pull(_)).Times(num_objects);
  EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(_))
      .Times(num_objects);
  task_dependency_manager_.SubscribeWaitDependencies(worker_id, wait_object_ids);
  // Check that it's okay to call `ray.wait` on the same objects again. No new
  // calls should be made to try and make the objects local.
  task_dependency_manager_.SubscribeWaitDependencies(worker_id, wait_object_ids);
  // Cancel the worker's `ray.wait` calls. Each object's Pull and
  // reconstruction listen should be canceled exactly once.
  EXPECT_CALL(object_manager_mock_, CancelPull(_)).Times(num_objects);
  EXPECT_CALL(reconstruction_policy_mock_, Cancel(_)).Times(num_objects);
  task_dependency_manager_.UnsubscribeWaitDependencies(worker_id);
}
/// Test that when one of the objects is already local at the time of the
/// `ray.wait` call, the `ray.wait` call does not trigger any requests to
/// remote nodes for that object.
TEST_F(TaskDependencyManagerTest, TestWaitDependenciesObjectLocal) {
  // Generate a random worker and objects to wait on.
  WorkerID worker_id = WorkerID::FromRandom();
  int num_objects = 3;
  std::vector<ObjectID> wait_object_ids;
  for (int i = 0; i < num_objects; i++) {
    wait_object_ids.push_back(ObjectID::FromRandom());
  }
  // Simulate one of the objects becoming local. The later `ray.wait` call
  // should have no effect because the object is already local.
  // NOTE(review): the moved-from back element stays in `wait_object_ids`;
  // the != comparisons below assume ObjectID's move behaves like a copy
  // (leaving the source equal to the original) — TODO confirm.
  const ObjectID local_object_id = std::move(wait_object_ids.back());
  auto ready_task_ids = task_dependency_manager_.HandleObjectLocal(local_object_id);
  ASSERT_TRUE(ready_task_ids.empty());
  // Simulate a worker calling `ray.wait` on the objects. It should only make
  // requests for the objects that are not local.
  for (const auto &object_id : wait_object_ids) {
    if (object_id != local_object_id) {
      EXPECT_CALL(object_manager_mock_, Pull(object_id));
      EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(object_id));
    }
  }
  task_dependency_manager_.SubscribeWaitDependencies(worker_id, wait_object_ids);
  // Simulate the local object getting evicted. The `ray.wait` call should not
  // be reactivated.
  auto waiting_task_ids = task_dependency_manager_.HandleObjectMissing(local_object_id);
  ASSERT_TRUE(waiting_task_ids.empty());
  // Cancel the worker's `ray.wait` calls. Requests should only be canceled
  // for the objects that were not local at subscription time.
  for (const auto &object_id : wait_object_ids) {
    if (object_id != local_object_id) {
      EXPECT_CALL(object_manager_mock_, CancelPull(object_id));
      EXPECT_CALL(reconstruction_policy_mock_, Cancel(object_id));
    }
  }
  task_dependency_manager_.UnsubscribeWaitDependencies(worker_id);
}
/// Test that when one of the objects becomes local after a `ray.wait` call,
/// all requests to remote nodes associated with the object are canceled.
TEST_F(TaskDependencyManagerTest, TestWaitDependenciesHandleObjectLocal) {
  // Generate a random worker and objects to wait on.
  WorkerID worker_id = WorkerID::FromRandom();
  int num_objects = 3;
  std::vector<ObjectID> wait_object_ids;
  for (int i = 0; i < num_objects; i++) {
    wait_object_ids.push_back(ObjectID::FromRandom());
  }
  // Simulate a worker calling `ray.wait` on some objects.
  EXPECT_CALL(object_manager_mock_, Pull(_)).Times(num_objects);
  EXPECT_CALL(reconstruction_policy_mock_, ListenAndMaybeReconstruct(_))
      .Times(num_objects);
  task_dependency_manager_.SubscribeWaitDependencies(worker_id, wait_object_ids);
  // Simulate one of the objects becoming local while the `ray.wait` call is
  // active. The remote requests for that object should be canceled.
  const ObjectID local_object_id = std::move(wait_object_ids.back());
  wait_object_ids.pop_back();
  EXPECT_CALL(object_manager_mock_, CancelPull(local_object_id));
  EXPECT_CALL(reconstruction_policy_mock_, Cancel(local_object_id));
  auto ready_task_ids = task_dependency_manager_.HandleObjectLocal(local_object_id);
  ASSERT_TRUE(ready_task_ids.empty());
  // Simulate the local object getting evicted. The `ray.wait` call should not
  // be reactivated.
  auto waiting_task_ids = task_dependency_manager_.HandleObjectMissing(local_object_id);
  ASSERT_TRUE(waiting_task_ids.empty());
  // Cancel the worker's `ray.wait` calls. Only the objects that are still not
  // local should be canceled.
  for (const auto &object_id : wait_object_ids) {
    EXPECT_CALL(object_manager_mock_, CancelPull(object_id));
    EXPECT_CALL(reconstruction_policy_mock_, Cancel(object_id));
  }
  task_dependency_manager_.UnsubscribeWaitDependencies(worker_id);
}
} // namespace raylet
} // namespace ray
int main(int argc, char **argv) {
  // Standard gtest entry point: consume gtest flags, then run every test.
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/worker.cc
|
C++
|
#include "worker.h"
#include <boost/bind.hpp>
#include "ray/raylet/format/node_manager_generated.h"
#include "ray/raylet/raylet.h"
#include "src/ray/protobuf/core_worker.grpc.pb.h"
#include "src/ray/protobuf/core_worker.pb.h"
namespace ray {
namespace raylet {
/// A constructor responsible for initializing the state of a worker.
Worker::Worker(const WorkerID &worker_id, const Language &language, int port,
               std::shared_ptr<LocalClientConnection> connection,
               rpc::ClientCallManager &client_call_manager)
    : worker_id_(worker_id),
      language_(language),
      port_(port),
      connection_(connection),
      dead_(false),
      blocked_(false),
      client_call_manager_(client_call_manager),
      is_detached_actor_(false) {
  // A positive port means the worker listens for gRPC calls; create the
  // client used to push work to it. The worker always runs on the same
  // machine as the raylet, hence the loopback address.
  if (port_ > 0) {
    rpc_client_ = std::unique_ptr<rpc::CoreWorkerClient>(
        new rpc::CoreWorkerClient("127.0.0.1", port_, client_call_manager_));
  }
}
// Trivial accessors/mutators for the worker's lifecycle state.
void Worker::MarkDead() { dead_ = true; }
bool Worker::IsDead() const { return dead_; }
void Worker::MarkBlocked() { blocked_ = true; }
void Worker::MarkUnblocked() { blocked_ = false; }
bool Worker::IsBlocked() const { return blocked_; }
WorkerID Worker::WorkerId() const { return worker_id_; }
ProcessHandle Worker::Process() const { return proc_; }
void Worker::SetProcess(const ProcessHandle &proc) {
  RAY_CHECK(!proc_);  // this procedure should not be called multiple times
  proc_ = proc;
}
Language Worker::GetLanguage() const { return language_; }
int Worker::Port() const { return port_; }
void Worker::AssignTaskId(const TaskID &task_id) { assigned_task_id_ = task_id; }
const TaskID &Worker::GetAssignedTaskId() const { return assigned_task_id_; }
bool Worker::AddBlockedTaskId(const TaskID &task_id) {
  // Record that this worker is blocked on `task_id`. Returns true iff the
  // ID was not already tracked.
  return blocked_task_ids_.insert(task_id).second;
}
bool Worker::RemoveBlockedTaskId(const TaskID &task_id) {
  // Stop tracking `task_id`. Returns true iff the ID was present (a set
  // erase removes either 0 or 1 elements).
  return blocked_task_ids_.erase(task_id) > 0;
}
const std::unordered_set<TaskID> &Worker::GetBlockedTaskIds() const {
  return blocked_task_ids_;
}
void Worker::AssignJobId(const JobID &job_id) { assigned_job_id_ = job_id; }
const JobID &Worker::GetAssignedJobId() const { return assigned_job_id_; }
void Worker::AssignActorId(const ActorID &actor_id) {
  // An actor ID may be assigned at most once, and must be non-nil.
  RAY_CHECK(actor_id_.IsNil())
      << "A worker that is already an actor cannot be assigned an actor ID again.";
  RAY_CHECK(!actor_id.IsNil());
  actor_id_ = actor_id;
}
const ActorID &Worker::GetActorId() const { return actor_id_; }
void Worker::MarkDetachedActor() { is_detached_actor_ = true; }
bool Worker::IsDetachedActor() const { return is_detached_actor_; }
const std::shared_ptr<LocalClientConnection> Worker::Connection() const {
  return connection_;
}
const ResourceIdSet &Worker::GetLifetimeResourceIds() const {
  return lifetime_resource_ids_;
}
void Worker::ResetLifetimeResourceIds() { lifetime_resource_ids_.Clear(); }
// NOTE(review): takes a non-const reference but only copies from it; a const
// reference would express intent better (requires a matching header change).
void Worker::SetLifetimeResourceIds(ResourceIdSet &resource_ids) {
  lifetime_resource_ids_ = resource_ids;
}
const ResourceIdSet &Worker::GetTaskResourceIds() const { return task_resource_ids_; }
void Worker::ResetTaskResourceIds() { task_resource_ids_.Clear(); }
// NOTE(review): same non-const-reference pattern as SetLifetimeResourceIds.
void Worker::SetTaskResourceIds(ResourceIdSet &resource_ids) {
  task_resource_ids_ = resource_ids;
}
ResourceIdSet Worker::ReleaseTaskCpuResources() {
  // Remove the CPU portion of the worker's task resources and return it to
  // the caller (used when the worker blocks).
  auto cpu_resources = task_resource_ids_.GetCpuResources();
  // The "acquire" terminology is a bit confusing here. The resources are being
  // "acquired" from the task_resource_ids_ object, and so the worker is losing
  // some resources.
  task_resource_ids_.Acquire(cpu_resources.ToResourceSet());
  return cpu_resources;
}
void Worker::AcquireTaskCpuResources(const ResourceIdSet &cpu_resources) {
  // The "release" terminology is a bit confusing here. The resources are being
  // given back to the worker and so "released" by the caller.
  task_resource_ids_.Release(cpu_resources);
}
const std::unordered_set<ObjectID> &Worker::GetActiveObjectIds() const {
  return active_object_ids_;
}
// NOTE(review): the parameter is a *const* rvalue reference, so despite the
// `&&` the assignment below copies rather than moves (a const rvalue cannot
// be moved from). Fixing the signature requires touching worker.h as well,
// so it is only flagged here.
void Worker::SetActiveObjectIds(const std::unordered_set<ObjectID> &&object_ids) {
  active_object_ids_ = object_ids;
}
Status Worker::AssignTask(const Task &task, const ResourceIdSet &resource_id_set) {
  // Push a task to this worker over RPC. Requires the worker to have a
  // listening port (see the constructor).
  RAY_CHECK(port_ > 0);
  rpc::AssignTaskRequest request;
  request.set_intended_worker_id(worker_id_.Binary());
  request.mutable_task()->mutable_task_spec()->CopyFrom(
      task.GetTaskSpecification().GetMessage());
  request.mutable_task()->mutable_task_execution_spec()->CopyFrom(
      task.GetTaskExecutionSpec().GetMessage());
  request.set_resource_ids(resource_id_set.Serialize());
  return rpc_client_->AssignTask(request, [](Status status,
                                             const rpc::AssignTaskReply &reply) {
    if (!status.ok()) {
      RAY_LOG(DEBUG) << "Worker failed to finish executing task: " << status.ToString();
    }
    // Worker has finished this task. There's nothing to do here
    // and assigning new task will be done when raylet receives
    // `TaskDone` message.
  });
}
void Worker::DirectActorCallArgWaitComplete(int64_t tag) {
  // Notify the worker over RPC that the arguments it was waiting on for the
  // direct actor call identified by `tag` are now ready.
  RAY_CHECK(port_ > 0);
  rpc::DirectActorCallArgWaitCompleteRequest request;
  request.set_tag(tag);
  request.set_intended_worker_id(worker_id_.Binary());
  auto status = rpc_client_->DirectActorCallArgWaitComplete(
      request, [](Status status, const rpc::DirectActorCallArgWaitCompleteReply &reply) {
        if (!status.ok()) {
          RAY_LOG(ERROR) << "Failed to send wait complete: " << status.ToString();
        }
      });
  // A failure to even enqueue the call is logged here, separately from the
  // asynchronous reply failure handled in the callback above.
  if (!status.ok()) {
    RAY_LOG(ERROR) << "Failed to send wait complete: " << status.ToString();
  }
}
} // namespace raylet
} // end namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/worker.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_WORKER_H
#define RAY_RAYLET_WORKER_H
#include <memory>
#include "ray/common/client_connection.h"
#include "ray/common/id.h"
#include "ray/common/task/scheduling_resources.h"
#include "ray/common/task/task.h"
#include "ray/common/task/task_common.h"
#include "ray/rpc/worker/core_worker_client.h"
#include "ray/util/process.h"
namespace ray {
namespace raylet {
/// Worker class encapsulates the implementation details of a worker. A worker
/// is the execution container around a unit of Ray work, such as a task or an
/// actor. Ray units of work execute in the context of a Worker.
class Worker {
 public:
  /// A constructor that initializes a worker object.
  /// NOTE: You MUST manually set the worker process.
  Worker(const WorkerID &worker_id, const Language &language, int port,
         std::shared_ptr<LocalClientConnection> connection,
         rpc::ClientCallManager &client_call_manager);
  /// A destructor responsible for freeing all worker state.
  ~Worker() {}
  /// Mark this worker as dead / query the flag.
  void MarkDead();
  bool IsDead() const;
  /// Mark this worker as blocked (e.g. inside `ray.get`) / unblocked.
  void MarkBlocked();
  void MarkUnblocked();
  bool IsBlocked() const;
  /// Return the worker's ID.
  WorkerID WorkerId() const;
  /// Return the worker process.
  ProcessHandle Process() const;
  /// Set the worker process. Must be called exactly once.
  void SetProcess(const ProcessHandle &proc);
  Language GetLanguage() const;
  /// Port the worker listens on; <= 0 means the worker has no port.
  int Port() const;
  void AssignTaskId(const TaskID &task_id);
  const TaskID &GetAssignedTaskId() const;
  /// Track task IDs this worker is blocked on; Add/Remove return whether the
  /// underlying set actually changed.
  bool AddBlockedTaskId(const TaskID &task_id);
  bool RemoveBlockedTaskId(const TaskID &task_id);
  const std::unordered_set<TaskID> &GetBlockedTaskIds() const;
  void AssignJobId(const JobID &job_id);
  const JobID &GetAssignedJobId() const;
  /// Assign an actor ID. May only be called once, with a non-nil ID.
  void AssignActorId(const ActorID &actor_id);
  const ActorID &GetActorId() const;
  void MarkDetachedActor();
  bool IsDetachedActor() const;
  const std::shared_ptr<LocalClientConnection> Connection() const;
  /// Resource IDs owned for the worker's whole lifetime (actors only).
  const ResourceIdSet &GetLifetimeResourceIds() const;
  void SetLifetimeResourceIds(ResourceIdSet &resource_ids);
  void ResetLifetimeResourceIds();
  /// Resource IDs owned for the duration of the current task.
  const ResourceIdSet &GetTaskResourceIds() const;
  void SetTaskResourceIds(ResourceIdSet &resource_ids);
  void ResetTaskResourceIds();
  /// Give back / re-acquire the CPU portion of the task resources (used when
  /// the worker blocks and unblocks).
  ResourceIdSet ReleaseTaskCpuResources();
  void AcquireTaskCpuResources(const ResourceIdSet &cpu_resources);
  const std::unordered_set<ObjectID> &GetActiveObjectIds() const;
  // NOTE(review): const rvalue reference — the argument is copied, not moved.
  void SetActiveObjectIds(const std::unordered_set<ObjectID> &&object_ids);
  /// Push a task to the worker over RPC for execution.
  Status AssignTask(const Task &task, const ResourceIdSet &resource_id_set);
  void DirectActorCallArgWaitComplete(int64_t tag);
  void WorkerLeaseGranted(const std::string &address, int port);
  /// Cpus borrowed by the worker. This happens when the machine is oversubscribed
  /// and the worker does not get back the cpu resources when unblocked.
  /// TODO (ion): Add methods to access this variable.
  /// TODO (ion): Investigate a more intuitive alternative to track these Cpus.
  ResourceSet borrowed_cpu_resources_;
  rpc::CoreWorkerClient *rpc_client() { return rpc_client_.get(); }

 private:
  /// The worker's ID.
  WorkerID worker_id_;
  /// The worker's process.
  ProcessHandle proc_;
  /// The language type of this worker.
  Language language_;
  /// Port that this worker listens on.
  /// If port <= 0, this indicates that the worker will not listen to a port.
  int port_;
  /// Connection state of a worker.
  std::shared_ptr<LocalClientConnection> connection_;
  /// The worker's currently assigned task.
  TaskID assigned_task_id_;
  /// Job ID for the worker's current assigned task.
  JobID assigned_job_id_;
  /// The worker's actor ID. If this is nil, then the worker is not an actor.
  ActorID actor_id_;
  /// Whether the worker is dead.
  bool dead_;
  /// Whether the worker is blocked. Workers become blocked in a `ray.get`, if
  /// they require a data dependency while executing a task.
  bool blocked_;
  /// The specific resource IDs that this worker owns for its lifetime. This is
  /// only used for actors.
  ResourceIdSet lifetime_resource_ids_;
  /// The specific resource IDs that this worker currently owns for the duration
  /// of a task.
  ResourceIdSet task_resource_ids_;
  /// Task IDs this worker is currently blocked on.
  std::unordered_set<TaskID> blocked_task_ids_;
  /// The set of object IDs that are currently in use on the worker.
  std::unordered_set<ObjectID> active_object_ids_;
  /// The `ClientCallManager` object that is shared by `CoreWorkerClient` from all
  /// workers.
  rpc::ClientCallManager &client_call_manager_;
  /// The rpc client to send tasks to this worker.
  std::unique_ptr<rpc::CoreWorkerClient> rpc_client_;
  /// Whether the worker is detached. This applies when the worker is an actor.
  /// Detached actor means the actor's creator can exit without killing this actor.
  bool is_detached_actor_;
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_WORKER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/worker_pool.cc
|
C++
|
#include "ray/raylet/worker_pool.h"
#include <sys/wait.h>
#include <algorithm>
#include <boost/asio/io_service.hpp>
#include <boost/process/args.hpp>
#include <boost/process/async.hpp>
#include <boost/process/search_path.hpp>
#include "ray/common/constants.h"
#include "ray/common/ray_config.h"
#include "ray/common/status.h"
#include "ray/gcs/pb_util.h"
#include "ray/stats/stats.h"
#include "ray/util/logging.h"
#include "ray/util/util.h"
namespace {
// A helper function to get a worker from a list.
std::shared_ptr<ray::raylet::Worker> GetWorker(
    const std::unordered_set<std::shared_ptr<ray::raylet::Worker>> &worker_pool,
    const std::shared_ptr<ray::LocalClientConnection> &connection) {
  // Linear scan for the worker whose client connection matches.
  for (const auto &worker : worker_pool) {
    if (worker->Connection() == connection) {
      return worker;
    }
  }
  return nullptr;
}
// A helper function to remove a worker from a list. Returns true if the worker
// was found and removed.
bool RemoveWorker(std::unordered_set<std::shared_ptr<ray::raylet::Worker>> &worker_pool,
                  const std::shared_ptr<ray::raylet::Worker> &worker) {
  // erase() reports how many elements were removed (0 or 1 for a set).
  const auto num_erased = worker_pool.erase(worker);
  return num_erased > 0;
}
} // namespace
namespace ray {
namespace raylet {
/// A constructor that initializes a worker pool with num_workers workers for
/// each language.
WorkerPool::WorkerPool(boost::asio::io_service &io_service, int num_workers,
                       int maximum_startup_concurrency,
                       std::shared_ptr<gcs::GcsClient> gcs_client,
                       const WorkerCommandMap &worker_commands)
    : io_service_(&io_service),
      maximum_startup_concurrency_(maximum_startup_concurrency),
      gcs_client_(std::move(gcs_client)) {
  RAY_CHECK(maximum_startup_concurrency > 0);
  for (const auto &entry : worker_commands) {
    // Initialize the pool state for this language.
    auto &state = states_by_lang_[entry.first];
    // Workers-per-process is configured per language; any other language is
    // unsupported and fatal.
    switch (entry.first) {
    case Language::PYTHON:
      state.num_workers_per_process =
          RayConfig::instance().num_workers_per_process_python();
      break;
    case Language::JAVA:
      state.num_workers_per_process =
          RayConfig::instance().num_workers_per_process_java();
      break;
    default:
      RAY_LOG(FATAL) << "The number of workers per process for "
                     << Language_Name(entry.first) << " worker is not set.";
    }
    RAY_CHECK(state.num_workers_per_process > 0)
        << "Number of workers per process of language " << Language_Name(entry.first)
        << " must be positive.";
    // Threshold (in workers) past which WarnAboutSize starts pushing
    // warnings to the driver.
    state.multiple_for_warning =
        std::max(state.num_workers_per_process,
                 std::max(num_workers, maximum_startup_concurrency));
    // Set worker command for this language.
    state.worker_command = entry.second;
    RAY_CHECK(!state.worker_command.empty()) << "Worker command must not be empty.";
  }
  // Eagerly start the initial workers for every configured language.
  Start(num_workers);
}
void WorkerPool::Start(int num_workers) {
  // Launch ceil(num_workers / workers_per_process) processes per language so
  // that at least `num_workers` workers eventually register.
  for (auto &entry : states_by_lang_) {
    auto &state = entry.second;
    int num_worker_processes = static_cast<int>(
        std::ceil(static_cast<double>(num_workers) / state.num_workers_per_process));
    for (int i = 0; i < num_worker_processes; i++) {
      StartWorkerProcess(entry.first);
    }
  }
}
WorkerPool::~WorkerPool() {
  // Collect process handles into a set first so that a process shared by
  // several workers is terminated only once.
  std::unordered_set<ProcessHandle> procs_to_kill;
  for (const auto &entry : states_by_lang_) {
    // Kill all registered workers. NOTE(swang): This assumes that the registered
    // workers were started by the pool.
    for (const auto &worker : entry.second.registered_workers) {
      procs_to_kill.insert(worker->Process());
    }
    // Kill all the workers that have been started but not registered.
    for (const auto &starting_worker : entry.second.starting_worker_processes) {
      procs_to_kill.insert(starting_worker.first);
    }
  }
  // Terminate then reap each process to avoid leaving zombies behind.
  for (const auto &proc : procs_to_kill) {
    proc.get()->terminate();
    proc.get()->wait();
  }
}
uint32_t WorkerPool::Size(const Language &language) const {
  // Number of *idle* workers (plain + actor) for the given language;
  // an unknown language counts as zero.
  const auto it = states_by_lang_.find(language);
  if (it == states_by_lang_.end()) {
    return 0;
  }
  return static_cast<uint32_t>(it->second.idle.size() + it->second.idle_actor.size());
}
ProcessHandle WorkerPool::StartWorkerProcess(
    const Language &language, const std::vector<std::string> &dynamic_options) {
  // Start one OS process that will host one or more workers. Returns a null
  // handle if startup was throttled. Non-empty `dynamic_options` means a
  // dedicated single-worker process (actor creation with custom options).
  auto &state = GetStateForLanguage(language);
  // If we are already starting up too many workers, then return without starting
  // more.
  int starting_workers = 0;
  for (auto &entry : state.starting_worker_processes) {
    starting_workers += entry.second;
  }
  if (starting_workers >= maximum_startup_concurrency_) {
    // Workers have been started, but not registered. Force start disabled -- returning.
    RAY_LOG(DEBUG) << "Worker not started, " << starting_workers
                   << " workers of language type " << static_cast<int>(language)
                   << " pending registration";
    return ProcessHandle();
  }
  // Either there are no workers pending registration or the worker start is being forced.
  RAY_LOG(DEBUG) << "Starting new worker process, current pool has "
                 << state.idle_actor.size() << " actor workers, and " << state.idle.size()
                 << " non-actor workers";
  // A dedicated (dynamic-options) process hosts exactly one worker.
  int workers_to_start;
  if (dynamic_options.empty()) {
    workers_to_start = state.num_workers_per_process;
  } else {
    workers_to_start = 1;
  }
  // Extract pointers from the worker command to pass into execvp.
  // Substitute two kinds of placeholders while copying the command template:
  // dynamic-option placeholders and the num-workers placeholder.
  std::vector<std::string> worker_command_args;
  size_t dynamic_option_index = 0;
  bool num_workers_arg_replaced = false;
  for (auto const &token : state.worker_command) {
    const auto option_placeholder =
        kWorkerDynamicOptionPlaceholderPrefix + std::to_string(dynamic_option_index);
    if (token == option_placeholder) {
      if (!dynamic_options.empty()) {
        RAY_CHECK(dynamic_option_index < dynamic_options.size());
        // Each dynamic option may expand into several whitespace-separated args.
        auto options = SplitStrByWhitespaces(dynamic_options[dynamic_option_index]);
        worker_command_args.insert(worker_command_args.end(), options.begin(),
                                   options.end());
        ++dynamic_option_index;
      }
    } else {
      size_t num_workers_index = token.find(kWorkerNumWorkersPlaceholder);
      if (num_workers_index != std::string::npos) {
        std::string arg = token;
        worker_command_args.push_back(arg.replace(num_workers_index,
                                                  strlen(kWorkerNumWorkersPlaceholder),
                                                  std::to_string(workers_to_start)));
        num_workers_arg_replaced = true;
      } else {
        worker_command_args.push_back(token);
      }
    }
  }
  RAY_CHECK(num_workers_arg_replaced || state.num_workers_per_process == 1)
      << "Expect to start " << state.num_workers_per_process << " workers per "
      << Language_Name(language) << " worker process. But the "
      << kWorkerNumWorkersPlaceholder << "placeholder is not found in worker command.";
  ProcessHandle proc = StartProcess(worker_command_args);
  RAY_CHECK(proc);
  RAY_LOG(DEBUG) << "Started worker process of " << workers_to_start
                 << " worker(s) with pid " << proc.get()->id();
  // Track the process until all of its workers have registered.
  state.starting_worker_processes.emplace(proc, workers_to_start);
  return proc;
}
ProcessHandle WorkerPool::StartProcess(
    const std::vector<std::string> &worker_command_args) {
  // Log the full command at DEBUG level only (it can be long).
  if (RAY_LOG_ENABLED(DEBUG)) {
    std::stringstream stream;
    stream << "Starting worker process with command:";
    for (const auto &arg : worker_command_args) {
      stream << " " << arg;
    }
    RAY_LOG(DEBUG) << stream.str();
  }
  // Launch the process to create the worker.
  auto exit_callback = [=](int, const std::error_code &ec) {
    // This callback seems to be necessary for proper zombie cleanup.
    // However, it doesn't need to do anything.
  };
  std::error_code ec;
  ProcessHandle child(
      std::make_shared<Process>(boost::process::args(worker_command_args), *io_service_,
                                boost::process::on_exit = exit_callback, ec));
  // Normalize an invalid child process to a null handle before the check below.
  if (!child.get()->valid()) {
    child = ProcessHandle();
  }
  if (!child || !child.get()->valid() || ec) {
    // The worker failed to start. This is a fatal error.
    RAY_LOG(FATAL) << "Failed to start worker with return value " << ec << ": "
                   << ec.message();
  }
  return child;
}
Status WorkerPool::RegisterWorker(const std::shared_ptr<Worker> &worker, pid_t pid) {
  // A worker process announced itself; match it to a process we started.
  const auto port = worker->Port();
  RAY_LOG(DEBUG) << "Registering worker with pid " << pid << ", port: " << port;
  auto &state = GetStateForLanguage(worker->GetLanguage());
  auto it = state.starting_worker_processes.find(ProcessHandle::FromPid(pid));
  if (it == state.starting_worker_processes.end()) {
    RAY_LOG(WARNING) << "Received a register request from an unknown worker " << pid;
    return Status::Invalid("Unknown worker");
  }
  worker->SetProcess(it->first);
  // One fewer worker is pending registration from this process; drop the
  // tracking entry once every worker of the process has registered.
  it->second--;
  if (it->second == 0) {
    state.starting_worker_processes.erase(it);
  }
  // NOTE(review): `worker` is a const reference, so the std::move below is
  // actually a copy of the shared_ptr.
  state.registered_workers.emplace(std::move(worker));
  return Status::OK();
}
Status WorkerPool::RegisterDriver(const std::shared_ptr<Worker> &driver) {
  /// Register a connected driver. The driver must already have its driver
  /// task assigned. Always returns OK.
  RAY_CHECK(!driver->GetAssignedTaskId().IsNil());
  auto &state = GetStateForLanguage(driver->GetLanguage());
  // `driver` is a const reference, so the previous `std::move(driver)` was a
  // silent copy (std::move on a const lvalue selects the copy constructor).
  // Insert the shared_ptr directly; the behavior is identical but honest.
  state.registered_drivers.insert(driver);
  return Status::OK();
}
std::shared_ptr<Worker> WorkerPool::GetRegisteredWorker(
    const std::shared_ptr<LocalClientConnection> &connection) const {
  // Search every language's registered workers for this connection.
  std::shared_ptr<Worker> match = nullptr;
  for (const auto &entry : states_by_lang_) {
    match = GetWorker(entry.second.registered_workers, connection);
    if (match != nullptr) {
      break;
    }
  }
  return match;
}
std::shared_ptr<Worker> WorkerPool::GetRegisteredDriver(
    const std::shared_ptr<LocalClientConnection> &connection) const {
  // Search every language's registered drivers for this connection.
  std::shared_ptr<Worker> match = nullptr;
  for (const auto &entry : states_by_lang_) {
    match = GetWorker(entry.second.registered_drivers, connection);
    if (match != nullptr) {
      break;
    }
  }
  return match;
}
void WorkerPool::PushWorker(const std::shared_ptr<Worker> &worker) {
  // Since the worker is now idle, unset its assigned task ID.
  RAY_CHECK(worker->GetAssignedTaskId().IsNil())
      << "Idle workers cannot have an assigned task ID";
  auto &state = GetStateForLanguage(worker->GetLanguage());
  auto it = state.dedicated_workers_to_tasks.find(worker->Process());
  if (it != state.dedicated_workers_to_tasks.end()) {
    // The worker is used for the actor creation task with dynamic options.
    // Put it into idle dedicated worker pool.
    // NOTE(review): `worker` is a const reference, so each std::move in this
    // function actually copies the shared_ptr; ownership is not transferred.
    const auto task_id = it->second;
    state.idle_dedicated_workers[task_id] = std::move(worker);
  } else {
    // The worker is not used for the actor creation task without dynamic options.
    // Put the worker to the corresponding idle pool.
    if (worker->GetActorId().IsNil()) {
      state.idle.insert(std::move(worker));
    } else {
      state.idle_actor[worker->GetActorId()] = std::move(worker);
    }
  }
}
std::shared_ptr<Worker> WorkerPool::PopWorker(const TaskSpecification &task_spec) {
  // Try to hand out an idle worker that can run `task_spec`. If none exists,
  // this may start a new worker process and returns nullptr; the caller is
  // expected to retry once a new worker registers.
  auto &state = GetStateForLanguage(task_spec.GetLanguage());
  std::shared_ptr<Worker> worker = nullptr;
  ProcessHandle proc;
  if (task_spec.IsActorCreationTask() && !task_spec.DynamicWorkerOptions().empty()) {
    // Code path of actor creation task with dynamic worker options.
    // Try to pop it from idle dedicated pool.
    auto it = state.idle_dedicated_workers.find(task_spec.TaskId())
;
    if (it != state.idle_dedicated_workers.end()) {
      // There is an idle dedicated worker for this task.
      worker = std::move(it->second);
      state.idle_dedicated_workers.erase(it);
      // Because we found a worker that can perform this task,
      // we can remove it from dedicated_workers_to_tasks.
      state.dedicated_workers_to_tasks.erase(worker->Process());
      state.tasks_to_dedicated_workers.erase(task_spec.TaskId());
    } else if (!HasPendingWorkerForTask(task_spec.GetLanguage(), task_spec.TaskId())) {
      // We are not pending a registration from a worker for this task,
      // so start a new worker process for this task.
      proc =
          StartWorkerProcess(task_spec.GetLanguage(), task_spec.DynamicWorkerOptions());
      if (proc) {
        state.dedicated_workers_to_tasks[proc] = task_spec.TaskId();
        state.tasks_to_dedicated_workers[task_spec.TaskId()] = proc;
      }
    }
  } else if (!task_spec.IsActorTask()) {
    // Code path of normal task or actor creation task without dynamic worker options.
    if (!state.idle.empty()) {
      worker = std::move(*state.idle.begin());
      state.idle.erase(state.idle.begin());
    } else {
      // There are no more non-actor workers available to execute this task.
      // Start a new worker process.
      proc = StartWorkerProcess(task_spec.GetLanguage());
    }
  } else {
    // Code path of actor task. Actor tasks can only run on the worker that
    // hosts the actor, so no new process is ever started here.
    const auto &actor_id = task_spec.ActorId();
    auto actor_entry = state.idle_actor.find(actor_id);
    if (actor_entry != state.idle_actor.end()) {
      worker = std::move(actor_entry->second);
      state.idle_actor.erase(actor_entry);
    }
  }
  if (worker == nullptr && proc) {
    // A new process was started; warn the driver if the pool is growing too big.
    WarnAboutSize();
  }
  return worker;
}
bool WorkerPool::DisconnectWorker(const std::shared_ptr<Worker> &worker) {
  // The worker must have been registered; disconnecting an unknown worker is fatal.
  auto &state = GetStateForLanguage(worker->GetLanguage());
  RAY_CHECK(RemoveWorker(state.registered_workers, worker));
  // Zero out the per-worker stat so the metric stops reporting this pid.
  stats::CurrentWorker().Record(
      0, {{stats::LanguageKey, Language_Name(worker->GetLanguage())},
          {stats::WorkerPidKey, std::to_string(worker->Process().get()->id())}});
  // Returns whether the worker was idle at disconnect time.
  return RemoveWorker(state.idle, worker);
}
void WorkerPool::DisconnectDriver(const std::shared_ptr<Worker> &driver) {
  // The driver must have been registered; disconnecting an unknown driver is fatal.
  auto &state = GetStateForLanguage(driver->GetLanguage());
  RAY_CHECK(RemoveWorker(state.registered_drivers, driver));
  // Zero out the per-driver stat so the metric stops reporting this pid.
  stats::CurrentDriver().Record(
      0, {{stats::LanguageKey, Language_Name(driver->GetLanguage())},
          {stats::WorkerPidKey, std::to_string(driver->Process().get()->id())}});
}
inline WorkerPool::State &WorkerPool::GetStateForLanguage(const Language &language) {
  // Look up the per-language state; every language the pool serves must have
  // been configured in the constructor.
  const auto it = states_by_lang_.find(language);
  RAY_CHECK(it != states_by_lang_.end()) << "Required Language isn't supported.";
  return it->second;
}
std::vector<std::shared_ptr<Worker>> WorkerPool::GetWorkersRunningTasksForJob(
    const JobID &job_id) const {
  // Collect, across all languages, every registered worker whose current
  // assignment belongs to `job_id`.
  std::vector<std::shared_ptr<Worker>> matching;
  for (const auto &lang_entry : states_by_lang_) {
    for (const auto &candidate : lang_entry.second.registered_workers) {
      if (candidate->GetAssignedJobId() == job_id) {
        matching.push_back(candidate);
      }
    }
  }
  return matching;
}
const std::vector<std::shared_ptr<Worker>> WorkerPool::GetAllWorkers() const {
  // Flatten the registered-worker sets of every language into one vector.
  std::vector<std::shared_ptr<Worker>> all_workers;
  for (const auto &lang_entry : states_by_lang_) {
    const auto &registered = lang_entry.second.registered_workers;
    all_workers.insert(all_workers.end(), registered.begin(), registered.end());
  }
  return all_workers;
}
const std::vector<std::shared_ptr<Worker>> WorkerPool::GetAllDrivers() const {
  // Flatten the registered-driver sets of every language into one vector.
  std::vector<std::shared_ptr<Worker>> all_drivers;
  for (const auto &lang_entry : states_by_lang_) {
    const auto &registered = lang_entry.second.registered_drivers;
    all_drivers.insert(all_drivers.end(), registered.begin(), registered.end());
  }
  return all_drivers;
}
void WorkerPool::WarnAboutSize() {
for (const auto &entry : states_by_lang_) {
auto state = entry.second;
int64_t num_workers_started_or_registered = 0;
num_workers_started_or_registered +=
static_cast<int64_t>(state.registered_workers.size());
for (const auto &starting_process : state.starting_worker_processes) {
num_workers_started_or_registered += starting_process.second;
}
int64_t multiple = num_workers_started_or_registered / state.multiple_for_warning;
std::stringstream warning_message;
if (multiple >= 3 && multiple > state.last_warning_multiple) {
// Push an error message to the user if the worker pool tells us that it is
// getting too big.
state.last_warning_multiple = multiple;
warning_message << "WARNING: " << num_workers_started_or_registered << " "
<< Language_Name(entry.first)
<< " workers have been started. This could be a result of using "
<< "a large number of actors, or it could be a consequence of "
<< "using nested tasks "
<< "(see https://github.com/ray-project/ray/issues/3644) for "
<< "some a discussion of workarounds.";
auto error_data_ptr = gcs::CreateErrorTableData(
"worker_pool_large", warning_message.str(), current_time_ms());
RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr));
}
}
}
bool WorkerPool::HasPendingWorkerForTask(const Language &language,
                                         const TaskID &task_id) {
  // A dedicated worker is pending iff the task appears in the
  // task -> dedicated-worker map for its language.
  auto &state = GetStateForLanguage(language);
  return state.tasks_to_dedicated_workers.count(task_id) > 0;
}
std::unordered_set<ObjectID> WorkerPool::GetActiveObjectIDs() const {
  // Union of the active object IDs across every registered worker and
  // driver, over all languages.
  std::unordered_set<ObjectID> result;
  for (const auto &lang_entry : states_by_lang_) {
    const auto &state = lang_entry.second;
    for (const auto &worker : state.registered_workers) {
      const auto &ids = worker->GetActiveObjectIds();
      result.insert(ids.begin(), ids.end());
    }
    for (const auto &driver : state.registered_drivers) {
      const auto &ids = driver->GetActiveObjectIds();
      result.insert(ids.begin(), ids.end());
    }
  }
  return result;
}
std::string WorkerPool::DebugString() const {
  // Human-readable summary: registered worker/driver counts per language.
  std::stringstream out;
  out << "WorkerPool:";
  for (const auto &lang_entry : states_by_lang_) {
    out << "\n- num " << Language_Name(lang_entry.first)
        << " workers: " << lang_entry.second.registered_workers.size();
    out << "\n- num " << Language_Name(lang_entry.first)
        << " drivers: " << lang_entry.second.registered_drivers.size();
  }
  return out.str();
}
void WorkerPool::RecordMetrics() const {
  /// Export one CurrentWorker/CurrentDriver stat sample per registered
  /// worker/driver, tagged with language and pid.
  for (const auto &entry : states_by_lang_) {
    // Record workers. Iterate by const reference — the original iterated by
    // value, copying the shared_ptr (an atomic refcount bump) per element —
    // and compute the pid once instead of dereferencing Process() twice.
    for (const auto &worker : entry.second.registered_workers) {
      const auto pid = worker->Process().get()->id();
      stats::CurrentWorker().Record(
          pid, {{stats::LanguageKey, Language_Name(worker->GetLanguage())},
                {stats::WorkerPidKey, std::to_string(pid)}});
    }
    // Record drivers, same scheme.
    for (const auto &driver : entry.second.registered_drivers) {
      const auto pid = driver->Process().get()->id();
      stats::CurrentDriver().Record(
          pid, {{stats::LanguageKey, Language_Name(driver->GetLanguage())},
                {stats::WorkerPidKey, std::to_string(pid)}});
    }
  }
}
} // namespace raylet
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/worker_pool.h
|
C/C++ Header
|
#ifndef RAY_RAYLET_WORKER_POOL_H
#define RAY_RAYLET_WORKER_POOL_H
#include <inttypes.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <boost/asio/io_service.hpp>
#include "gtest/gtest.h"
#include "ray/common/client_connection.h"
#include "ray/common/task/task.h"
#include "ray/common/task/task_common.h"
#include "ray/gcs/redis_gcs_client.h"
#include "ray/raylet/worker.h"
namespace ray {
namespace raylet {
using WorkerCommandMap =
std::unordered_map<Language, std::vector<std::string>, std::hash<int>>;
class Worker;
/// \class WorkerPool
///
/// The WorkerPool is responsible for managing a pool of Workers. Each Worker
/// is a container for a unit of work.
class WorkerPool {
 public:
  /// Create a pool and asynchronously start at least the specified number of workers per
  /// language.
  /// Once each worker process has registered with an external server, the
  /// process should create and register the specified number of workers, and add them to
  /// the pool.
  ///
  /// \param io_service The event loop used for managing the spawned subprocesses.
  /// \param num_workers The number of workers to start, per language.
  /// \param maximum_startup_concurrency The maximum number of worker processes
  /// that can be started in parallel (typically this should be set to the number of CPU
  /// resources on the machine).
  /// \param gcs_client The GCS client, used e.g. to report pool-size warnings.
  /// \param worker_commands The commands used to start the worker process, grouped by
  /// language.
  WorkerPool(boost::asio::io_service &io_service, int num_workers,
             int maximum_startup_concurrency, std::shared_ptr<gcs::GcsClient> gcs_client,
             const WorkerCommandMap &worker_commands);

  /// Destructor responsible for freeing a set of workers owned by this class.
  virtual ~WorkerPool();

  /// Register a new worker. The Worker should be added by the caller to the
  /// pool after it becomes idle (e.g., requests a work assignment).
  ///
  /// \param worker The Worker to be registered.
  /// \param pid The OS process ID reported by the worker at registration time.
  /// \return If the registration is successful.
  Status RegisterWorker(const std::shared_ptr<Worker> &worker, pid_t pid);

  /// Register a new driver.
  ///
  /// \param worker The driver to be registered.
  /// \return If the registration is successful.
  Status RegisterDriver(const std::shared_ptr<Worker> &worker);

  /// Get the client connection's registered worker.
  ///
  /// \param connection The client connection owned by a registered worker.
  /// \return The Worker that owns the given client connection. Returns nullptr
  /// if the client has not registered a worker yet.
  std::shared_ptr<Worker> GetRegisteredWorker(
      const std::shared_ptr<LocalClientConnection> &connection) const;

  /// Get the client connection's registered driver.
  ///
  /// \param connection The client connection owned by a registered driver.
  /// \return The Worker that owns the given client connection. Returns nullptr
  /// if the client has not registered a driver.
  std::shared_ptr<Worker> GetRegisteredDriver(
      const std::shared_ptr<LocalClientConnection> &connection) const;

  /// Disconnect a registered worker.
  ///
  /// \param worker The worker to disconnect. The worker must be registered.
  /// \return Whether the given worker was in the pool of idle workers.
  bool DisconnectWorker(const std::shared_ptr<Worker> &worker);

  /// Disconnect a registered driver.
  ///
  /// \param driver The driver to disconnect. The driver must be registered.
  void DisconnectDriver(const std::shared_ptr<Worker> &driver);

  /// Add an idle worker to the pool.
  ///
  /// \param worker The idle worker to add.
  void PushWorker(const std::shared_ptr<Worker> &worker);

  /// Pop an idle worker from the pool. The caller is responsible for pushing
  /// the worker back onto the pool once the worker has completed its work.
  ///
  /// \param task_spec The returned worker must be able to execute this task.
  /// \return An idle worker with the requested task spec. Returns nullptr if no
  /// such worker exists.
  std::shared_ptr<Worker> PopWorker(const TaskSpecification &task_spec);

  /// Return the current size of the worker pool for the requested language. Counts only
  /// idle workers.
  ///
  /// \param language The requested language.
  /// \return The total count of idle workers (actor and non-actor) in the pool.
  uint32_t Size(const Language &language) const;

  /// Get all the workers which are running tasks for a given job.
  ///
  /// \param job_id The job ID.
  /// \return A list containing all the workers which are running tasks for the job.
  std::vector<std::shared_ptr<Worker>> GetWorkersRunningTasksForJob(
      const JobID &job_id) const;

  /// Get all the workers.
  ///
  /// \return A list containing all the workers.
  const std::vector<std::shared_ptr<Worker>> GetAllWorkers() const;

  /// Get all the drivers.
  ///
  /// \return A list containing all the drivers.
  const std::vector<std::shared_ptr<Worker>> GetAllDrivers() const;

  /// Whether there is a pending worker for the given task.
  /// Note that, this is only used for actor creation task with dynamic options.
  /// And if the worker registered but isn't assigned a task,
  /// the worker also is in pending state, and this'll return true.
  ///
  /// \param language The required language.
  /// \param task_id The task that we want to query.
  bool HasPendingWorkerForTask(const Language &language, const TaskID &task_id);

  /// Get the set of active object IDs from all workers in the worker pool.
  /// \return A set containing the active object IDs.
  std::unordered_set<ObjectID> GetActiveObjectIDs() const;

  /// Returns debug string for class.
  ///
  /// \return string.
  std::string DebugString() const;

  /// Record metrics.
  void RecordMetrics() const;

 protected:
  /// Asynchronously start a new worker process. Once the worker process has
  /// registered with an external server, the process should create and
  /// register num_workers_per_process workers, then add them to the pool.
  /// Failure to start the worker process is a fatal error. If too many workers
  /// are already being started, then this function will return without starting
  /// any workers.
  ///
  /// \param language Which language this worker process should be.
  /// \param dynamic_options The dynamic options that we should add for worker command.
  /// \return A handle to the process that we started, or a null handle if no
  /// process was started (e.g., because too many workers are already starting).
  ProcessHandle StartWorkerProcess(const Language &language,
                                   const std::vector<std::string> &dynamic_options = {});

  /// The implementation of how to start a new worker process with command arguments.
  /// The lifetime of the process is tied to that of the returned object,
  /// unless the caller manually detaches the process after the call.
  ///
  /// \param worker_command_args The command arguments of new worker process.
  /// \return An object representing the started worker process.
  virtual ProcessHandle StartProcess(const std::vector<std::string> &worker_command_args);

  /// Push a warning message to the user if the worker pool is getting too big.
  virtual void WarnAboutSize();

  /// An internal data structure that maintains the pool state per language.
  struct State {
    /// The commands and arguments used to start the worker process
    std::vector<std::string> worker_command;
    /// The number of workers per process.
    int num_workers_per_process;
    /// The pool of dedicated workers for actor creation tasks
    /// with prefix or suffix worker command.
    std::unordered_map<TaskID, std::shared_ptr<Worker>> idle_dedicated_workers;
    /// The pool of idle non-actor workers.
    std::unordered_set<std::shared_ptr<Worker>> idle;
    /// The pool of idle actor workers.
    std::unordered_map<ActorID, std::shared_ptr<Worker>> idle_actor;
    /// All workers that have registered and are still connected, including both
    /// idle and executing.
    std::unordered_set<std::shared_ptr<Worker>> registered_workers;
    /// All drivers that have registered and are still connected.
    std::unordered_set<std::shared_ptr<Worker>> registered_drivers;
    /// A map from the pids of starting worker processes
    /// to the number of their unregistered workers.
    std::unordered_map<ProcessHandle, int> starting_worker_processes;
    /// A map for looking up the task with dynamic options by the pid of
    /// worker. Note that this is used for the dedicated worker processes.
    std::unordered_map<ProcessHandle, TaskID> dedicated_workers_to_tasks;
    /// A map for speeding up looking up the pending worker for the given task.
    std::unordered_map<TaskID, ProcessHandle> tasks_to_dedicated_workers;
    /// We'll push a warning to the user every time a multiple of this many
    /// worker processes has been started.
    int multiple_for_warning;
    /// The last size at which a warning about the number of registered workers
    /// was generated.
    int64_t last_warning_multiple;
  };

  /// Pool states per language.
  std::unordered_map<Language, State, std::hash<int>> states_by_lang_;

 private:
  /// Force-start at least num_workers workers for this language. Used for internal and
  /// test purpose only.
  ///
  /// \param num_workers The number of workers to start, per language.
  void Start(int num_workers);

  /// A helper function that returns the reference of the pool state
  /// for a given language.
  State &GetStateForLanguage(const Language &language);

  /// Required by Boost.Process for managing subprocesses (e.g. reaping zombies).
  boost::asio::io_service *io_service_;
  /// The maximum number of worker processes that can be started concurrently.
  int maximum_startup_concurrency_;
  /// A client connection to the GCS.
  std::shared_ptr<gcs::GcsClient> gcs_client_;

  FRIEND_TEST(WorkerPoolTest, InitialWorkerProcessCount);
};
} // namespace raylet
} // namespace ray
#endif // RAY_RAYLET_WORKER_POOL_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/raylet/worker_pool_test.cc
|
C++
|
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ray/common/constants.h"
#include "ray/raylet/node_manager.h"
#include "ray/raylet/worker_pool.h"
namespace ray {
namespace raylet {
int NUM_WORKERS_PER_PROCESS = 3;
int MAXIMUM_STARTUP_CONCURRENCY = 5;
std::vector<Language> LANGUAGES = {Language::PYTHON, Language::JAVA};
/// A WorkerPool test double: instead of spawning real worker processes it
/// hands out detached Process objects carrying bogus PIDs, and records the
/// command line each "process" would have been started with.
class WorkerPoolMock : public WorkerPool {
 public:
  WorkerPoolMock(boost::asio::io_service &io_service)
      : WorkerPoolMock(
            io_service,
            {{Language::PYTHON,
              {"dummy_py_worker_command", "--foo=RAY_WORKER_NUM_WORKERS_PLACEHOLDER"}},
             {Language::JAVA,
              {"dummy_java_worker_command",
               "--foo=RAY_WORKER_NUM_WORKERS_PLACEHOLDER"}}}) {}

  explicit WorkerPoolMock(boost::asio::io_service &io_service,
                          const WorkerCommandMap &worker_commands)
      : WorkerPool(io_service, 0, MAXIMUM_STARTUP_CONCURRENCY, nullptr, worker_commands),
        last_worker_process_() {
    // Force a known workers-per-process count so tests can assert on it.
    for (auto &entry : states_by_lang_) {
      entry.second.num_workers_per_process = NUM_WORKERS_PER_PROCESS;
    }
  }

  ~WorkerPoolMock() {
    // Avoid killing real processes
    states_by_lang_.clear();
  }

  using WorkerPool::StartWorkerProcess;  // we need this to be public for testing

  /// Override that fabricates a process handle rather than exec'ing anything.
  ProcessHandle StartProcess(
      const std::vector<std::string> &worker_command_args) override {
#ifndef PID_MAX_LIMIT
    // This is defined by Linux to be the maximum allowable number of processes
    // There's no guarantee for other OSes, but it's good enough for testing...
    enum { PID_MAX_LIMIT = 1 << 22 };
#endif
    // Use a bogus process ID that won't conflict with those in the system
    pid_t pid = static_cast<pid_t>(PID_MAX_LIMIT + 1 + worker_commands_by_proc_.size());
    Process proc(pid);
    proc.detach();
    last_worker_process_ = std::make_shared<Process>(std::move(proc));
    worker_commands_by_proc_[last_worker_process_] = worker_command_args;
    return last_worker_process_;
  }

  // Suppress the GCS warning path; the mock has no GCS client.
  void WarnAboutSize() override {}

  ProcessHandle LastStartedWorkerProcess() const { return last_worker_process_; }

  const std::vector<std::string> &GetWorkerCommand(ProcessHandle proc) {
    return worker_commands_by_proc_[proc];
  }

  /// Total number of workers expected from all still-starting processes.
  int NumWorkersStarting() const {
    int total = 0;
    for (auto &state_entry : states_by_lang_) {
      for (auto &process_entry : state_entry.second.starting_worker_processes) {
        total += process_entry.second;
      }
    }
    return total;
  }

  /// Number of worker processes that have been started but whose workers have
  /// not all registered yet.
  int NumWorkerProcessesStarting() const {
    int total = 0;
    for (auto &entry : states_by_lang_) {
      total += entry.second.starting_worker_processes.size();
    }
    return total;
  }

 private:
  ProcessHandle last_worker_process_;
  // The worker commands by process.
  std::unordered_map<ProcessHandle, std::vector<std::string>> worker_commands_by_proc_;
};
/// gtest fixture providing a mocked worker pool plus helpers to fabricate
/// Worker objects over in-memory local connections.
class WorkerPoolTest : public ::testing::Test {
 public:
  WorkerPoolTest()
      : worker_pool_(io_service_),
        error_message_type_(1),
        client_call_manager_(io_service_) {}

  /// Create a Worker backed by a fresh local connection. If `proc` is non-null
  /// it is attached to the worker as its (fake) process handle.
  std::shared_ptr<Worker> CreateWorker(ProcessHandle proc,
                                       const Language &language = Language::PYTHON) {
    std::function<void(LocalClientConnection &)> client_handler =
        [this](LocalClientConnection &client) { HandleNewClient(client); };
    std::function<void(std::shared_ptr<LocalClientConnection>, int64_t, const uint8_t *)>
        message_handler = [this](std::shared_ptr<LocalClientConnection> client,
                                 int64_t message_type, const uint8_t *message) {
          HandleMessage(client, message_type, message);
        };
    local_stream_protocol::socket socket(io_service_);
    auto client =
        LocalClientConnection::Create(client_handler, message_handler, std::move(socket),
                                      "worker", {}, error_message_type_);
    std::shared_ptr<Worker> worker = std::make_shared<Worker>(
        WorkerID::FromRandom(), language, -1, client, client_call_manager_);
    if (proc) {
      worker->SetProcess(proc);
    }
    return worker;
  }

  /// Replace the fixture's pool with one configured with custom commands.
  void SetWorkerCommands(const WorkerCommandMap &worker_commands) {
    WorkerPoolMock worker_pool(io_service_, worker_commands);
    this->worker_pool_ = std::move(worker_pool);
  }

 protected:
  boost::asio::io_service io_service_;
  WorkerPoolMock worker_pool_;
  int64_t error_message_type_;
  rpc::ClientCallManager client_call_manager_;

 private:
  // No-op connection callbacks; the tests never exchange real messages.
  void HandleNewClient(LocalClientConnection &){};
  void HandleMessage(std::shared_ptr<LocalClientConnection>, int64_t, const uint8_t *){};
};
/// Build a minimal TaskSpecification for tests. Produces an actor task if
/// `actor_id` is set, an actor-creation task if `actor_creation_id` is set
/// (with the given dynamic worker options), otherwise a normal task.
static inline TaskSpecification ExampleTaskSpec(
    const ActorID actor_id = ActorID::Nil(), const Language &language = Language::PYTHON,
    const ActorID actor_creation_id = ActorID::Nil(),
    const std::vector<std::string> &dynamic_worker_options = {}) {
  rpc::TaskSpec message;
  message.set_language(language);
  if (!actor_id.IsNil()) {
    message.set_type(TaskType::ACTOR_TASK);
    message.mutable_actor_task_spec()->set_actor_id(actor_id.Binary());
  } else if (!actor_creation_id.IsNil()) {
    message.set_type(TaskType::ACTOR_CREATION_TASK);
    message.mutable_actor_creation_task_spec()->set_actor_id(actor_creation_id.Binary());
    for (const auto &option : dynamic_worker_options) {
      message.mutable_actor_creation_task_spec()->add_dynamic_worker_options(option);
    }
  } else {
    message.set_type(TaskType::NORMAL_TASK);
  }
  return TaskSpecification(std::move(message));
}
// Check equality semantics of ProcessHandle: a handle equals itself, distinct
// handles differ, and an empty handle differs from any live one.
TEST_F(WorkerPoolTest, CompareWorkerProcessObjects) {
  typedef ProcessHandle T;
  T a(std::make_shared<Process>()), b(std::make_shared<Process>()), empty = T();
  ASSERT_TRUE(std::equal_to<T>()(a, a));
  ASSERT_TRUE(!std::equal_to<T>()(a, b));
  ASSERT_TRUE(!std::equal_to<T>()(b, a));
  ASSERT_TRUE(!std::equal_to<T>()(empty, a));
  ASSERT_TRUE(!std::equal_to<T>()(a, empty));
}
// Lifecycle test: a started process stays "starting" until all of its workers
// register, and a worker is only discoverable by its connection between
// registration and disconnection.
TEST_F(WorkerPoolTest, HandleWorkerRegistration) {
  ProcessHandle proc = worker_pool_.StartWorkerProcess(Language::PYTHON);
  std::vector<std::shared_ptr<Worker>> workers;
  for (int i = 0; i < NUM_WORKERS_PER_PROCESS; i++) {
    workers.push_back(CreateWorker(ProcessHandle()));
  }
  for (const auto &worker : workers) {
    // Check that there's still a starting worker process
    // before all workers have been registered
    ASSERT_EQ(worker_pool_.NumWorkerProcessesStarting(), 1);
    // Check that we cannot lookup the worker before it's registered.
    ASSERT_EQ(worker_pool_.GetRegisteredWorker(worker->Connection()), nullptr);
    RAY_CHECK_OK(worker_pool_.RegisterWorker(worker, proc.get()->id()));
    // Check that we can lookup the worker after it's registered.
    ASSERT_EQ(worker_pool_.GetRegisteredWorker(worker->Connection()), worker);
  }
  // Check that there's no starting worker process
  ASSERT_EQ(worker_pool_.NumWorkerProcessesStarting(), 0);
  for (const auto &worker : workers) {
    worker_pool_.DisconnectWorker(worker);
    // Check that we cannot lookup the worker after it's disconnected.
    ASSERT_EQ(worker_pool_.GetRegisteredWorker(worker->Connection()), nullptr);
  }
}
// Verify that the number of concurrently-starting worker processes is capped
// by MAXIMUM_STARTUP_CONCURRENCY, regardless of how many starts are requested,
// and that each new process is launched with the expected command line.
TEST_F(WorkerPoolTest, StartupWorkerProcessCount) {
  std::string num_workers_arg =
      std::string("--foo=") + std::to_string(NUM_WORKERS_PER_PROCESS);
  std::vector<std::vector<std::string>> worker_commands = {
      {{"dummy_py_worker_command", num_workers_arg},
       {"dummy_java_worker_command", num_workers_arg}}};
  int desired_initial_worker_process_count_per_language = 100;
  // Each process hosts NUM_WORKERS_PER_PROCESS workers, so the cap on
  // concurrently-starting processes is ceil(concurrency / workers_per_process)
  // per language.
  int expected_worker_process_count =
      static_cast<int>(std::ceil(static_cast<double>(MAXIMUM_STARTUP_CONCURRENCY) /
                                 NUM_WORKERS_PER_PROCESS * LANGUAGES.size()));
  ASSERT_TRUE(expected_worker_process_count <
              static_cast<int>(desired_initial_worker_process_count_per_language *
                               LANGUAGES.size()));
  ProcessHandle last_started_worker_process;
  for (int i = 0; i < desired_initial_worker_process_count_per_language; i++) {
    for (size_t j = 0; j < LANGUAGES.size(); j++) {
      worker_pool_.StartWorkerProcess(LANGUAGES[j]);
      ASSERT_TRUE(worker_pool_.NumWorkerProcessesStarting() <=
                  expected_worker_process_count);
      ProcessHandle prev = worker_pool_.LastStartedWorkerProcess();
      if (last_started_worker_process.get() != prev.get()) {
        // A new process actually started: check its command line.
        last_started_worker_process = prev;
        const auto &real_command =
            worker_pool_.GetWorkerCommand(worker_pool_.LastStartedWorkerProcess());
        ASSERT_EQ(real_command, worker_commands[j]);
      } else {
        // No new process: we must already be at the concurrency cap.
        ASSERT_EQ(worker_pool_.NumWorkerProcessesStarting(),
                  expected_worker_process_count);
        ASSERT_TRUE(static_cast<int>(i * LANGUAGES.size() + j) >=
                    expected_worker_process_count);
      }
    }
  }
  // Check number of starting workers
  ASSERT_EQ(worker_pool_.NumWorkerProcessesStarting(), expected_worker_process_count);
}
// Starting 1 worker per language actually launches one full process per
// language, i.e. NUM_WORKERS_PER_PROCESS workers each.
TEST_F(WorkerPoolTest, InitialWorkerProcessCount) {
  worker_pool_.Start(1);
  // Here we try to start only 1 worker for each worker language. But since each worker
  // process contains exactly NUM_WORKERS_PER_PROCESS (3) workers here, it's expected to
  // see 3 workers for each worker language, instead of 1.
  ASSERT_NE(worker_pool_.NumWorkersStarting(), 1 * LANGUAGES.size());
  ASSERT_EQ(worker_pool_.NumWorkersStarting(),
            NUM_WORKERS_PER_PROCESS * LANGUAGES.size());
  ASSERT_EQ(worker_pool_.NumWorkerProcessesStarting(), LANGUAGES.size());
}
// Push/pop semantics: popping from an empty pool yields nullptr, pushed
// workers come back out exactly once, then the pool is empty again.
TEST_F(WorkerPoolTest, HandleWorkerPushPop) {
  // Try to pop a worker from the empty pool and make sure we don't get one.
  std::shared_ptr<Worker> popped_worker;
  const auto task_spec = ExampleTaskSpec();
  popped_worker = worker_pool_.PopWorker(task_spec);
  ASSERT_EQ(popped_worker, nullptr);

  // Create some workers.
  std::unordered_set<std::shared_ptr<Worker>> workers;
  workers.insert(CreateWorker(std::make_shared<Process>()));
  workers.insert(CreateWorker(std::make_shared<Process>()));
  // Add the workers to the pool.
  for (auto &worker : workers) {
    worker_pool_.PushWorker(worker);
  }

  // Pop two workers and make sure they're one of the workers we created.
  popped_worker = worker_pool_.PopWorker(task_spec);
  ASSERT_NE(popped_worker, nullptr);
  ASSERT_TRUE(workers.count(popped_worker) > 0);
  popped_worker = worker_pool_.PopWorker(task_spec);
  ASSERT_NE(popped_worker, nullptr);
  ASSERT_TRUE(workers.count(popped_worker) > 0);
  popped_worker = worker_pool_.PopWorker(task_spec);
  ASSERT_EQ(popped_worker, nullptr);
}
// A worker that has been assigned an actor ID moves to the actor pool: it no
// longer satisfies plain tasks but is returned for its actor's tasks.
TEST_F(WorkerPoolTest, PopActorWorker) {
  // Create a worker.
  auto worker = CreateWorker(std::make_shared<Process>());
  // Add the worker to the pool.
  worker_pool_.PushWorker(worker);

  // Assign an actor ID to the worker.
  const auto task_spec = ExampleTaskSpec();
  auto actor = worker_pool_.PopWorker(task_spec);
  const auto job_id = JobID::FromInt(1);
  auto actor_id = ActorID::Of(job_id, TaskID::ForDriverTask(job_id), 1);
  actor->AssignActorId(actor_id);
  worker_pool_.PushWorker(actor);

  // Check that there are no more non-actor workers.
  ASSERT_EQ(worker_pool_.PopWorker(task_spec), nullptr);
  // Check that we can pop the actor worker.
  const auto actor_task_spec = ExampleTaskSpec(actor_id);
  actor = worker_pool_.PopWorker(actor_task_spec);
  ASSERT_EQ(actor, worker);
  ASSERT_EQ(actor->GetActorId(), actor_id);
}
// Workers are only matched to tasks of the same language.
TEST_F(WorkerPoolTest, PopWorkersOfMultipleLanguages) {
  // Create a Python Worker, and add it to the pool
  auto py_worker = CreateWorker(std::make_shared<Process>(), Language::PYTHON);
  worker_pool_.PushWorker(py_worker);
  // Check that no worker will be popped if the given task is a Java task
  const auto java_task_spec = ExampleTaskSpec(ActorID::Nil(), Language::JAVA);
  ASSERT_EQ(worker_pool_.PopWorker(java_task_spec), nullptr);
  // Check that the worker can be popped if the given task is a Python task
  const auto py_task_spec = ExampleTaskSpec(ActorID::Nil(), Language::PYTHON);
  ASSERT_NE(worker_pool_.PopWorker(py_task_spec), nullptr);

  // Create a Java Worker, and add it to the pool
  auto java_worker = CreateWorker(std::make_shared<Process>(), Language::JAVA);
  worker_pool_.PushWorker(java_worker);
  // Check that the worker will be popped now for Java task
  ASSERT_NE(worker_pool_.PopWorker(java_task_spec), nullptr);
}
// Dynamic worker options replace the RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_N
// tokens (by index) in the configured worker command, and the
// RAY_WORKER_NUM_WORKERS_PLACEHOLDER token becomes the workers-per-process
// count.
TEST_F(WorkerPoolTest, StartWorkerWithDynamicOptionsCommand) {
  const std::vector<std::string> java_worker_command = {
      "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_0", "dummy_java_worker_command",
      "--foo=RAY_WORKER_NUM_WORKERS_PLACEHOLDER",
      "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_1"};
  SetWorkerCommands({{Language::PYTHON, {"dummy_py_worker_command"}},
                     {Language::JAVA, java_worker_command}});
  const auto job_id = JobID::FromInt(1);
  TaskSpecification task_spec = ExampleTaskSpec(
      ActorID::Nil(), Language::JAVA,
      ActorID::Of(job_id, TaskID::ForDriverTask(job_id), 1), {"test_op_0", "test_op_1"});
  worker_pool_.StartWorkerProcess(Language::JAVA, task_spec.DynamicWorkerOptions());
  const auto real_command =
      worker_pool_.GetWorkerCommand(worker_pool_.LastStartedWorkerProcess());
  ASSERT_EQ(real_command,
            std::vector<std::string>(
                {"test_op_0", "dummy_java_worker_command", "--foo=1", "test_op_1"}));
}
} // namespace raylet
} // namespace ray
// Test entry point: initialize GoogleTest and run all registered tests.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/client_call.h
|
C/C++ Header
|
#ifndef RAY_RPC_CLIENT_CALL_H
#define RAY_RPC_CLIENT_CALL_H
#include <grpcpp/grpcpp.h>
#include <boost/asio.hpp>
#include "absl/synchronization/mutex.h"
#include "ray/common/grpc_util.h"
#include "ray/common/status.h"
namespace ray {
namespace rpc {
/// Represents an outgoing gRPC request.
///
/// NOTE(hchen): Compared to `ClientCallImpl`, this abstract interface doesn't use
/// template. This allows the users (e.g., `ClientCallMangager`) not having to use
/// template as well.
class ClientCall {
 public:
  /// The callback to be called by `ClientCallManager` when the reply of this request is
  /// received.
  virtual void OnReplyReceived() = 0;
  /// Return the status previously stored by `SetReturnStatus`.
  virtual ray::Status GetStatus() = 0;
  /// Translate and store the gRPC status of this call as a `ray::Status`.
  virtual void SetReturnStatus() = 0;

  virtual ~ClientCall() = default;
};
class ClientCallManager;
/// Represents the client callback function of a particular rpc method.
///
/// \tparam Reply Type of the reply message.
template <class Reply>
using ClientCallback = std::function<void(const Status &status, const Reply &reply)>;
/// Implementation of the `ClientCall`. It represents a `ClientCall` for a particular
/// RPC method.
///
/// \tparam Reply Type of the Reply message.
template <class Reply>
class ClientCallImpl : public ClientCall {
 public:
  /// Constructor.
  ///
  /// \param[in] callback The callback function to handle the reply.
  explicit ClientCallImpl(const ClientCallback<Reply> &callback) : callback_(callback) {}

  Status GetStatus() override {
    absl::MutexLock lock(&mutex_);
    return return_status_;
  }

  void SetReturnStatus() override {
    absl::MutexLock lock(&mutex_);
    return_status_ = GrpcStatusToRayStatus(status_);
  }

  void OnReplyReceived() override {
    // Copy the status under the lock, then invoke the user callback outside
    // of it so the callback cannot deadlock against GetStatus/SetReturnStatus.
    ray::Status status;
    {
      absl::MutexLock lock(&mutex_);
      status = return_status_;
    }
    if (callback_ != nullptr) {
      callback_(status, reply_);
    }
  }

 private:
  /// The reply message.
  Reply reply_;

  /// The callback function to handle the reply.
  ClientCallback<Reply> callback_;

  /// The response reader.
  std::unique_ptr<grpc_impl::ClientAsyncResponseReader<Reply>> response_reader_;

  /// gRPC status of this request.
  grpc::Status status_;

  /// Mutex to protect the return_status_ field.
  absl::Mutex mutex_;

  /// This is the status to be returned from GetStatus(). It is safe
  /// to read from other threads while they hold mutex_. We have
  /// return_status_ = GrpcStatusToRayStatus(status_) but need
  /// a separate variable because status_ is set internally by
  /// GRPC and we cannot control it holding the lock.
  ray::Status return_status_ GUARDED_BY(mutex_);

  /// Context for the client. It could be used to convey extra information to
  /// the server and/or tweak certain RPC behaviors.
  grpc::ClientContext context_;

  friend class ClientCallManager;
};
/// This class wraps a `ClientCall`, and is used as the `tag` of gRPC's `CompletionQueue`.
///
/// The lifecycle of a `ClientCallTag` is as follows.
///
/// When a client submits a new gRPC request, a new `ClientCallTag` object will be created
/// by `ClientCallMangager::CreateCall`. Then the object will be used as the tag of
/// `CompletionQueue`.
///
/// When the reply is received, `ClientCallMangager` will get the address of this object
/// via `CompletionQueue`'s tag. And the manager should call
/// `GetCall()->OnReplyReceived()` and then delete this object.
class ClientCallTag {
 public:
  /// Constructor.
  ///
  /// \param call A `ClientCall` that represents a request.
  explicit ClientCallTag(std::shared_ptr<ClientCall> call) : call_(std::move(call)) {}

  /// Get the wrapped `ClientCall`.
  const std::shared_ptr<ClientCall> &GetCall() const { return call_; }

 private:
  // Shared ownership keeps the call alive until the tag is deleted after the
  // reply is processed.
  std::shared_ptr<ClientCall> call_;
};
/// Represents the generic signature of a `FooService::Stub::PrepareAsyncBar`
/// function, where `Foo` is the service name and `Bar` is the rpc method name.
///
/// \tparam GrpcService Type of the gRPC-generated service class.
/// \tparam Request Type of the request message.
/// \tparam Reply Type of the reply message.
template <class GrpcService, class Request, class Reply>
using PrepareAsyncFunction =
std::unique_ptr<grpc_impl::ClientAsyncResponseReader<Reply>> (GrpcService::Stub::*)(
grpc::ClientContext *context, const Request &request, grpc::CompletionQueue *cq);
/// `ClientCallManager` is used to manage outgoing gRPC requests and the lifecycles of
/// `ClientCall` objects.
///
/// It maintains a thread that keeps polling events from `CompletionQueue`, and posts
/// the callback function to the main event loop when a reply is received.
///
/// Multiple clients can share one `ClientCallManager`.
class ClientCallManager {
 public:
  /// Constructor.
  ///
  /// \param[in] main_service The main event loop, to which the callback functions will be
  /// posted.
  /// \param[in] num_threads The number of polling threads (one completion queue each).
  explicit ClientCallManager(boost::asio::io_service &main_service, int num_threads = 1)
      : main_service_(main_service), num_threads_(num_threads), shutdown_(false) {
    // Start round-robin dispatch at a random queue so multiple managers don't
    // all favor queue 0.
    rr_index_ = rand() % num_threads_;
    // Start the polling threads.
    // NOTE(review): the vector is reserved up front, presumably because
    // grpc::CompletionQueue cannot be moved on reallocation — confirm before
    // changing this.
    cqs_.reserve(num_threads_);
    for (int i = 0; i < num_threads_; i++) {
      cqs_.emplace_back();
      polling_threads_.emplace_back(&ClientCallManager::PollEventsFromCompletionQueue,
                                    this, i);
    }
  }

  ~ClientCallManager() {
    // Signal shutdown, drain the completion queues, then join the pollers.
    shutdown_ = true;
    for (auto &cq : cqs_) {
      cq.Shutdown();
    }
    for (auto &polling_thread : polling_threads_) {
      polling_thread.join();
    }
  }

  /// Create a new `ClientCall` and send request.
  ///
  /// \tparam GrpcService Type of the gRPC-generated service class.
  /// \tparam Request Type of the request message.
  /// \tparam Reply Type of the reply message.
  ///
  /// \param[in] stub The gRPC-generated stub.
  /// \param[in] prepare_async_function Pointer to the gRPC-generated
  /// `FooService::Stub::PrepareAsyncBar` function.
  /// \param[in] request The request message.
  /// \param[in] callback The callback function that handles reply.
  ///
  /// \return A `ClientCall` representing the request that was just sent.
  template <class GrpcService, class Request, class Reply>
  std::shared_ptr<ClientCall> CreateCall(
      typename GrpcService::Stub &stub,
      const PrepareAsyncFunction<GrpcService, Request, Reply> prepare_async_function,
      const Request &request, const ClientCallback<Reply> &callback) {
    auto call = std::make_shared<ClientCallImpl<Reply>>(callback);
    // Send request.
    // Find the next completion queue to wait for response.
    call->response_reader_ = (stub.*prepare_async_function)(
        &call->context_, request, &cqs_[rr_index_++ % num_threads_]);
    call->response_reader_->StartCall();
    // Create a new tag object. This object will eventually be deleted in the
    // `ClientCallManager::PollEventsFromCompletionQueue` when reply is received.
    //
    // NOTE(chen): Unlike `ServerCall`, we can't directly use `ClientCall` as the tag.
    // Because this function must return a `shared_ptr` to make sure the returned
    // `ClientCall` is safe to use. But `response_reader_->Finish` only accepts a raw
    // pointer.
    auto tag = new ClientCallTag(call);
    call->response_reader_->Finish(&call->reply_, &call->status_, (void *)tag);
    return call;
  }

 private:
  /// This function runs in a background thread. It keeps polling events from the
  /// `CompletionQueue`, and dispatches the event to the callbacks via the `ClientCall`
  /// objects.
  ///
  /// \param index Index of the completion queue this thread polls.
  void PollEventsFromCompletionQueue(int index) {
    void *got_tag;
    bool ok = false;
    // Keep reading events from the `CompletionQueue` until it's shutdown.
    // NOTE(edoakes): we use AsyncNext here because for some unknown reason,
    // synchronous cq_.Next blocks indefinitely in the case that the process
    // received a SIGTERM.
    while (true) {
      auto deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                   gpr_time_from_millis(250, GPR_TIMESPAN));
      auto status = cqs_[index].AsyncNext(&got_tag, &ok, deadline);
      if (status == grpc::CompletionQueue::SHUTDOWN) {
        break;
      } else if (status == grpc::CompletionQueue::TIMEOUT && shutdown_) {
        // If we timed out and shutdown, then exit immediately. This should not
        // be needed, but gRPC seems to not return SHUTDOWN correctly in these
        // cases (e.g., test_wait will hang on shutdown without this check).
        break;
      } else if (status != grpc::CompletionQueue::TIMEOUT) {
        auto tag = reinterpret_cast<ClientCallTag *>(got_tag);
        tag->GetCall()->SetReturnStatus();
        if (ok && !main_service_.stopped() && !shutdown_) {
          // Post the callback to the main event loop.
          main_service_.post([tag]() {
            tag->GetCall()->OnReplyReceived();
            // The call is finished, and we can delete this tag now.
            delete tag;
          });
        } else {
          // Not dispatching the callback (failed event or shutting down), so
          // the tag must be freed here to avoid a leak.
          delete tag;
        }
      }
    }
  }

  /// The main event loop, to which the callback functions will be posted.
  boost::asio::io_service &main_service_;

  /// The number of polling threads.
  int num_threads_;

  /// Whether the client has shutdown.
  std::atomic<bool> shutdown_;

  /// The index to send RPCs in a round-robin fashion
  std::atomic<unsigned int> rr_index_;

  /// The gRPC `CompletionQueue` object used to poll events.
  std::vector<grpc::CompletionQueue> cqs_;

  /// Polling threads to check the completion queue.
  std::vector<std::thread> polling_threads_;
};
} // namespace rpc
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/gcs_server/gcs_rpc_client.h
|
C/C++ Header
|
#ifndef RAY_RPC_GCS_RPC_CLIENT_H
#define RAY_RPC_GCS_RPC_CLIENT_H
#include "src/ray/rpc/grpc_client.h"
namespace ray {
namespace rpc {
/// Client used for communicating with gcs server.
///
/// Holds one `GrpcClient` (stub + channel) per GCS gRPC service, all pointing
/// at the same server address.  Every request method below is generated by
/// `VOID_RPC_CLIENT_METHOD` and is asynchronous: the reply is delivered to the
/// supplied callback through the shared `ClientCallManager`.
class GcsRpcClient {
 public:
  /// Constructor.
  ///
  /// \param[in] address Address of gcs server.
  /// \param[in] port Port of the gcs server.
  /// \param[in] client_call_manager The `ClientCallManager` used for managing requests.
  GcsRpcClient(const std::string &address, const int port,
               ClientCallManager &client_call_manager) {
    // One dedicated client wrapper per service; each creates its own channel
    // to the same GCS endpoint.
    job_info_grpc_client_ = std::unique_ptr<GrpcClient<JobInfoGcsService>>(
        new GrpcClient<JobInfoGcsService>(address, port, client_call_manager));
    actor_info_grpc_client_ = std::unique_ptr<GrpcClient<ActorInfoGcsService>>(
        new GrpcClient<ActorInfoGcsService>(address, port, client_call_manager));
    node_info_grpc_client_ = std::unique_ptr<GrpcClient<NodeInfoGcsService>>(
        new GrpcClient<NodeInfoGcsService>(address, port, client_call_manager));
    object_info_grpc_client_ = std::unique_ptr<GrpcClient<ObjectInfoGcsService>>(
        new GrpcClient<ObjectInfoGcsService>(address, port, client_call_manager));
    task_info_grpc_client_ = std::unique_ptr<GrpcClient<TaskInfoGcsService>>(
        new GrpcClient<TaskInfoGcsService>(address, port, client_call_manager));
    stats_grpc_client_ = std::unique_ptr<GrpcClient<StatsGcsService>>(
        new GrpcClient<StatsGcsService>(address, port, client_call_manager));
    error_info_grpc_client_ = std::unique_ptr<GrpcClient<ErrorInfoGcsService>>(
        new GrpcClient<ErrorInfoGcsService>(address, port, client_call_manager));
    worker_info_grpc_client_ = std::unique_ptr<GrpcClient<WorkerInfoGcsService>>(
        new GrpcClient<WorkerInfoGcsService>(address, port, client_call_manager));
  };

  /// Add job info to gcs server.
  VOID_RPC_CLIENT_METHOD(JobInfoGcsService, AddJob, job_info_grpc_client_, )

  /// Mark job as finished to gcs server.
  VOID_RPC_CLIENT_METHOD(JobInfoGcsService, MarkJobFinished, job_info_grpc_client_, )

  /// Get actor data from GCS Service.
  VOID_RPC_CLIENT_METHOD(ActorInfoGcsService, GetActorInfo, actor_info_grpc_client_, )

  /// Register an actor to GCS Service.
  VOID_RPC_CLIENT_METHOD(ActorInfoGcsService, RegisterActorInfo,
                         actor_info_grpc_client_, )

  /// Update actor info in GCS Service.
  VOID_RPC_CLIENT_METHOD(ActorInfoGcsService, UpdateActorInfo, actor_info_grpc_client_, )

  /// Add actor checkpoint data to GCS Service.
  VOID_RPC_CLIENT_METHOD(ActorInfoGcsService, AddActorCheckpoint,
                         actor_info_grpc_client_, )

  /// Get actor checkpoint data from GCS Service.
  VOID_RPC_CLIENT_METHOD(ActorInfoGcsService, GetActorCheckpoint,
                         actor_info_grpc_client_, )

  /// Get actor checkpoint id data from GCS Service.
  VOID_RPC_CLIENT_METHOD(ActorInfoGcsService, GetActorCheckpointID,
                         actor_info_grpc_client_, )

  /// Register a node to GCS Service.
  VOID_RPC_CLIENT_METHOD(NodeInfoGcsService, RegisterNode, node_info_grpc_client_, )

  /// Unregister a node from GCS Service.
  VOID_RPC_CLIENT_METHOD(NodeInfoGcsService, UnregisterNode, node_info_grpc_client_, )

  /// Get information of all nodes from GCS Service.
  VOID_RPC_CLIENT_METHOD(NodeInfoGcsService, GetAllNodeInfo, node_info_grpc_client_, )

  /// Report heartbeat of a node to GCS Service.
  VOID_RPC_CLIENT_METHOD(NodeInfoGcsService, ReportHeartbeat, node_info_grpc_client_, )

  /// Report batch heartbeat to GCS Service.
  VOID_RPC_CLIENT_METHOD(NodeInfoGcsService, ReportBatchHeartbeat,
                         node_info_grpc_client_, )

  /// Get node's resources from GCS Service.
  VOID_RPC_CLIENT_METHOD(NodeInfoGcsService, GetResources, node_info_grpc_client_, )

  /// Update resources of a node in GCS Service.
  VOID_RPC_CLIENT_METHOD(NodeInfoGcsService, UpdateResources, node_info_grpc_client_, )

  /// Delete resources of a node in GCS Service.
  VOID_RPC_CLIENT_METHOD(NodeInfoGcsService, DeleteResources, node_info_grpc_client_, )

  /// Get object's locations from GCS Service.
  VOID_RPC_CLIENT_METHOD(ObjectInfoGcsService, GetObjectLocations,
                         object_info_grpc_client_, )

  /// Add location of object to GCS Service.
  VOID_RPC_CLIENT_METHOD(ObjectInfoGcsService, AddObjectLocation,
                         object_info_grpc_client_, )

  /// Remove location of object to GCS Service.
  VOID_RPC_CLIENT_METHOD(ObjectInfoGcsService, RemoveObjectLocation,
                         object_info_grpc_client_, )

  /// Add a task to GCS Service.
  VOID_RPC_CLIENT_METHOD(TaskInfoGcsService, AddTask, task_info_grpc_client_, )

  /// Get task information from GCS Service.
  VOID_RPC_CLIENT_METHOD(TaskInfoGcsService, GetTask, task_info_grpc_client_, )

  /// Delete tasks from GCS Service.
  VOID_RPC_CLIENT_METHOD(TaskInfoGcsService, DeleteTasks, task_info_grpc_client_, )

  /// Add a task lease to GCS Service.
  VOID_RPC_CLIENT_METHOD(TaskInfoGcsService, AddTaskLease, task_info_grpc_client_, )

  /// Attempt task reconstruction to GCS Service.
  VOID_RPC_CLIENT_METHOD(TaskInfoGcsService, AttemptTaskReconstruction,
                         task_info_grpc_client_, )

  /// Add profile data to GCS Service.
  VOID_RPC_CLIENT_METHOD(StatsGcsService, AddProfileData, stats_grpc_client_, )

  /// Report a job error to GCS Service.
  VOID_RPC_CLIENT_METHOD(ErrorInfoGcsService, ReportJobError, error_info_grpc_client_, )

  /// Report a worker failure to GCS Service.
  VOID_RPC_CLIENT_METHOD(WorkerInfoGcsService, ReportWorkerFailure,
                         worker_info_grpc_client_, )

 private:
  // One `GrpcClient` per GCS service; each owns its gRPC-generated stub.
  std::unique_ptr<GrpcClient<JobInfoGcsService>> job_info_grpc_client_;
  std::unique_ptr<GrpcClient<ActorInfoGcsService>> actor_info_grpc_client_;
  std::unique_ptr<GrpcClient<NodeInfoGcsService>> node_info_grpc_client_;
  std::unique_ptr<GrpcClient<ObjectInfoGcsService>> object_info_grpc_client_;
  std::unique_ptr<GrpcClient<TaskInfoGcsService>> task_info_grpc_client_;
  std::unique_ptr<GrpcClient<StatsGcsService>> stats_grpc_client_;
  std::unique_ptr<GrpcClient<ErrorInfoGcsService>> error_info_grpc_client_;
  std::unique_ptr<GrpcClient<WorkerInfoGcsService>> worker_info_grpc_client_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_RPC_GCS_RPC_CLIENT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/gcs_server/gcs_rpc_server.h
|
C/C++ Header
|
#ifndef RAY_RPC_GCS_RPC_SERVER_H
#define RAY_RPC_GCS_RPC_SERVER_H
#include "src/ray/rpc/grpc_server.h"
#include "src/ray/rpc/server_call.h"
#include "src/ray/protobuf/gcs_service.grpc.pb.h"
namespace ray {
namespace rpc {
// Per-service shorthands for RPC_SERVICE_HANDLER (defined in grpc_server.h).
// Each expansion creates a `ServerCallFactory` for one RPC method of the named
// GCS service and registers it with the given max concurrency.
#define JOB_INFO_SERVICE_RPC_HANDLER(HANDLER, CONCURRENCY) \
  RPC_SERVICE_HANDLER(JobInfoGcsService, HANDLER, CONCURRENCY)

#define ACTOR_INFO_SERVICE_RPC_HANDLER(HANDLER, CONCURRENCY) \
  RPC_SERVICE_HANDLER(ActorInfoGcsService, HANDLER, CONCURRENCY)

#define NODE_INFO_SERVICE_RPC_HANDLER(HANDLER, CONCURRENCY) \
  RPC_SERVICE_HANDLER(NodeInfoGcsService, HANDLER, CONCURRENCY)

#define OBJECT_INFO_SERVICE_RPC_HANDLER(HANDLER, CONCURRENCY) \
  RPC_SERVICE_HANDLER(ObjectInfoGcsService, HANDLER, CONCURRENCY)

#define TASK_INFO_SERVICE_RPC_HANDLER(HANDLER, CONCURRENCY) \
  RPC_SERVICE_HANDLER(TaskInfoGcsService, HANDLER, CONCURRENCY)

#define STATS_SERVICE_RPC_HANDLER(HANDLER, CONCURRENCY) \
  RPC_SERVICE_HANDLER(StatsGcsService, HANDLER, CONCURRENCY)

#define ERROR_INFO_SERVICE_RPC_HANDLER(HANDLER, CONCURRENCY) \
  RPC_SERVICE_HANDLER(ErrorInfoGcsService, HANDLER, CONCURRENCY)

#define WORKER_INFO_SERVICE_RPC_HANDLER(HANDLER, CONCURRENCY) \
  RPC_SERVICE_HANDLER(WorkerInfoGcsService, HANDLER, CONCURRENCY)
/// Interface implemented by the GCS server to handle `JobInfoGcsService` RPCs.
/// Each handler must eventually invoke `send_reply_callback` to send the reply.
class JobInfoGcsServiceHandler {
 public:
  virtual ~JobInfoGcsServiceHandler() = default;

  /// Handle an `AddJob` request (register a new job).
  virtual void HandleAddJob(const AddJobRequest &request, AddJobReply *reply,
                            SendReplyCallback send_reply_callback) = 0;

  /// Handle a `MarkJobFinished` request.
  virtual void HandleMarkJobFinished(const MarkJobFinishedRequest &request,
                                     MarkJobFinishedReply *reply,
                                     SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `JobInfoGcsService`.
class JobInfoGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] handler The service handler that actually handles the requests.
  explicit JobInfoGrpcService(boost::asio::io_service &io_service,
                              JobInfoGcsServiceHandler &handler)
      : GrpcService(io_service), service_handler_(handler){};

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    // Each method is registered with a max concurrency of 1 pending call.
    JOB_INFO_SERVICE_RPC_HANDLER(AddJob, 1);
    JOB_INFO_SERVICE_RPC_HANDLER(MarkJobFinished, 1);
  }

 private:
  /// The grpc async service object.
  JobInfoGcsService::AsyncService service_;
  /// The service handler that actually handles the requests.
  JobInfoGcsServiceHandler &service_handler_;
};
/// Interface implemented by the GCS server to handle `ActorInfoGcsService`
/// RPCs.  Each handler must eventually invoke `send_reply_callback`.
class ActorInfoGcsServiceHandler {
 public:
  virtual ~ActorInfoGcsServiceHandler() = default;

  /// Look up actor data by actor ID.
  virtual void HandleGetActorInfo(const GetActorInfoRequest &request,
                                  GetActorInfoReply *reply,
                                  SendReplyCallback send_reply_callback) = 0;

  /// Register a new actor.
  virtual void HandleRegisterActorInfo(const RegisterActorInfoRequest &request,
                                       RegisterActorInfoReply *reply,
                                       SendReplyCallback send_reply_callback) = 0;

  /// Update an existing actor's data.
  virtual void HandleUpdateActorInfo(const UpdateActorInfoRequest &request,
                                     UpdateActorInfoReply *reply,
                                     SendReplyCallback send_reply_callback) = 0;

  /// Store an actor checkpoint.
  virtual void HandleAddActorCheckpoint(const AddActorCheckpointRequest &request,
                                        AddActorCheckpointReply *reply,
                                        SendReplyCallback send_reply_callback) = 0;

  /// Fetch a stored actor checkpoint.
  virtual void HandleGetActorCheckpoint(const GetActorCheckpointRequest &request,
                                        GetActorCheckpointReply *reply,
                                        SendReplyCallback send_reply_callback) = 0;

  /// Fetch the checkpoint ID data for an actor.
  virtual void HandleGetActorCheckpointID(const GetActorCheckpointIDRequest &request,
                                          GetActorCheckpointIDReply *reply,
                                          SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `ActorInfoGcsService`.
class ActorInfoGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] handler The service handler that actually handles the requests.
  explicit ActorInfoGrpcService(boost::asio::io_service &io_service,
                                ActorInfoGcsServiceHandler &handler)
      : GrpcService(io_service), service_handler_(handler){};

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    // Each method is registered with a max concurrency of 1 pending call.
    ACTOR_INFO_SERVICE_RPC_HANDLER(GetActorInfo, 1);
    ACTOR_INFO_SERVICE_RPC_HANDLER(RegisterActorInfo, 1);
    ACTOR_INFO_SERVICE_RPC_HANDLER(UpdateActorInfo, 1);
    ACTOR_INFO_SERVICE_RPC_HANDLER(AddActorCheckpoint, 1);
    ACTOR_INFO_SERVICE_RPC_HANDLER(GetActorCheckpoint, 1);
    ACTOR_INFO_SERVICE_RPC_HANDLER(GetActorCheckpointID, 1);
  }

 private:
  /// The grpc async service object.
  ActorInfoGcsService::AsyncService service_;
  /// The service handler that actually handles the requests.
  ActorInfoGcsServiceHandler &service_handler_;
};
/// Interface implemented by the GCS server to handle `NodeInfoGcsService`
/// RPCs (node registration, heartbeats, resource bookkeeping).
class NodeInfoGcsServiceHandler {
 public:
  virtual ~NodeInfoGcsServiceHandler() = default;

  /// Register a node with the GCS.
  virtual void HandleRegisterNode(const RegisterNodeRequest &request,
                                  RegisterNodeReply *reply,
                                  SendReplyCallback send_reply_callback) = 0;

  /// Unregister a node from the GCS.
  virtual void HandleUnregisterNode(const UnregisterNodeRequest &request,
                                    UnregisterNodeReply *reply,
                                    SendReplyCallback send_reply_callback) = 0;

  /// Return the info of every known node.
  virtual void HandleGetAllNodeInfo(const GetAllNodeInfoRequest &request,
                                    GetAllNodeInfoReply *reply,
                                    SendReplyCallback send_reply_callback) = 0;

  /// Record one node's heartbeat.
  virtual void HandleReportHeartbeat(const ReportHeartbeatRequest &request,
                                     ReportHeartbeatReply *reply,
                                     SendReplyCallback send_reply_callback) = 0;

  /// Record a batch of heartbeats.
  virtual void HandleReportBatchHeartbeat(const ReportBatchHeartbeatRequest &request,
                                          ReportBatchHeartbeatReply *reply,
                                          SendReplyCallback send_reply_callback) = 0;

  /// Get a node's resources.
  virtual void HandleGetResources(const GetResourcesRequest &request,
                                  GetResourcesReply *reply,
                                  SendReplyCallback send_reply_callback) = 0;

  /// Update a node's resources.
  virtual void HandleUpdateResources(const UpdateResourcesRequest &request,
                                     UpdateResourcesReply *reply,
                                     SendReplyCallback send_reply_callback) = 0;

  /// Delete a node's resources.
  virtual void HandleDeleteResources(const DeleteResourcesRequest &request,
                                     DeleteResourcesReply *reply,
                                     SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `NodeInfoGcsService`.
class NodeInfoGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] handler The service handler that actually handles the requests.
  explicit NodeInfoGrpcService(boost::asio::io_service &io_service,
                               NodeInfoGcsServiceHandler &handler)
      : GrpcService(io_service), service_handler_(handler){};

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    // Each method is registered with a max concurrency of 1 pending call.
    NODE_INFO_SERVICE_RPC_HANDLER(RegisterNode, 1);
    NODE_INFO_SERVICE_RPC_HANDLER(UnregisterNode, 1);
    NODE_INFO_SERVICE_RPC_HANDLER(GetAllNodeInfo, 1);
    NODE_INFO_SERVICE_RPC_HANDLER(ReportHeartbeat, 1);
    NODE_INFO_SERVICE_RPC_HANDLER(ReportBatchHeartbeat, 1);
    NODE_INFO_SERVICE_RPC_HANDLER(GetResources, 1);
    NODE_INFO_SERVICE_RPC_HANDLER(UpdateResources, 1);
    NODE_INFO_SERVICE_RPC_HANDLER(DeleteResources, 1);
  }

 private:
  /// The grpc async service object.
  NodeInfoGcsService::AsyncService service_;
  /// The service handler that actually handles the requests.
  NodeInfoGcsServiceHandler &service_handler_;
};
/// Interface implemented by the GCS server to handle `ObjectInfoGcsService`
/// RPCs (object location directory).
class ObjectInfoGcsServiceHandler {
 public:
  virtual ~ObjectInfoGcsServiceHandler() = default;

  /// Return the locations of an object.
  virtual void HandleGetObjectLocations(const GetObjectLocationsRequest &request,
                                        GetObjectLocationsReply *reply,
                                        SendReplyCallback send_reply_callback) = 0;

  /// Add a location for an object.
  virtual void HandleAddObjectLocation(const AddObjectLocationRequest &request,
                                       AddObjectLocationReply *reply,
                                       SendReplyCallback send_reply_callback) = 0;

  /// Remove a location of an object.
  virtual void HandleRemoveObjectLocation(const RemoveObjectLocationRequest &request,
                                          RemoveObjectLocationReply *reply,
                                          SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `ObjectInfoGcsService`.
class ObjectInfoGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] handler The service handler that actually handles the requests.
  explicit ObjectInfoGrpcService(boost::asio::io_service &io_service,
                                 ObjectInfoGcsServiceHandler &handler)
      : GrpcService(io_service), service_handler_(handler){};

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    // Each method is registered with a max concurrency of 1 pending call.
    OBJECT_INFO_SERVICE_RPC_HANDLER(GetObjectLocations, 1);
    OBJECT_INFO_SERVICE_RPC_HANDLER(AddObjectLocation, 1);
    OBJECT_INFO_SERVICE_RPC_HANDLER(RemoveObjectLocation, 1);
  }

 private:
  /// The grpc async service object.
  ObjectInfoGcsService::AsyncService service_;
  /// The service handler that actually handles the requests.
  ObjectInfoGcsServiceHandler &service_handler_;
};
/// Interface implemented by the GCS server to handle `TaskInfoGcsService`
/// RPCs (task table, task leases, reconstruction).
class TaskInfoGcsServiceHandler {
 public:
  virtual ~TaskInfoGcsServiceHandler() = default;

  /// Add a task to the task table.
  virtual void HandleAddTask(const AddTaskRequest &request, AddTaskReply *reply,
                             SendReplyCallback send_reply_callback) = 0;

  /// Look up a task by ID.
  virtual void HandleGetTask(const GetTaskRequest &request, GetTaskReply *reply,
                             SendReplyCallback send_reply_callback) = 0;

  /// Delete a set of tasks.
  virtual void HandleDeleteTasks(const DeleteTasksRequest &request,
                                 DeleteTasksReply *reply,
                                 SendReplyCallback send_reply_callback) = 0;

  /// Record a task lease.
  virtual void HandleAddTaskLease(const AddTaskLeaseRequest &request,
                                  AddTaskLeaseReply *reply,
                                  SendReplyCallback send_reply_callback) = 0;

  /// Attempt to reconstruct a task.
  virtual void HandleAttemptTaskReconstruction(
      const AttemptTaskReconstructionRequest &request,
      AttemptTaskReconstructionReply *reply, SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `TaskInfoGcsService`.
class TaskInfoGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] handler The service handler that actually handles the requests.
  explicit TaskInfoGrpcService(boost::asio::io_service &io_service,
                               TaskInfoGcsServiceHandler &handler)
      : GrpcService(io_service), service_handler_(handler){};

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    // Each method is registered with a max concurrency of 1 pending call.
    TASK_INFO_SERVICE_RPC_HANDLER(AddTask, 1);
    TASK_INFO_SERVICE_RPC_HANDLER(GetTask, 1);
    TASK_INFO_SERVICE_RPC_HANDLER(DeleteTasks, 1);
    TASK_INFO_SERVICE_RPC_HANDLER(AddTaskLease, 1);
    TASK_INFO_SERVICE_RPC_HANDLER(AttemptTaskReconstruction, 1);
  }

 private:
  /// The grpc async service object.
  TaskInfoGcsService::AsyncService service_;
  /// The service handler that actually handles the requests.
  TaskInfoGcsServiceHandler &service_handler_;
};
/// Interface implemented by the GCS server to handle `StatsGcsService` RPCs.
class StatsGcsServiceHandler {
 public:
  virtual ~StatsGcsServiceHandler() = default;

  /// Store profiling data reported by a worker or raylet.
  virtual void HandleAddProfileData(const AddProfileDataRequest &request,
                                    AddProfileDataReply *reply,
                                    SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `StatsGcsService`.
class StatsGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] handler The service handler that actually handles the requests.
  explicit StatsGrpcService(boost::asio::io_service &io_service,
                            StatsGcsServiceHandler &handler)
      : GrpcService(io_service), service_handler_(handler){};

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    // Registered with a max concurrency of 1 pending call.
    STATS_SERVICE_RPC_HANDLER(AddProfileData, 1);
  }

 private:
  /// The grpc async service object.
  StatsGcsService::AsyncService service_;
  /// The service handler that actually handles the requests.
  StatsGcsServiceHandler &service_handler_;
};
/// Interface implemented by the GCS server to handle `ErrorInfoGcsService`
/// RPCs.
class ErrorInfoGcsServiceHandler {
 public:
  virtual ~ErrorInfoGcsServiceHandler() = default;

  /// Record an error reported for a job.
  virtual void HandleReportJobError(const ReportJobErrorRequest &request,
                                    ReportJobErrorReply *reply,
                                    SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `ErrorInfoGcsService`.
class ErrorInfoGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] handler The service handler that actually handles the requests.
  explicit ErrorInfoGrpcService(boost::asio::io_service &io_service,
                                ErrorInfoGcsServiceHandler &handler)
      : GrpcService(io_service), service_handler_(handler){};

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    // Registered with a max concurrency of 1 pending call.
    ERROR_INFO_SERVICE_RPC_HANDLER(ReportJobError, 1);
  }

 private:
  /// The grpc async service object.
  ErrorInfoGcsService::AsyncService service_;
  /// The service handler that actually handles the requests.
  ErrorInfoGcsServiceHandler &service_handler_;
};
/// Interface implemented by the GCS server to handle `WorkerInfoGcsService`
/// RPCs.
class WorkerInfoGcsServiceHandler {
 public:
  virtual ~WorkerInfoGcsServiceHandler() = default;

  /// Record the failure of a worker process.
  virtual void HandleReportWorkerFailure(const ReportWorkerFailureRequest &request,
                                         ReportWorkerFailureReply *reply,
                                         SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `WorkerInfoGcsService`.
class WorkerInfoGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] handler The service handler that actually handles the requests.
  explicit WorkerInfoGrpcService(boost::asio::io_service &io_service,
                                 WorkerInfoGcsServiceHandler &handler)
      : GrpcService(io_service), service_handler_(handler){};

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    // Registered with a max concurrency of 1 pending call.
    WORKER_INFO_SERVICE_RPC_HANDLER(ReportWorkerFailure, 1);
  }

 private:
  /// The grpc async service object.
  WorkerInfoGcsService::AsyncService service_;
  /// The service handler that actually handles the requests.
  WorkerInfoGcsServiceHandler &service_handler_;
};
// Short aliases for the handler interfaces above.  NOTE(review): presumably
// used by the GCS server implementation; the macros in this header expand to
// `<Service>Handler` names (e.g. `JobInfoGcsServiceHandler`), not these --
// confirm the aliases are still referenced before removing.
using JobInfoHandler = JobInfoGcsServiceHandler;
using ActorInfoHandler = ActorInfoGcsServiceHandler;
using NodeInfoHandler = NodeInfoGcsServiceHandler;
using ObjectInfoHandler = ObjectInfoGcsServiceHandler;
using TaskInfoHandler = TaskInfoGcsServiceHandler;
using StatsHandler = StatsGcsServiceHandler;
using ErrorInfoHandler = ErrorInfoGcsServiceHandler;
using WorkerInfoHandler = WorkerInfoGcsServiceHandler;
} // namespace rpc
} // namespace ray
#endif // RAY_RPC_GCS_RPC_SERVER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/grpc_client.h
|
C/C++ Header
|
#ifndef RAY_RPC_GRPC_CLIENT_H
#define RAY_RPC_GRPC_CLIENT_H
#include <grpcpp/grpcpp.h>
#include <boost/asio.hpp>
#include "ray/common/grpc_util.h"
#include "ray/common/status.h"
#include "ray/rpc/client_call.h"
namespace ray {
namespace rpc {
// This macro wraps the logic to call a specific RPC method of a service,
// to make it easier to implement a new RPC client.  It expands to a call to
// `GrpcClient::CallMethod` using the gRPC-generated `PrepareAsync<METHOD>`
// stub function and the `<METHOD>Request`/`<METHOD>Reply` message types.
#define INVOKE_RPC_CALL(SERVICE, METHOD, request, callback, rpc_client) \
  ({ \
    rpc_client->CallMethod<METHOD##Request, METHOD##Reply>( \
        &SERVICE::Stub::PrepareAsync##METHOD, request, callback); \
  })

// Define a void RPC client method.  The returned status is discarded; the
// reply (or error) is delivered through `callback`.
#define VOID_RPC_CLIENT_METHOD(SERVICE, METHOD, rpc_client, SPECS) \
  void METHOD(const METHOD##Request &request, \
              const ClientCallback<METHOD##Reply> &callback) SPECS { \
    RAY_UNUSED(INVOKE_RPC_CALL(SERVICE, METHOD, request, callback, rpc_client)); \
  }

// Define a RPC client method that returns ray::Status (the status of the
// initial call submission, not of the reply).
#define RPC_CLIENT_METHOD(SERVICE, METHOD, rpc_client, SPECS) \
  ray::Status METHOD(const METHOD##Request &request, \
                     const ClientCallback<METHOD##Reply> &callback) SPECS { \
    return INVOKE_RPC_CALL(SERVICE, METHOD, request, callback, rpc_client); \
  }
template <class GrpcService>
class GrpcClient {
public:
GrpcClient(const std::string &address, const int port, ClientCallManager &call_manager)
: client_call_manager_(call_manager) {
grpc::ChannelArguments argument;
// Disable http proxy since it disrupts local connections. TODO(ekl) we should make
// this configurable, or selectively set it for known local connections only.
argument.SetInt(GRPC_ARG_ENABLE_HTTP_PROXY, 0);
std::shared_ptr<grpc::Channel> channel =
grpc::CreateCustomChannel(address + ":" + std::to_string(port),
grpc::InsecureChannelCredentials(), argument);
stub_ = GrpcService::NewStub(channel);
}
GrpcClient(const std::string &address, const int port, ClientCallManager &call_manager,
int num_threads)
: client_call_manager_(call_manager) {
grpc::ResourceQuota quota;
quota.SetMaxThreads(num_threads);
grpc::ChannelArguments argument;
argument.SetResourceQuota(quota);
argument.SetInt(GRPC_ARG_ENABLE_HTTP_PROXY, 0);
std::shared_ptr<grpc::Channel> channel =
grpc::CreateCustomChannel(address + ":" + std::to_string(port),
grpc::InsecureChannelCredentials(), argument);
stub_ = GrpcService::NewStub(channel);
}
/// Create a new `ClientCall` and send request.
///
/// \tparam Request Type of the request message.
/// \tparam Reply Type of the reply message.
///
/// \param[in] prepare_async_function Pointer to the gRPC-generated
/// `FooService::Stub::PrepareAsyncBar` function.
/// \param[in] request The request message.
/// \param[in] callback The callback function that handles reply.
///
/// \return Status.
template <class Request, class Reply>
ray::Status CallMethod(
const PrepareAsyncFunction<GrpcService, Request, Reply> prepare_async_function,
const Request &request, const ClientCallback<Reply> &callback) {
auto call = client_call_manager_.CreateCall<GrpcService, Request, Reply>(
*stub_, prepare_async_function, request, callback);
return call->GetStatus();
}
private:
ClientCallManager &client_call_manager_;
/// The gRPC-generated stub.
std::unique_ptr<typename GrpcService::Stub> stub_;
};
} // namespace rpc
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/grpc_server.cc
|
C++
|
#include "src/ray/rpc/grpc_server.h"
#include <grpcpp/impl/service_type.h>
#include <boost/asio/detail/socket_holder.hpp>
namespace ray {
namespace rpc {
// Construct a server that is initially closed; `Run()` must be called to
// bind the port and start serving.
GrpcServer::GrpcServer(std::string name, const uint32_t port, int num_threads)
    : name_(std::move(name)), port_(port), is_closed_(true), num_threads_(num_threads) {
  // One completion queue per polling thread is created later in `Run()`;
  // reserve to avoid reallocation while queues are being added.
  cqs_.reserve(num_threads_);
}
// Bind the listening port, start the gRPC server, seed the server-call
// factories, and spawn one polling thread per completion queue.
void GrpcServer::Run() {
  // Remember the requested port for the error message below; AddListeningPort
  // writes the actually-bound port back into `port_` (0 on bind failure).
  uint32_t specified_port = port_;
  std::string server_address("0.0.0.0:" + std::to_string(port_));
  grpc::ServerBuilder builder;
  // Disable the SO_REUSEPORT option. We don't need it in ray. If the option is enabled
  // (default behavior in grpc), we may see multiple workers listen on the same port and
  // the requests sent to this port may be handled by any of the workers.
  builder.AddChannelArgument(GRPC_ARG_ALLOW_REUSEPORT, 0);
  // TODO(hchen): Add options for authentication.
  builder.AddListeningPort(server_address, grpc::InsecureServerCredentials(), &port_);
  // Register all the services to this server.
  if (services_.empty()) {
    RAY_LOG(WARNING) << "No service is found when start grpc server " << name_;
  }
  for (auto &entry : services_) {
    builder.RegisterService(&entry.get());
  }
  // Get hold of the completion queue used for the asynchronous communication
  // with the gRPC runtime.
  for (int i = 0; i < num_threads_; i++) {
    cqs_.push_back(builder.AddCompletionQueue());
  }
  // Build and start server.
  server_ = builder.BuildAndStart();
  // If the grpc server failed to bind the port, the `port_` will be set to 0.
  RAY_CHECK(port_ > 0)
      << "Port " << specified_port
      << " specified by caller already in use. Try passing node_manager_port=... into "
         "ray.init() to pick a specific port";
  RAY_LOG(INFO) << name_ << " server started, listening on port " << port_ << ".";
  // Create calls for all the server call factories.  `entry.second` is the
  // max concurrency, i.e. how many pending calls to pre-post per factory.
  for (auto &entry : server_call_factories_and_concurrencies_) {
    for (int i = 0; i < entry.second; i++) {
      // Create and request calls from the factory.
      entry.first->CreateCall();
    }
  }
  // Start threads that polls incoming requests.
  for (int i = 0; i < num_threads_; i++) {
    polling_threads_.emplace_back(&GrpcServer::PollEventsFromCompletionQueue, this, i);
  }
  // Set the server as running.
  is_closed_ = false;
}
// Register a `GrpcService` and create its server-call factories for every
// completion queue.
void GrpcServer::RegisterService(GrpcService &service) {
  // Keep a reference to the underlying grpc::Service so `Run()` can register
  // it with the ServerBuilder.  The service must outlive this server.
  services_.emplace_back(service.GetGrpcService());
  // NOTE(review): this indexes `cqs_[i]`, but the completion queues are only
  // pushed into `cqs_` inside `Run()` -- while `Run()` reads `services_`,
  // which this method fills.  If RegisterService is called before Run() (as
  // the warning in Run() suggests is expected), `cqs_` is still empty here
  // and `cqs_[i]` is out of bounds.  Confirm the intended call order at the
  // call sites.
  for (int i = 0; i < num_threads_; i++) {
    service.InitServerCallFactories(cqs_[i], &server_call_factories_and_concurrencies_);
  }
}
// Polling-thread body: drain one completion queue and drive each `ServerCall`
// through its PENDING -> PROCESSING -> SENDING_REPLY lifecycle.  The tag
// returned by the queue is an owning `ServerCall*`; it is deleted here once
// the call has finished (reply sent or failed).
void GrpcServer::PollEventsFromCompletionQueue(int index) {
  void *tag;
  bool ok;
  // Keep reading events from the `CompletionQueue` until it's shutdown.
  // Next() returns false only after the queue is shut down and fully drained.
  while (cqs_[index]->Next(&tag, &ok)) {
    auto *server_call = static_cast<ServerCall *>(tag);
    bool delete_call = false;
    if (ok) {
      switch (server_call->GetState()) {
      case ServerCallState::PENDING:
        // We've received a new incoming request. Now this call object is used to
        // track this request.
        server_call->SetState(ServerCallState::PROCESSING);
        server_call->HandleRequest();
        break;
      case ServerCallState::SENDING_REPLY:
        // GRPC has sent reply successfully, invoking the callback.
        server_call->OnReplySent();
        // The rpc call has finished and can be deleted now.
        delete_call = true;
        break;
      default:
        // PROCESSING calls should never surface from the completion queue.
        RAY_LOG(FATAL) << "Shouldn't reach here.";
        break;
      }
    } else {
      // `ok == false` will occur in two situations:
      // First, the server has been shut down, the server call's status is PENDING
      // Second, server has sent reply to client and failed, the server call's status is
      // SENDING_REPLY
      if (server_call->GetState() == ServerCallState::SENDING_REPLY) {
        server_call->OnReplyFailed();
      }
      delete_call = true;
    }
    if (delete_call) {
      delete server_call;
    }
  }
}
} // namespace rpc
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/grpc_server.h
|
C/C++ Header
|
#ifndef RAY_RPC_GRPC_SERVER_H
#define RAY_RPC_GRPC_SERVER_H
#include <grpcpp/grpcpp.h>
#include <boost/asio.hpp>
#include <thread>
#include <utility>
#include "ray/common/status.h"
#include "ray/rpc/server_call.h"
namespace ray {
namespace rpc {
// Create a `ServerCallFactory` binding one RPC method (`HANDLER`) of `SERVICE`
// to the corresponding `Handle<HANDLER>` method on `service_handler_`, and
// append it with its max concurrency.  Expanded inside
// `GrpcService::InitServerCallFactories` overrides, which provide `service_`,
// `service_handler_`, `cq`, `main_service_` and the output vector.
#define RPC_SERVICE_HANDLER(SERVICE, HANDLER, CONCURRENCY) \
  std::unique_ptr<ServerCallFactory> HANDLER##_call_factory( \
      new ServerCallFactoryImpl<SERVICE, SERVICE##Handler, HANDLER##Request, \
                                HANDLER##Reply>( \
          service_, &SERVICE::AsyncService::Request##HANDLER, service_handler_, \
          &SERVICE##Handler::Handle##HANDLER, cq, main_service_)); \
  server_call_factories_and_concurrencies->emplace_back( \
      std::move(HANDLER##_call_factory), CONCURRENCY);

// Define a void RPC client method.  (Declares a pure-virtual handler method.)
#define DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(METHOD) \
  virtual void Handle##METHOD(const rpc::METHOD##Request &request, \
                              rpc::METHOD##Reply *reply, \
                              rpc::SendReplyCallback send_reply_callback) = 0;

class GrpcService;
/// Class that represents a gRPC server.
///
/// A `GrpcServer` listens on a specific port. It owns
/// 1) a `ServerCompletionQueue` that is used for polling events from gRPC,
/// 2) and a thread that polls events from the `ServerCompletionQueue`.
///
/// Subclasses can register one or multiple services to a `GrpcServer`, see
/// `RegisterServices`. And they should also implement `InitServerCallFactories` to decide
/// which kinds of requests this server should accept.
class GrpcServer {
 public:
  /// Construct a gRPC server that listens on a TCP port.
  ///
  /// \param[in] name Name of this server, used for logging and debugging purpose.
  /// \param[in] port The port to bind this server to. If it's 0, a random available port
  /// will be chosen.
  /// \param[in] num_threads Number of completion queues / polling threads.
  GrpcServer(std::string name, const uint32_t port, int num_threads = 1);

  /// Destruct this gRPC server.
  /// NOTE(review): the class comment above says subclasses exist, but this
  /// destructor is non-virtual -- deleting a derived server through a
  /// `GrpcServer*` would be undefined behavior.  Confirm against usage.
  ~GrpcServer() { Shutdown(); }

  /// Initialize and run this server.
  void Run();

  // Shutdown this server.  Safe to call more than once; a no-op if the server
  // was never started (`is_closed_` starts out true).
  void Shutdown() {
    if (!is_closed_) {
      // Shutdown the server with an immediate deadline.
      // TODO(edoakes): do we want to do this in all cases?
      server_->Shutdown(gpr_now(GPR_CLOCK_REALTIME));
      // Completion queues must be shut down after the server; the polling
      // threads then drain them and exit.
      for (const auto &cq : cqs_) {
        cq->Shutdown();
      }
      for (auto &polling_thread : polling_threads_) {
        polling_thread.join();
      }
      is_closed_ = true;
      RAY_LOG(DEBUG) << "gRPC server of " << name_ << " shutdown.";
    }
  }

  /// Get the port of this gRPC server.
  int GetPort() const { return port_; }

  /// Register a grpc service. Multiple services can be registered to the same server.
  /// Note that the `service` registered must remain valid for the lifetime of the
  /// `GrpcServer`, as it holds the underlying `grpc::Service`.
  ///
  /// \param[in] service A `GrpcService` to register to this server.
  void RegisterService(GrpcService &service);

 protected:
  /// This function runs in a background thread. It keeps polling events from the
  /// `ServerCompletionQueue`, and dispatches the event to the `ServiceHandler` instances
  /// via the `ServerCall` objects.
  void PollEventsFromCompletionQueue(int index);

  /// Name of this server, used for logging and debugging purpose.
  const std::string name_;
  /// Port of this server.  Overwritten with the actually-bound port in `Run()`.
  int port_;
  /// Indicates whether this server has been closed.
  bool is_closed_;
  /// The `grpc::Service` objects which should be registered to `ServerBuilder`.
  std::vector<std::reference_wrapper<grpc::Service>> services_;
  /// The `ServerCallFactory` objects, and the maximum number of concurrent requests that
  /// this gRPC server can handle.
  std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
      server_call_factories_and_concurrencies_;
  /// The number of completion queues the server is polling from.
  int num_threads_;
  /// The `ServerCompletionQueue` objects used for polling events, one per thread.
  std::vector<std::unique_ptr<grpc::ServerCompletionQueue>> cqs_;
  /// The `Server` object.
  std::unique_ptr<grpc::Server> server_;
  /// The polling threads used to check the completion queues.
  std::vector<std::thread> polling_threads_;
};
/// Base class that represents an abstract gRPC service.
///
/// Subclass should implement `InitServerCallFactories` to decide
/// which kinds of requests this service should accept.
class GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] main_service The main event loop, to which service handler functions
  /// will be posted.
  explicit GrpcService(boost::asio::io_service &main_service)
      : main_service_(main_service) {}

  /// Destruct this gRPC service.
  ///
  /// Virtual because this is a polymorphic base class (it declares pure
  /// virtual methods and concrete services derive from it): destroying a
  /// derived service through a `GrpcService` pointer must run the derived
  /// destructor.
  virtual ~GrpcService() = default;

 protected:
  /// Return the underlying grpc::Service object for this class.
  /// This is passed to `GrpcServer` to be registered to grpc `ServerBuilder`.
  virtual grpc::Service &GetGrpcService() = 0;

  /// Subclasses should implement this method to initialize the `ServerCallFactory`
  /// instances, as well as specify maximum number of concurrent requests that gRPC
  /// server can handle.
  ///
  /// \param[in] cq The grpc completion queue.
  /// \param[out] server_call_factories_and_concurrencies The `ServerCallFactory` objects,
  /// and the maximum number of concurrent requests that this gRPC server can handle.
  virtual void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) = 0;

  /// The main event loop, to which the service handler functions will be posted.
  boost::asio::io_service &main_service_;

  friend class GrpcServer;
};
} // namespace rpc
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/node_manager/node_manager_client.h
|
C/C++ Header
|
#ifndef RAY_RPC_NODE_MANAGER_CLIENT_H
#define RAY_RPC_NODE_MANAGER_CLIENT_H
#include <grpcpp/grpcpp.h>
#include <thread>
#include "ray/common/status.h"
#include "ray/rpc/grpc_client.h"
#include "ray/util/logging.h"
#include "src/ray/protobuf/node_manager.grpc.pb.h"
#include "src/ray/protobuf/node_manager.pb.h"
namespace ray {
namespace rpc {
/// Client used for communicating with a remote node manager server.
class NodeManagerClient {
 public:
  /// Constructor.
  ///
  /// \param[in] address Address of the node manager server.
  /// \param[in] port Port of the node manager server.
  /// \param[in] client_call_manager The `ClientCallManager` used for managing requests.
  NodeManagerClient(const std::string &address, const int port,
                    ClientCallManager &client_call_manager)
      : client_call_manager_(client_call_manager) {
    grpc_client_ = std::unique_ptr<GrpcClient<NodeManagerService>>(
        new GrpcClient<NodeManagerService>(address, port, client_call_manager));
  }

  /// Forward a task and its uncommitted lineage.
  ///
  /// \param[in] request The request message.
  /// \param[in] callback The callback function that handles reply.
  VOID_RPC_CLIENT_METHOD(NodeManagerService, ForwardTask, grpc_client_, )

  /// Get current node stats.
  VOID_RPC_CLIENT_METHOD(NodeManagerService, GetNodeStats, grpc_client_, )

  /// Convenience overload that sends an empty `GetNodeStatsRequest`.
  ///
  /// \param[in] callback The callback function that handles reply.
  void GetNodeStats(const ClientCallback<GetNodeStatsReply> &callback) {
    GetNodeStatsRequest request;
    GetNodeStats(request, callback);
  }

 private:
  /// The RPC client.
  std::unique_ptr<GrpcClient<NodeManagerService>> grpc_client_;
  /// The `ClientCallManager` used for managing requests.
  ClientCallManager &client_call_manager_;
};
/// Client used by workers for communicating with a node manager server.
class NodeManagerWorkerClient
    : public std::enable_shared_from_this<NodeManagerWorkerClient> {
 public:
  /// Factory method. The constructor is private because instances must be owned
  /// by a `shared_ptr` (the class inherits `enable_shared_from_this`).
  ///
  /// \param[in] address Address of the node manager server.
  /// \param[in] port Port of the node manager server.
  /// \param[in] client_call_manager The `ClientCallManager` used for managing requests.
  /// \return A `shared_ptr` owning the newly created client.
  static std::shared_ptr<NodeManagerWorkerClient> make(
      const std::string &address, const int port,
      ClientCallManager &client_call_manager) {
    // NOTE: `std::make_shared` cannot be used here because the constructor is
    // private, so we allocate and wrap explicitly.
    auto instance = new NodeManagerWorkerClient(address, port, client_call_manager);
    return std::shared_ptr<NodeManagerWorkerClient>(instance);
  }

  /// Request a worker lease.
  RPC_CLIENT_METHOD(NodeManagerService, RequestWorkerLease, grpc_client_, )

  /// Return a worker lease.
  RPC_CLIENT_METHOD(NodeManagerService, ReturnWorker, grpc_client_, )

  /// Notify the raylet to pin the provided object IDs.
  RPC_CLIENT_METHOD(NodeManagerService, PinObjectIDs, grpc_client_, )

 private:
  /// Constructor.
  ///
  /// \param[in] address Address of the node manager server.
  /// \param[in] port Port of the node manager server.
  /// \param[in] client_call_manager The `ClientCallManager` used for managing requests.
  NodeManagerWorkerClient(const std::string &address, const int port,
                          ClientCallManager &client_call_manager)
      : client_call_manager_(client_call_manager) {
    grpc_client_ = std::unique_ptr<GrpcClient<NodeManagerService>>(
        new GrpcClient<NodeManagerService>(address, port, client_call_manager));
  }

  /// The RPC client.
  std::unique_ptr<GrpcClient<NodeManagerService>> grpc_client_;
  /// The `ClientCallManager` used for managing requests.
  ClientCallManager &client_call_manager_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_RPC_NODE_MANAGER_CLIENT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/node_manager/node_manager_server.h
|
C/C++ Header
|
#ifndef RAY_RPC_NODE_MANAGER_SERVER_H
#define RAY_RPC_NODE_MANAGER_SERVER_H
#include "ray/rpc/grpc_server.h"
#include "ray/rpc/server_call.h"
#include "src/ray/protobuf/node_manager.grpc.pb.h"
#include "src/ray/protobuf/node_manager.pb.h"
namespace ray {
namespace rpc {
/// NOTE: See src/ray/core_worker/core_worker.h on how to add a new grpc handler.
// Expands to one `RPC_SERVICE_HANDLER` registration per node manager RPC. The
// trailing integer is the maximum number of concurrent in-flight requests of
// that type (e.g. only one `GetNodeStats` request is accepted at a time).
#define RAY_NODE_MANAGER_RPC_HANDLERS                              \
  RPC_SERVICE_HANDLER(NodeManagerService, RequestWorkerLease, 100) \
  RPC_SERVICE_HANDLER(NodeManagerService, ReturnWorker, 100)       \
  RPC_SERVICE_HANDLER(NodeManagerService, ForwardTask, 100)        \
  RPC_SERVICE_HANDLER(NodeManagerService, PinObjectIDs, 100)       \
  RPC_SERVICE_HANDLER(NodeManagerService, GetNodeStats, 1)
/// Interface of the `NodeManagerService`, see `src/ray/protobuf/node_manager.proto`.
class NodeManagerServiceHandler {
 public:
  /// Virtual destructor: this is a polymorphic interface, so deleting an
  /// implementation through a base pointer must invoke the derived destructor.
  virtual ~NodeManagerServiceHandler() = default;

  /// Handlers. For all of the following handlers, the implementations can
  /// handle the request asynchronously. When handling is done, the
  /// `send_reply_callback` should be called. See
  /// src/ray/rpc/node_manager/node_manager_client.h and
  /// src/ray/protobuf/node_manager.proto for a description of the
  /// functionality of each handler.
  ///
  /// \param[in] request The request message.
  /// \param[out] reply The reply message.
  /// \param[in] send_reply_callback The callback to be called when the request is done.
  virtual void HandleRequestWorkerLease(const RequestWorkerLeaseRequest &request,
                                        RequestWorkerLeaseReply *reply,
                                        SendReplyCallback send_reply_callback) = 0;

  virtual void HandleReturnWorker(const ReturnWorkerRequest &request,
                                  ReturnWorkerReply *reply,
                                  SendReplyCallback send_reply_callback) = 0;

  virtual void HandleForwardTask(const ForwardTaskRequest &request,
                                 ForwardTaskReply *reply,
                                 SendReplyCallback send_reply_callback) = 0;

  virtual void HandlePinObjectIDs(const PinObjectIDsRequest &request,
                                  PinObjectIDsReply *reply,
                                  SendReplyCallback send_reply_callback) = 0;

  virtual void HandleGetNodeStats(const GetNodeStatsRequest &request,
                                  GetNodeStatsReply *reply,
                                  SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `NodeManagerService`.
class NodeManagerGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] io_service See super class.
  /// \param[in] service_handler The service handler that actually handles the requests.
  NodeManagerGrpcService(boost::asio::io_service &io_service,
                         NodeManagerServiceHandler &service_handler)
      : GrpcService(io_service), service_handler_(service_handler) {}

 protected:
  /// See `GrpcService::GetGrpcService`.
  grpc::Service &GetGrpcService() override { return service_; }

  /// Registers one `ServerCallFactory` (plus its concurrency cap) for every
  /// RPC listed in `RAY_NODE_MANAGER_RPC_HANDLERS`.
  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    RAY_NODE_MANAGER_RPC_HANDLERS
  }

 private:
  /// The grpc async service object.
  NodeManagerService::AsyncService service_;
  /// The service handler that actually handles the requests.
  NodeManagerServiceHandler &service_handler_;
};
} // namespace rpc
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/object_manager/object_manager_client.h
|
C/C++ Header
|
#ifndef RAY_RPC_OBJECT_MANAGER_CLIENT_H
#define RAY_RPC_OBJECT_MANAGER_CLIENT_H
#include <thread>
#include <grpcpp/grpcpp.h>
#include <grpcpp/resource_quota.h>
#include <grpcpp/support/channel_arguments.h>
#include "ray/common/status.h"
#include "ray/util/logging.h"
#include "src/ray/protobuf/object_manager.grpc.pb.h"
#include "src/ray/protobuf/object_manager.pb.h"
#include "src/ray/rpc/grpc_client.h"
namespace ray {
namespace rpc {
/// Client used for communicating with a remote node manager server.
class ObjectManagerClient {
public:
/// Constructor.
///
/// \param[in] address Address of the node manager server.
/// \param[in] port Port of the node manager server.
/// \param[in] client_call_manager The `ClientCallManager` used for managing requests.
ObjectManagerClient(const std::string &address, const int port,
ClientCallManager &client_call_manager, int num_connections = 4)
: client_call_manager_(client_call_manager), num_connections_(num_connections) {
push_rr_index_ = rand() % num_connections_;
pull_rr_index_ = rand() % num_connections_;
freeobjects_rr_index_ = rand() % num_connections_;
grpc_clients_.reserve(num_connections_);
for (int i = 0; i < num_connections_; i++) {
grpc_clients_.emplace_back(new GrpcClient<ObjectManagerService>(
address, port, client_call_manager, num_connections_));
}
};
/// Push object to remote object manager
///
/// \param request The request message.
/// \param callback The callback function that handles reply from server
VOID_RPC_CLIENT_METHOD(ObjectManagerService, Push,
grpc_clients_[push_rr_index_++ % num_connections_], )
/// Pull object from remote object manager
///
/// \param request The request message
/// \param callback The callback function that handles reply from server
VOID_RPC_CLIENT_METHOD(ObjectManagerService, Pull,
grpc_clients_[pull_rr_index_++ % num_connections_], )
/// Tell remote object manager to free objects
///
/// \param request The request message
/// \param callback The callback function that handles reply
VOID_RPC_CLIENT_METHOD(ObjectManagerService, FreeObjects,
grpc_clients_[freeobjects_rr_index_++ % num_connections_], )
private:
/// To optimize object manager performance we create multiple concurrent
/// GRPC connections, and use these connections in a round-robin way.
int num_connections_;
/// Current connection index for `Push`.
std::atomic<unsigned int> push_rr_index_;
/// Current connection index for `Pull`.
std::atomic<unsigned int> pull_rr_index_;
/// Current connection index for `FreeObjects`.
std::atomic<unsigned int> freeobjects_rr_index_;
/// The RPC clients.
std::vector<std::unique_ptr<GrpcClient<ObjectManagerService>>> grpc_clients_;
/// The `ClientCallManager` used for managing requests.
ClientCallManager &client_call_manager_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_RPC_OBJECT_MANAGER_CLIENT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/object_manager/object_manager_server.h
|
C/C++ Header
|
#ifndef RAY_RPC_OBJECT_MANAGER_SERVER_H
#define RAY_RPC_OBJECT_MANAGER_SERVER_H
#include "src/ray/rpc/grpc_server.h"
#include "src/ray/rpc/server_call.h"
#include "src/ray/protobuf/object_manager.grpc.pb.h"
#include "src/ray/protobuf/object_manager.pb.h"
namespace ray {
namespace rpc {
// Expands to one `RPC_SERVICE_HANDLER` registration per object manager RPC;
// the trailing integer is the maximum number of concurrent in-flight requests
// accepted for that method.
#define RAY_OBJECT_MANAGER_RPC_HANDLERS                     \
  RPC_SERVICE_HANDLER(ObjectManagerService, Push, 5)        \
  RPC_SERVICE_HANDLER(ObjectManagerService, Pull, 5)        \
  RPC_SERVICE_HANDLER(ObjectManagerService, FreeObjects, 2)
/// Implementations of the `ObjectManagerGrpcService`, check interface in
/// `src/ray/protobuf/object_manager.proto`.
class ObjectManagerServiceHandler {
 public:
  /// Virtual destructor: this is a polymorphic interface, so deleting an
  /// implementation through a base pointer must invoke the derived destructor.
  virtual ~ObjectManagerServiceHandler() = default;

  /// Handle a `Push` request.
  /// The implementation can handle this request asynchronously. When handling is done,
  /// the `send_reply_callback` should be called.
  ///
  /// \param[in] request The request message.
  /// \param[out] reply The reply message.
  /// \param[in] send_reply_callback The callback to be called when the request is done.
  virtual void HandlePush(const PushRequest &request, PushReply *reply,
                          SendReplyCallback send_reply_callback) = 0;

  /// Handle a `Pull` request.
  virtual void HandlePull(const PullRequest &request, PullReply *reply,
                          SendReplyCallback send_reply_callback) = 0;

  /// Handle a `FreeObjects` request.
  virtual void HandleFreeObjects(const FreeObjectsRequest &request,
                                 FreeObjectsReply *reply,
                                 SendReplyCallback send_reply_callback) = 0;
};
/// The `GrpcService` for `ObjectManagerGrpcService`.
class ObjectManagerGrpcService : public GrpcService {
 public:
  /// Construct a `ObjectManagerGrpcService`.
  ///
  /// \param[in] io_service See `GrpcService`.
  /// \param[in] service_handler The service handler that actually handles the requests.
  ObjectManagerGrpcService(boost::asio::io_service &io_service,
                           ObjectManagerServiceHandler &service_handler)
      : GrpcService(io_service), service_handler_(service_handler) {}

 protected:
  /// See `GrpcService::GetGrpcService`.
  grpc::Service &GetGrpcService() override { return service_; }

  /// Registers one `ServerCallFactory` (plus its concurrency cap) for every
  /// RPC listed in `RAY_OBJECT_MANAGER_RPC_HANDLERS`.
  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    RAY_OBJECT_MANAGER_RPC_HANDLERS
  }

 private:
  /// The grpc async service object.
  ObjectManagerService::AsyncService service_;
  /// The service handler that actually handles the requests.
  ObjectManagerServiceHandler &service_handler_;
};
} // namespace rpc
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/server_call.h
|
C/C++ Header
|
#ifndef RAY_RPC_SERVER_CALL_H
#define RAY_RPC_SERVER_CALL_H
#include <grpcpp/grpcpp.h>
#include <boost/asio.hpp>
#include "ray/common/grpc_util.h"
#include "ray/common/status.h"
namespace ray {
namespace rpc {
/// Represents the callback function to be called when a `ServiceHandler` finishes
/// handling a request.
///
/// \param status The status that will be returned to the client.
/// \param success Success callback which will be invoked when the reply is successfully
/// sent to the client.
/// \param failure Failure callback which will be invoked when the reply fails to be
/// sent to the client.
using SendReplyCallback = std::function<void(Status status, std::function<void()> success,
                                             std::function<void()> failure)>;

/// Represents state of a `ServerCall`.
enum class ServerCallState {
  /// The call is created and waiting for an incoming request.
  PENDING,
  /// Request is received and being processed.
  PROCESSING,
  /// Request processing is done, and reply is being sent to client.
  SENDING_REPLY
};
class ServerCallFactory;
/// Represents an incoming request of a gRPC server.
///
/// The lifecycle and state transition of a `ServerCall` is as follows:
///
/// --(1)--> PENDING --(2)--> PROCESSING --(3)--> SENDING_REPLY --(4)--> [FINISHED]
///
/// (1) The `GrpcServer` creates a `ServerCall` and use it as the tag to accept requests
///     gRPC `CompletionQueue`. Now the state is `PENDING`.
/// (2) When a request is received, an event will be gotten from the `CompletionQueue`.
///     `GrpcServer` then should change `ServerCall`'s state to PROCESSING and call
///     `ServerCall::HandleRequest`.
/// (3) When the `ServiceHandler` finishes handling the request, `ServerCallImpl::Finish`
///     will be called, and the state becomes `SENDING_REPLY`.
/// (4) When the reply is sent, an event will be gotten from the `CompletionQueue`.
///     `GrpcServer` will then delete this call.
///
/// NOTE(hchen): Compared to `ServerCallImpl`, this abstract interface doesn't use
/// template. This allows the users (e.g., `GrpcServer`) not having to use
/// template as well.
class ServerCall {
 public:
  /// Get the state of this `ServerCall`.
  virtual ServerCallState GetState() const = 0;

  /// Set state of this `ServerCall`.
  ///
  /// \param new_state The state to transition to (see the lifecycle above).
  virtual void SetState(const ServerCallState &new_state) = 0;

  /// Handle the request. This is the callback function to be called by
  /// `GrpcServer` when the request is received.
  virtual void HandleRequest() = 0;

  /// Invoked when sending the reply succeeds.
  virtual void OnReplySent() = 0;

  // Invoked when sending the reply fails.
  virtual void OnReplyFailed() = 0;

  /// Virtual destruct function to make sure subclass would destruct properly.
  virtual ~ServerCall() = default;
};
/// The factory that creates a particular kind of `ServerCall` objects.
class ServerCallFactory {
 public:
  /// Create a new `ServerCall` and request gRPC runtime to start accepting the
  /// corresponding type of requests.
  ///
  /// The created call is not returned: it registers itself as the completion
  /// queue tag and is eventually deleted by the server's polling loop.
  virtual void CreateCall() const = 0;

  /// Virtual destructor so subclasses destruct properly when deleted through a
  /// `ServerCallFactory` pointer.
  virtual ~ServerCallFactory() = default;
};
/// Represents the generic signature of a `FooServiceHandler::HandleBar()`
/// function, where `Foo` is the service name and `Bar` is the rpc method name.
/// This is a pointer-to-member-function type used to dispatch a request to the
/// matching handler method.
///
/// \tparam ServiceHandler Type of the handler that handles the request.
/// \tparam Request Type of the request message.
/// \tparam Reply Type of the reply message.
template <class ServiceHandler, class Request, class Reply>
using HandleRequestFunction = void (ServiceHandler::*)(const Request &, Reply *,
                                                       SendReplyCallback);
/// Implementation of `ServerCall`. It represents `ServerCall` for a particular
/// RPC method.
///
/// \tparam ServiceHandler Type of the handler that handles the request.
/// \tparam Request Type of the request message.
/// \tparam Reply Type of the reply message.
template <class ServiceHandler, class Request, class Reply>
class ServerCallImpl : public ServerCall {
 public:
  /// Constructor.
  ///
  /// \param[in] factory The factory which created this call.
  /// \param[in] service_handler The service handler that handles the request.
  /// \param[in] handle_request_function Pointer to the service handler function.
  /// \param[in] io_service The event loop.
  ServerCallImpl(
      const ServerCallFactory &factory, ServiceHandler &service_handler,
      HandleRequestFunction<ServiceHandler, Request, Reply> handle_request_function,
      boost::asio::io_service &io_service)
      : state_(ServerCallState::PENDING),
        factory_(factory),
        service_handler_(service_handler),
        handle_request_function_(handle_request_function),
        response_writer_(&context_),
        io_service_(io_service) {}

  ServerCallState GetState() const override { return state_; }

  void SetState(const ServerCallState &new_state) override { state_ = new_state; }

  void HandleRequest() override {
    if (!io_service_.stopped()) {
      // Dispatch the actual handling to the event loop thread.
      io_service_.post([this] { HandleRequestImpl(); });
    } else {
      // Handle service for rpc call has stopped, we must handle the call here
      // to send reply and remove it from cq
      RAY_LOG(DEBUG) << "Handle service has been closed.";
      SendReply(Status::Invalid("HandleServiceClosed"));
    }
  }

  /// Runs the handler on the event loop thread and arranges for the reply to
  /// be sent once the handler invokes its `SendReplyCallback`.
  void HandleRequestImpl() {
    state_ = ServerCallState::PROCESSING;
    // NOTE(hchen): This `factory` local variable is needed. Because `SendReply` runs in
    // a different thread, and will cause `this` to be deleted.
    const auto &factory = factory_;
    (service_handler_.*handle_request_function_)(
        request_, &reply_,
        [this](Status status, std::function<void()> success,
               std::function<void()> failure) {
          // These two callbacks must be set before `SendReply`, because `SendReply`
          // is async and this `ServerCall` might be deleted right after `SendReply`.
          send_reply_success_callback_ = std::move(success);
          send_reply_failure_callback_ = std::move(failure);

          // When the handler is done with the request, tell gRPC to finish this request.
          // Must send reply at the bottom of this callback, once we invoke this function,
          // this server call might be deleted
          SendReply(status);
        });
    // We've finished handling this request,
    // create a new `ServerCall` to accept the next incoming request.
    factory.CreateCall();
  }

  void OnReplySent() override {
    if (send_reply_success_callback_ && !io_service_.stopped()) {
      auto callback = std::move(send_reply_success_callback_);
      io_service_.post([callback]() { callback(); });
    }
  }

  void OnReplyFailed() override {
    if (send_reply_failure_callback_ && !io_service_.stopped()) {
      auto callback = std::move(send_reply_failure_callback_);
      io_service_.post([callback]() { callback(); });
    }
  }

 private:
  /// Tell gRPC to finish this request and send reply asynchronously.
  void SendReply(const Status &status) {
    state_ = ServerCallState::SENDING_REPLY;
    // `this` is passed as the completion queue tag; the polling loop will call
    // `OnReplySent`/`OnReplyFailed` and then delete this call.
    response_writer_.Finish(reply_, RayStatusToGrpcStatus(status), this);
  }

  /// State of this call.
  ServerCallState state_;

  /// The factory which created this call.
  const ServerCallFactory &factory_;

  /// The service handler that handles the request.
  ServiceHandler &service_handler_;

  /// Pointer to the service handler function.
  HandleRequestFunction<ServiceHandler, Request, Reply> handle_request_function_;

  /// Context for the request, allowing to tweak aspects of it such as the use
  /// of compression, authentication, as well as to send metadata back to the client.
  grpc::ServerContext context_;

  /// The response writer.
  grpc_impl::ServerAsyncResponseWriter<Reply> response_writer_;

  /// The event loop.
  boost::asio::io_service &io_service_;

  /// The request message.
  Request request_;

  /// The reply message.
  Reply reply_;

  /// The callback when sending reply successes.
  std::function<void()> send_reply_success_callback_ = nullptr;

  /// The callback when sending reply fails.
  std::function<void()> send_reply_failure_callback_ = nullptr;

  // The factory needs access to `context_`, `request_` and `response_writer_`
  // to register this call with the gRPC async service.
  template <class T1, class T2, class T3, class T4>
  friend class ServerCallFactoryImpl;
};
/// Represents the generic signature of a `FooService::AsyncService::RequestBar()`
/// function, where `Foo` is the service name and `Bar` is the rpc method name.
/// This is the gRPC-generated member function used to start accepting one
/// request of the corresponding method.
///
/// \tparam GrpcService Type of the gRPC-generated service class.
/// \tparam Request Type of the request message.
/// \tparam Reply Type of the reply message.
template <class GrpcService, class Request, class Reply>
using RequestCallFunction = void (GrpcService::AsyncService::*)(
    grpc::ServerContext *, Request *, grpc_impl::ServerAsyncResponseWriter<Reply> *,
    grpc::CompletionQueue *, grpc::ServerCompletionQueue *, void *);
/// Implementation of `ServerCallFactory`
///
/// \tparam GrpcService Type of the gRPC-generated service class.
/// \tparam ServiceHandler Type of the handler that handles the request.
/// \tparam Request Type of the request message.
/// \tparam Reply Type of the reply message.
template <class GrpcService, class ServiceHandler, class Request, class Reply>
class ServerCallFactoryImpl : public ServerCallFactory {
  using AsyncService = typename GrpcService::AsyncService;

 public:
  /// Constructor.
  ///
  /// \param[in] service The gRPC-generated `AsyncService`.
  /// \param[in] request_call_function Pointer to the `AsyncService::RequestMethod`
  /// function.
  /// \param[in] service_handler The service handler that handles the request.
  /// \param[in] handle_request_function Pointer to the service handler function.
  /// \param[in] cq The `CompletionQueue`.
  /// \param[in] io_service The event loop.
  ServerCallFactoryImpl(
      AsyncService &service,
      RequestCallFunction<GrpcService, Request, Reply> request_call_function,
      ServiceHandler &service_handler,
      HandleRequestFunction<ServiceHandler, Request, Reply> handle_request_function,
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      boost::asio::io_service &io_service)
      : service_(service),
        request_call_function_(request_call_function),
        service_handler_(service_handler),
        handle_request_function_(handle_request_function),
        cq_(cq),
        io_service_(io_service) {}

  void CreateCall() const override {
    // Create a new `ServerCall`. This object will eventually be deleted by
    // `GrpcServer::PollEventsFromCompletionQueue`.
    auto call = new ServerCallImpl<ServiceHandler, Request, Reply>(
        *this, service_handler_, handle_request_function_, io_service_);
    /// Request gRPC runtime to starting accepting this kind of request, using the call as
    /// the tag.
    (service_.*request_call_function_)(&call->context_, &call->request_,
                                       &call->response_writer_, cq_.get(), cq_.get(),
                                       call);
  }

 private:
  /// The gRPC-generated `AsyncService`.
  AsyncService &service_;

  /// Pointer to the `AsyncService::RequestMethod` function.
  RequestCallFunction<GrpcService, Request, Reply> request_call_function_;

  /// The service handler that handles the request.
  ServiceHandler &service_handler_;

  /// Pointer to the service handler function.
  HandleRequestFunction<ServiceHandler, Request, Reply> handle_request_function_;

  /// The `CompletionQueue`.
  const std::unique_ptr<grpc::ServerCompletionQueue> &cq_;

  /// The event loop.
  boost::asio::io_service &io_service_;
};
} // namespace rpc
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/worker/core_worker_client.h
|
C/C++ Header
|
#ifndef RAY_RPC_CORE_WORKER_CLIENT_H
#define RAY_RPC_CORE_WORKER_CLIENT_H
#include <grpcpp/grpcpp.h>
#include <deque>
#include <memory>
#include <mutex>
#include <thread>
#include "absl/base/thread_annotations.h"
#include "absl/hash/hash.h"
#include "ray/common/status.h"
#include "ray/rpc/grpc_client.h"
#include "ray/util/logging.h"
#include "src/ray/protobuf/core_worker.grpc.pb.h"
#include "src/ray/protobuf/core_worker.pb.h"
namespace ray {
namespace rpc {
/// The maximum number of bytes in flight per client (not a request count; see
/// `CoreWorkerClient::SendRequests` which compares against accumulated bytes).
const int64_t kMaxBytesInFlight = 16 * 1024 * 1024;

/// The base size in bytes accounted for every request, covering fixed
/// per-request overhead beyond the argument payloads.
const int64_t kBaseRequestSize = 1024;

/// Get the estimated size in bytes of the given task.
///
/// \param[in] request The task push request to measure.
/// \return The fixed base cost plus the size of every inlined argument payload.
static int64_t RequestSizeInBytes(const PushTaskRequest &request) {
  int64_t size = kBaseRequestSize;
  for (const auto &arg : request.task_spec().args()) {
    size += arg.data().size();
  }
  return size;
}
// Shared between direct actor and task submitters.
class CoreWorkerClientInterface;

// TODO(swang): Remove and replace with rpc::Address.
/// Value type identifying a worker by its transport address and ids.
/// NOTE: `AbslHashValue` and `operator==` must stay consistent — both combine /
/// compare exactly the same four fields.
class WorkerAddress {
 public:
  /// Hash support for absl hash containers; folds all four identity fields.
  template <typename H>
  friend H AbslHashValue(H h, const WorkerAddress &w) {
    return H::combine(std::move(h), w.ip_address, w.port, w.worker_id, w.raylet_id);
  }

  /// Two addresses are equal iff all four identity fields match.
  bool operator==(const WorkerAddress &other) const {
    return other.ip_address == ip_address && other.port == port &&
           other.worker_id == worker_id && other.raylet_id == raylet_id;
  }

  /// Convert to the protobuf `rpc::Address` representation.
  rpc::Address ToProto() const {
    rpc::Address addr;
    addr.set_raylet_id(raylet_id.Binary());
    addr.set_ip_address(ip_address);
    addr.set_port(port);
    addr.set_worker_id(worker_id.Binary());
    return addr;
  }

  /// The ip address of the worker.
  const std::string ip_address;
  /// The local port of the worker.
  const int port;
  /// The unique id of the worker.
  const WorkerID worker_id;
  /// The unique id of the worker raylet.
  const ClientID raylet_id;
};
/// Factory that creates a `CoreWorkerClientInterface` from an ip address and a
/// port. (`using` alias, consistent with the aliases used elsewhere in rpc/.)
using ClientFactoryFn =
    std::function<std::shared_ptr<CoreWorkerClientInterface>(const std::string &, int)>;
/// Abstract client interface for testing.
///
/// Every method defaults to `Status::NotImplemented` so test doubles only need
/// to override the RPCs they exercise.
class CoreWorkerClientInterface {
 public:
  /// This is called by the Raylet to assign a task to the worker.
  ///
  /// \param[in] request The request message.
  /// \param[in] callback The callback function that handles reply.
  /// \return if the rpc call succeeds
  virtual ray::Status AssignTask(const AssignTaskRequest &request,
                                 const ClientCallback<AssignTaskReply> &callback) {
    return Status::NotImplemented("");
  }

  /// Push an actor task directly from worker to worker.
  ///
  /// \param[in] request The request message.
  /// \param[in] callback The callback function that handles reply.
  /// \return if the rpc call succeeds
  virtual ray::Status PushActorTask(std::unique_ptr<PushTaskRequest> request,
                                    const ClientCallback<PushTaskReply> &callback) {
    return Status::NotImplemented("");
  }

  /// Similar to PushActorTask, but sets no ordering constraint. This is used to
  /// push non-actor tasks directly to a worker.
  virtual ray::Status PushNormalTask(std::unique_ptr<PushTaskRequest> request,
                                     const ClientCallback<PushTaskReply> &callback) {
    return Status::NotImplemented("");
  }

  /// Notify a wait has completed for direct actor call arguments.
  ///
  /// \param[in] request The request message.
  /// \param[in] callback The callback function that handles reply.
  /// \return if the rpc call succeeds
  virtual ray::Status DirectActorCallArgWaitComplete(
      const DirectActorCallArgWaitCompleteRequest &request,
      const ClientCallback<DirectActorCallArgWaitCompleteReply> &callback) {
    return Status::NotImplemented("");
  }

  /// Ask the owner of an object about the object's current status.
  virtual ray::Status GetObjectStatus(
      const GetObjectStatusRequest &request,
      const ClientCallback<GetObjectStatusReply> &callback) {
    return Status::NotImplemented("");
  }

  /// Notify the owner of an object that the object has been pinned.
  virtual ray::Status WaitForObjectEviction(
      const WaitForObjectEvictionRequest &request,
      const ClientCallback<WaitForObjectEvictionReply> &callback) {
    return Status::NotImplemented("");
  }

  /// Tell this actor to exit immediately.
  virtual ray::Status KillActor(const KillActorRequest &request,
                                const ClientCallback<KillActorReply> &callback) {
    return Status::NotImplemented("");
  }

  virtual ray::Status GetCoreWorkerStats(
      const GetCoreWorkerStatsRequest &request,
      const ClientCallback<GetCoreWorkerStatsReply> &callback) {
    return Status::NotImplemented("");
  }

  virtual ~CoreWorkerClientInterface() = default;
};
/// Client used for communicating with a remote worker server.
class CoreWorkerClient : public std::enable_shared_from_this<CoreWorkerClient>,
public CoreWorkerClientInterface {
public:
/// Constructor.
///
/// \param[in] address Address of the worker server.
/// \param[in] port Port of the worker server.
/// \param[in] client_call_manager The `ClientCallManager` used for managing requests.
CoreWorkerClient(const std::string &address, const int port,
ClientCallManager &client_call_manager)
: client_call_manager_(client_call_manager) {
grpc_client_ = std::unique_ptr<GrpcClient<CoreWorkerService>>(
new GrpcClient<CoreWorkerService>(address, port, client_call_manager));
};
RPC_CLIENT_METHOD(CoreWorkerService, AssignTask, grpc_client_, override)
RPC_CLIENT_METHOD(CoreWorkerService, DirectActorCallArgWaitComplete, grpc_client_,
override)
RPC_CLIENT_METHOD(CoreWorkerService, GetObjectStatus, grpc_client_, override)
RPC_CLIENT_METHOD(CoreWorkerService, KillActor, grpc_client_, override)
RPC_CLIENT_METHOD(CoreWorkerService, WaitForObjectEviction, grpc_client_, override)
RPC_CLIENT_METHOD(CoreWorkerService, GetCoreWorkerStats, grpc_client_, override)
ray::Status PushActorTask(std::unique_ptr<PushTaskRequest> request,
const ClientCallback<PushTaskReply> &callback) override {
request->set_sequence_number(request->task_spec().actor_task_spec().actor_counter());
{
std::lock_guard<std::mutex> lock(mutex_);
if (request->task_spec().caller_id() != cur_caller_id_) {
// We are running a new task, reset the seq no counter.
max_finished_seq_no_ = -1;
cur_caller_id_ = request->task_spec().caller_id();
}
send_queue_.push_back(std::make_pair(std::move(request), callback));
}
SendRequests();
return ray::Status::OK();
}
ray::Status PushNormalTask(std::unique_ptr<PushTaskRequest> request,
const ClientCallback<PushTaskReply> &callback) override {
request->set_sequence_number(-1);
request->set_client_processed_up_to(-1);
return INVOKE_RPC_CALL(CoreWorkerService, PushTask, *request, callback, grpc_client_);
}
/// Send as many pending tasks as possible. This method is thread-safe.
///
/// The client will guarantee no more than kMaxBytesInFlight bytes of RPCs are being
/// sent at once. This prevents the server scheduling queue from being overwhelmed.
/// See direct_actor.proto for a description of the ordering protocol.
void SendRequests() {
std::lock_guard<std::mutex> lock(mutex_);
auto this_ptr = this->shared_from_this();
while (!send_queue_.empty() && rpc_bytes_in_flight_ < kMaxBytesInFlight) {
auto pair = std::move(*send_queue_.begin());
send_queue_.pop_front();
auto request = std::move(pair.first);
auto callback = pair.second;
int64_t task_size = RequestSizeInBytes(*request);
int64_t seq_no = request->sequence_number();
request->set_client_processed_up_to(max_finished_seq_no_);
rpc_bytes_in_flight_ += task_size;
auto rpc_callback = [this, this_ptr, seq_no, task_size, callback](
Status status, const rpc::PushTaskReply &reply) {
{
std::lock_guard<std::mutex> lock(mutex_);
if (seq_no > max_finished_seq_no_) {
max_finished_seq_no_ = seq_no;
}
rpc_bytes_in_flight_ -= task_size;
RAY_CHECK(rpc_bytes_in_flight_ >= 0);
}
SendRequests();
callback(status, reply);
};
INVOKE_RPC_CALL(CoreWorkerService, PushTask, *request, rpc_callback, grpc_client_);
}
if (!send_queue_.empty()) {
RAY_LOG(DEBUG) << "client send queue size " << send_queue_.size();
}
}
private:
/// Protects against unsafe concurrent access from the callback thread.
std::mutex mutex_;
/// The RPC client.
std::unique_ptr<GrpcClient<CoreWorkerService>> grpc_client_;
/// The `ClientCallManager` used for managing requests.
ClientCallManager &client_call_manager_;
/// Queue of requests to send.
std::deque<std::pair<std::unique_ptr<PushTaskRequest>, ClientCallback<PushTaskReply>>>
send_queue_ GUARDED_BY(mutex_);
/// The number of bytes currently in flight.
int64_t rpc_bytes_in_flight_ GUARDED_BY(mutex_) = 0;
/// The max sequence number we have processed responses for.
int64_t max_finished_seq_no_ GUARDED_BY(mutex_) = -1;
/// The task id we are currently sending requests for. When this changes,
/// the max finished seq no counter is reset.
std::string cur_caller_id_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_RPC_CORE_WORKER_CLIENT_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/rpc/worker/core_worker_server.h
|
C/C++ Header
|
#ifndef RAY_RPC_CORE_WORKER_SERVER_H
#define RAY_RPC_CORE_WORKER_SERVER_H
#include "ray/rpc/grpc_server.h"
#include "ray/rpc/server_call.h"
#include "src/ray/protobuf/core_worker.grpc.pb.h"
#include "src/ray/protobuf/core_worker.pb.h"
namespace ray {
class CoreWorker;
namespace rpc {
/// NOTE: See src/ray/core_worker/core_worker.h on how to add a new grpc handler.
/// Table of RPC methods served by the core worker, with the maximum number of
/// concurrent in-flight requests allowed per method.
#define RAY_CORE_WORKER_RPC_HANDLERS                                          \
  RPC_SERVICE_HANDLER(CoreWorkerService, AssignTask, 5)                       \
  RPC_SERVICE_HANDLER(CoreWorkerService, PushTask, 9999)                      \
  RPC_SERVICE_HANDLER(CoreWorkerService, DirectActorCallArgWaitComplete, 100) \
  RPC_SERVICE_HANDLER(CoreWorkerService, GetObjectStatus, 9999)               \
  RPC_SERVICE_HANDLER(CoreWorkerService, WaitForObjectEviction, 9999)         \
  RPC_SERVICE_HANDLER(CoreWorkerService, KillActor, 9999)                     \
  RPC_SERVICE_HANDLER(CoreWorkerService, GetCoreWorkerStats, 100)
/// Declares the corresponding Handle* methods on `CoreWorkerServiceHandler`.
/// Keep this list in sync with RAY_CORE_WORKER_RPC_HANDLERS above.
#define RAY_CORE_WORKER_DECLARE_RPC_HANDLERS                       \
  DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(AssignTask)              \
  DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(PushTask)                \
  DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(DirectActorCallArgWaitComplete) \
  DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(GetObjectStatus)         \
  DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(WaitForObjectEviction)   \
  DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(KillActor)               \
  DECLARE_VOID_RPC_SERVICE_HANDLER_METHOD(GetCoreWorkerStats)
/// Interface of the `CoreWorkerServiceHandler`, see `src/ray/protobuf/core_worker.proto`.
/// Implemented by `CoreWorker`; one Handle* method is declared per RPC method
/// via the RAY_CORE_WORKER_DECLARE_RPC_HANDLERS macro.
class CoreWorkerServiceHandler {
 public:
  /// Handlers. For all of the following handlers, the implementations can
  /// handle the request asynchronously. When handling is done, the
  /// `send_reply_callback` should be called. See
  /// src/ray/rpc/node_manager/node_manager_client.h and
  /// src/ray/protobuf/node_manager.proto for a description of the
  /// functionality of each handler.
  ///
  /// \param[in] request The request message.
  /// \param[out] reply The reply message.
  /// \param[in] send_reply_callback The callback to be called when the request is done.
  RAY_CORE_WORKER_DECLARE_RPC_HANDLERS
};
/// The `GrpcServer` for `CoreWorkerService`.
class CoreWorkerGrpcService : public GrpcService {
 public:
  /// Constructor.
  ///
  /// \param[in] main_service See super class.
  /// \param[in] handler The service handler that actually handle the requests.
  CoreWorkerGrpcService(boost::asio::io_service &main_service,
                        CoreWorkerServiceHandler &service_handler)
      : GrpcService(main_service), service_handler_(service_handler) {}

 protected:
  grpc::Service &GetGrpcService() override { return service_; }

  /// Registers one server-call factory per RPC method (expanded from the
  /// RAY_CORE_WORKER_RPC_HANDLERS table) on the given completion queue.
  void InitServerCallFactories(
      const std::unique_ptr<grpc::ServerCompletionQueue> &cq,
      std::vector<std::pair<std::unique_ptr<ServerCallFactory>, int>>
          *server_call_factories_and_concurrencies) override {
    RAY_CORE_WORKER_RPC_HANDLERS
  }

 private:
  /// The grpc async service object.
  CoreWorkerService::AsyncService service_;
  /// The service handler that actually handles the requests.
  CoreWorkerServiceHandler &service_handler_;
};
} // namespace rpc
} // namespace ray
#endif // RAY_RPC_CORE_WORKER_SERVER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/stats/metric.cc
|
C++
|
#include "ray/stats/metric.h"
namespace ray {
namespace stats {
/// Attach the process-wide global tag keys plus the metric-specific keys as
/// columns of the given view descriptor, then register the view for export.
///
/// \param view_descriptor The descriptor to extend and register (by value).
/// \param keys Metric-specific tag keys to add as columns.
static void RegisterAsView(opencensus::stats::ViewDescriptor view_descriptor,
                           const std::vector<opencensus::tags::TagKey> &keys) {
  // Global keys come first so every view shares the same leading columns.
  const auto &global_tags = ray::stats::StatsConfig::instance().GetGlobalTags();
  for (const auto &global_tag : global_tags) {
    view_descriptor = view_descriptor.add_column(global_tag.first);
  }
  // Then append the metric's own tag keys.
  for (const auto &tag_key : keys) {
    view_descriptor = view_descriptor.add_column(tag_key);
  }
  opencensus::stats::View view(view_descriptor);
  view_descriptor.RegisterForExport();
}
// Meyers singleton: thread-safe initialization since C++11.
StatsConfig &StatsConfig::instance() {
  static StatsConfig instance;
  return instance;
}

// Tags attached to every metric recorded in this process.
void StatsConfig::SetGlobalTags(const TagsType &global_tags) {
  global_tags_ = global_tags;
}

const TagsType &StatsConfig::GetGlobalTags() const { return global_tags_; }

// When disabled, Metric::Record becomes a no-op.
void StatsConfig::SetIsDisableStats(bool disable_stats) {
  is_stats_disabled_ = disable_stats;
}

bool StatsConfig::IsStatsDisabled() const { return is_stats_disabled_; }
/// Record one data point for this metric, tagged with `tags` plus the
/// process-wide global tags. No-op when stats are disabled.
void Metric::Record(double value, const TagsType &tags) {
  if (StatsConfig::instance().IsStatsDisabled()) {
    return;
  }
  // Lazily register the measure and its view on first use.
  // NOTE(review): this check-then-create is not synchronized; concurrent first
  // calls could race on measure_ — confirm Record is only reached from a
  // single thread, or guard this initialization with a mutex.
  if (measure_ == nullptr) {
    measure_.reset(new opencensus::stats::Measure<double>(
        opencensus::stats::Measure<double>::Register(name_, description_, unit_)));
    RegisterView();
  }
  // Do record.
  TagsType combined_tags(tags);
  combined_tags.insert(std::end(combined_tags),
                       std::begin(StatsConfig::instance().GetGlobalTags()),
                       std::end(StatsConfig::instance().GetGlobalTags()));
  opencensus::stats::Record({{*measure_, value}}, combined_tags);
}
void Gauge::RegisterView() {
opencensus::stats::ViewDescriptor view_descriptor =
opencensus::stats::ViewDescriptor()
.set_name(name_)
.set_description(description_)
.set_measure(name_)
.set_aggregation(opencensus::stats::Aggregation::LastValue());
RegisterAsView(view_descriptor, tag_keys_);
}
void Histogram::RegisterView() {
opencensus::stats::ViewDescriptor view_descriptor =
opencensus::stats::ViewDescriptor()
.set_name(name_)
.set_description(description_)
.set_measure(name_)
.set_aggregation(opencensus::stats::Aggregation::Distribution(
opencensus::stats::BucketBoundaries::Explicit(boundaries_)));
RegisterAsView(view_descriptor, tag_keys_);
}
void Count::RegisterView() {
opencensus::stats::ViewDescriptor view_descriptor =
opencensus::stats::ViewDescriptor()
.set_name(name_)
.set_description(description_)
.set_measure(name_)
.set_aggregation(opencensus::stats::Aggregation::Count());
RegisterAsView(view_descriptor, tag_keys_);
}
/// Register this sum metric's view: accumulates the sum of recorded values.
///
/// Bug fix: this previously used Aggregation::Count(), which counts the
/// number of data points instead of summing their values, making Sum behave
/// identically to Count.
void Sum::RegisterView() {
  opencensus::stats::ViewDescriptor view_descriptor =
      opencensus::stats::ViewDescriptor()
          .set_name(name_)
          .set_description(description_)
          .set_measure(name_)
          .set_aggregation(opencensus::stats::Aggregation::Sum());
  RegisterAsView(view_descriptor, tag_keys_);
}
} // namespace stats
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/stats/metric.h
|
C/C++ Header
|
#ifndef RAY_STATS_METRIC_H
#define RAY_STATS_METRIC_H
#include <memory>
#include <unordered_map>
#include "opencensus/exporters/stats/prometheus/prometheus_exporter.h"
#include "opencensus/stats/stats.h"
#include "opencensus/tags/tag_key.h"
#include "prometheus/exposer.h"
#include "ray/util/logging.h"
namespace ray {
namespace stats {
/// Include tag_defs.h to define tag items
#include "tag_defs.h"
/// Process-wide stats configuration singleton: holds the global tags appended
/// to every metric record and the enable/disable switch.
class StatsConfig final {
 public:
  static StatsConfig &instance();

  /// Set the tags attached to every metric recorded in this process.
  void SetGlobalTags(const TagsType &global_tags);

  const TagsType &GetGlobalTags() const;

  /// When disabled, Metric::Record is a no-op. Disabled by default until
  /// stats::Init is called.
  void SetIsDisableStats(bool disable_stats);

  bool IsStatsDisabled() const;

 private:
  StatsConfig() = default;
  ~StatsConfig() = default;
  StatsConfig(const StatsConfig &) = delete;
  StatsConfig &operator=(const StatsConfig &) = delete;

 private:
  TagsType global_tags_;
  // Stats are disabled until Init() explicitly enables them.
  bool is_stats_disabled_ = true;
};
/// A thin wrapper that wraps the `opencensus::tag::measure` for using it simply.
class Metric {
 public:
  /// \param name Unique metric name, also used as the measure/view name.
  /// \param description Human-readable description of the metric.
  /// \param unit Unit of the recorded values (e.g. "pcs").
  /// \param tag_keys Custom tag keys attached to every record of this metric.
  // NOTE: the initializer list follows declaration order (name_, description_,
  // unit_, tag_keys_, measure_) to avoid -Wreorder; members are always
  // initialized in declaration order regardless of list order.
  Metric(const std::string &name, const std::string &description, const std::string &unit,
         const std::vector<opencensus::tags::TagKey> &tag_keys = {})
      : name_(name),
        description_(description),
        unit_(unit),
        tag_keys_(tag_keys),
        measure_(nullptr) {}

  virtual ~Metric() = default;

  // Allows metric globals to be used as `MetricName().Record(...)`.
  Metric &operator()() { return *this; }

  /// Get the name of this metric.
  std::string GetName() const { return name_; }

  /// Record the value for this metric.
  void Record(double value) { Record(value, {}); }

  /// Record the value for this metric.
  ///
  /// \param value The value that we record.
  /// \param tags The tag values that we want to record for this metric record.
  void Record(double value, const TagsType &tags);

 protected:
  /// Register the aggregation view for this metric; called lazily on the
  /// first Record().
  virtual void RegisterView() = 0;

 protected:
  std::string name_;
  std::string description_;
  std::string unit_;
  std::vector<opencensus::tags::TagKey> tag_keys_;
  // Lazily created on first Record(); nullptr until then.
  std::unique_ptr<opencensus::stats::Measure<double>> measure_;
};  // class Metric
/// Gauge: keeps the last recorded value, drops everything before.
class Gauge : public Metric {
 public:
  Gauge(const std::string &name, const std::string &description, const std::string &unit,
        const std::vector<opencensus::tags::TagKey> &tag_keys = {})
      : Metric(name, description, unit, tag_keys) {}

 private:
  void RegisterView() override;
};  // class Gauge
/// Histogram: distribution of recorded values over explicit bucket
/// boundaries.
class Histogram : public Metric {
 public:
  /// \param boundaries Explicit bucket boundaries for the distribution.
  // Take boundaries by const reference instead of by const value to avoid an
  // extra copy at every call site (the member copy below is unavoidable).
  Histogram(const std::string &name, const std::string &description,
            const std::string &unit, const std::vector<double> &boundaries,
            const std::vector<opencensus::tags::TagKey> &tag_keys = {})
      : Metric(name, description, unit, tag_keys), boundaries_(boundaries) {}

 private:
  void RegisterView() override;

 private:
  std::vector<double> boundaries_;
};  // class Histogram
/// Count: the number of recorded data points.
class Count : public Metric {
 public:
  Count(const std::string &name, const std::string &description, const std::string &unit,
        const std::vector<opencensus::tags::TagKey> &tag_keys = {})
      : Metric(name, description, unit, tag_keys) {}

 private:
  void RegisterView() override;
};  // class Count

/// Sum: a sum of the recorded values.
class Sum : public Metric {
 public:
  Sum(const std::string &name, const std::string &description, const std::string &unit,
      const std::vector<opencensus::tags::TagKey> &tag_keys = {})
      : Metric(name, description, unit, tag_keys) {}

 private:
  void RegisterView() override;
};  // class Sum
} // namespace stats
} // namespace ray
#endif // RAY_STATS_METRIC_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/stats/metric_defs.h
|
C/C++ Header
|
#ifndef RAY_STATS_METRIC_DEFS_H
#define RAY_STATS_METRIC_DEFS_H
/// The definitions of metrics that you can use everywhere.
///
/// There are 4 types of metric:
/// Histogram: Histogram distribution of metric points.
/// Gauge: Keeps the last recorded value, drops everything before.
/// Count: The count of the number of metric points.
/// Sum: A sum up of the metric points.
///
/// You can follow these examples to define your metrics.
// NOTE(review): the two string literals below concatenate without a space
// ("...workers.Through..."); harmless for metric export but worth fixing.
static Gauge CurrentWorker("current_worker",
                           "This metric is used for reporting states of workers."
                           "Through this, we can see the worker's state on dashboard.",
                           "1 pcs", {LanguageKey, WorkerPidKey});

static Gauge CurrentDriver("current_driver",
                           "This metric is used for reporting states of drivers.",
                           "1 pcs", {LanguageKey, DriverPidKey});

static Count TaskCountReceived("task_count_received",
                               "Number of tasks received by raylet.", "pcs", {});

// Bucket boundaries are in microseconds (100us .. 1000us).
static Histogram RedisLatency("redis_latency", "The latency of a Redis operation.", "us",
                              {100, 200, 300, 400, 500, 600, 700, 800, 900, 1000},
                              {CustomKey});

static Gauge LocalAvailableResource("local_available_resource",
                                    "The available resources on this node.", "pcs",
                                    {ResourceNameKey});

static Gauge LocalTotalResource("local_total_resource",
                                "The total resources on this node.", "pcs",
                                {ResourceNameKey});

// The gauges below all report raylet-internal component statistics keyed by
// the ValueType tag.
static Gauge ActorStats("actor_stats", "Stat metrics of the actors in raylet.", "pcs",
                        {ValueTypeKey});

static Gauge ObjectManagerStats("object_manager_stats",
                                "Stat the metric values of object in raylet", "pcs",
                                {ValueTypeKey});

static Gauge LineageCacheStats("lineage_cache_stats",
                               "Stats the metric values of lineage cache.", "pcs",
                               {ValueTypeKey});

static Gauge TaskDependencyManagerStats("task_dependency_manager_stats",
                                        "Stat the metric values of task dependency.",
                                        "pcs", {ValueTypeKey});

static Gauge SchedulingQueueStats("scheduling_queue_stats",
                                  "Stats the metric values of scheduling queue.", "pcs",
                                  {ValueTypeKey});

static Gauge ReconstructionPolicyStats(
    "reconstruction_policy_stats", "Stats the metric values of reconstruction policy.",
    "pcs", {ValueTypeKey});

static Gauge ConnectionPoolStats("connection_pool_stats",
                                 "Stats the connection pool metrics.", "pcs",
                                 {ValueTypeKey});
#endif // RAY_STATS_METRIC_DEFS_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/stats/stats.h
|
C/C++ Header
|
#ifndef RAY_STATS_STATS_H
#define RAY_STATS_STATS_H
#include <exception>
#include <string>
#include <unordered_map>
#include "opencensus/exporters/stats/prometheus/prometheus_exporter.h"
#include "opencensus/exporters/stats/stdout/stdout_exporter.h"
#include "opencensus/stats/stats.h"
#include "opencensus/tags/tag_key.h"
#include "prometheus/exposer.h"
#include "ray/stats/metric.h"
#include "ray/util/logging.h"
namespace ray {
namespace stats {
/// Include metric_defs.h to define measure items.
#include "metric_defs.h"
/// Initialize stats.
/// Initialize stats.
///
/// \param address Address the Prometheus exposer binds to, e.g. "127.0.0.1:8888".
/// \param global_tags Tags attached to every metric recorded by this process.
/// \param disable_stats If true, all metric recording becomes a no-op.
/// \param enable_stdout_exporter If true, also export view data to stdout.
static void Init(const std::string &address, const TagsType &global_tags,
                 bool disable_stats = false, bool enable_stdout_exporter = false) {
  StatsConfig::instance().SetIsDisableStats(disable_stats);
  if (disable_stats) {
    RAY_LOG(INFO) << "Disabled stats.";
    return;
  }
  // Set the global tags before wiring up any exporter, so that data exported
  // by an already-registered exporter (e.g. stdout) carries the tags even if
  // the Prometheus exposer below fails to start. Previously this ran last and
  // was skipped entirely when the exposer constructor threw.
  StatsConfig::instance().SetGlobalTags(global_tags);
  // Enable the Prometheus exporter.
  // Note that the reason for we using local static variables
  // here is to make sure they are single-instances.
  static auto exporter =
      std::make_shared<opencensus::exporters::stats::PrometheusExporter>();

  if (enable_stdout_exporter) {
    // Enable stdout exporter by default.
    opencensus::exporters::stats::StdoutExporter::Register();
  }

  // Enable prometheus exporter.
  try {
    static prometheus::Exposer exposer(address);
    exposer.RegisterCollectable(exporter);
    RAY_LOG(INFO) << "Succeeded to initialize stats: exporter address is " << address;
  } catch (std::exception &e) {
    RAY_LOG(WARNING) << "Failed to create the Prometheus exporter. This doesn't "
                     << "affect anything except stats. Caused by: " << e.what();
    return;
  }
}
} // namespace stats
} // namespace ray
#endif // RAY_STATS_STATS_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/stats/stats_test.cc
|
C++
|
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>
#include "absl/memory/memory.h"
#include "ray/stats/stats.h"
namespace ray {
/// A push-handler installed into the opencensus exporter pipeline that
/// asserts every exported view row matches what the test recorded.
class MockExporter : public opencensus::stats::StatsExporter::Handler {
 public:
  static void Register() {
    opencensus::stats::StatsExporter::RegisterPushHandler(
        absl::make_unique<MockExporter>());
  }

  void ExportViewData(
      const std::vector<std::pair<opencensus::stats::ViewDescriptor,
                                  opencensus::stats::ViewData>> &data) override {
    for (const auto &datum : data) {
      auto &descriptor = datum.first;
      auto &view_data = datum.second;

      ASSERT_EQ("current_worker", descriptor.name());
      ASSERT_EQ(opencensus::stats::ViewData::Type::kDouble, view_data.type());
      // Take rows by reference: `const auto row` copied the whole
      // (tag-values, value) pair on every iteration.
      for (const auto &row : view_data.double_data()) {
        for (size_t i = 0; i < descriptor.columns().size(); ++i) {
          if (descriptor.columns()[i].name() == "NodeAddress") {
            ASSERT_EQ("Localhost", row.first[i]);
          }
        }
        // row.second store the data of this metric.
        ASSERT_EQ(2345, row.second);
      }
    }
  }
};
class StatsTest : public ::testing::Test {
 public:
  /// Initialize stats with a local Prometheus endpoint and a global
  /// NodeAddress tag, then install the asserting mock exporter.
  void SetUp() override {
    ray::stats::Init("127.0.0.1:8888", {{stats::NodeAddressKey, "Localhost"}}, false);
    MockExporter::Register();
  }

  // NOTE(review): gtest's teardown hook is TearDown(); this method is never
  // invoked by the framework — confirm whether it was meant to be TearDown().
  void Shutdown() {}
};
TEST_F(StatsTest, F) {
  // Record the same value repeatedly (~25s total) so the periodic stats
  // exporter flushes through MockExporter's assertions at least once.
  for (size_t i = 0; i < 500; ++i) {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    stats::CurrentWorker().Record(2345);
  }
}
} // namespace ray
// Standard gtest entry point.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/stats/tag_defs.h
|
C/C++ Header
|
#ifndef RAY_STATS_TAG_DEFS_H
#define RAY_STATS_TAG_DEFS_H
/// The definitions of tag keys that you can use every where.
/// You can follow these examples to define and register your tag keys.
using TagKeyType = opencensus::tags::TagKey;
// A tag set is an ordered list of (key, value) pairs attached to a record.
using TagsType = std::vector<std::pair<opencensus::tags::TagKey, std::string>>;

// Process-wide tag keys; TagKey::Register interns the key by name.
static const TagKeyType JobNameKey = TagKeyType::Register("JobName");
static const TagKeyType CustomKey = TagKeyType::Register("CustomKey");
static const TagKeyType NodeAddressKey = TagKeyType::Register("NodeAddress");
static const TagKeyType VersionKey = TagKeyType::Register("Version");
static const TagKeyType LanguageKey = TagKeyType::Register("Language");
static const TagKeyType WorkerPidKey = TagKeyType::Register("WorkerPid");
static const TagKeyType DriverPidKey = TagKeyType::Register("DriverPid");
static const TagKeyType ResourceNameKey = TagKeyType::Register("ResourceName");
static const TagKeyType ValueTypeKey = TagKeyType::Register("ValueType");
#endif // RAY_STATS_TAG_DEFS_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/test/run_core_worker_tests.sh
|
Shell
|
#!/usr/bin/env bash
# Integration-test driver for the core worker: builds the test binaries,
# starts two redis servers, runs //:core_worker_test, then shuts redis down.
# This needs to be run in the root directory.

# Try to find an unused port for raylet to use.
PORTS="2000 2001 2002 2003 2004 2005 2006 2007 2008 2009"
RAYLET_PORT=0
for port in $PORTS; do
  nc -z localhost $port
  # nc exits non-zero when nothing is listening, i.e. the port is free.
  if [[ $? != 0 ]]; then
    RAYLET_PORT=$port
    break
  fi
done

if [[ $RAYLET_PORT == 0 ]]; then
  echo "WARNING: Could not find unused port for raylet to use. Exiting without running tests."
  exit
fi

# Cause the script to exit if a single command fails.
set -e
set -x

bazel build -c dbg $RAY_BAZEL_CONFIG "//:core_worker_test" "//:mock_worker" "//:raylet" "//:raylet_monitor" "//:libray_redis_module.so" "@plasma//:plasma_store_server"

# Get the directory in which this script is executing.
SCRIPT_DIR="`dirname \"$0\"`"
RAY_ROOT="$SCRIPT_DIR/../../.."
# Makes $RAY_ROOT an absolute path.
RAY_ROOT="`( cd \"$RAY_ROOT\" && pwd )`"
if [ -z "$RAY_ROOT" ] ; then
  exit 1
fi

# Ensure we're in the right directory.
if [ ! -d "$RAY_ROOT/python" ]; then
  echo "Unable to find root Ray directory. Has this script moved?"
  exit 1
fi

REDIS_MODULE="./bazel-bin/libray_redis_module.so"
BAZEL_BIN_PREFIX="$(bazel info -c dbg $RAY_BAZEL_CONFIG bazel-bin)"
LOAD_MODULE_ARGS="--loadmodule ${REDIS_MODULE}"
STORE_EXEC="$BAZEL_BIN_PREFIX/external/plasma/plasma_store_server"
RAYLET_EXEC="$BAZEL_BIN_PREFIX/raylet"
RAYLET_MONITOR_EXEC="$BAZEL_BIN_PREFIX/raylet_monitor"
MOCK_WORKER_EXEC="$BAZEL_BIN_PREFIX/mock_worker"

# Allow cleanup commands to fail.
bazel run "//:redis-cli" -- -p 6379 shutdown || true
sleep 1s
bazel run "//:redis-cli" -- -p 6380 shutdown || true
sleep 1s
# Start two redis servers with the ray module loaded.
bazel run "//:redis-server" -- --loglevel warning ${LOAD_MODULE_ARGS} --port 6379 &
sleep 2s
bazel run "//:redis-server" -- --loglevel warning ${LOAD_MODULE_ARGS} --port 6380 &
sleep 2s

# Run tests.
bazel run -c dbg $RAY_BAZEL_CONFIG "//:core_worker_test" $STORE_EXEC $RAYLET_EXEC $RAYLET_PORT $RAYLET_MONITOR_EXEC $MOCK_WORKER_EXEC

sleep 1s
bazel run "//:redis-cli" -- -p 6379 shutdown
bazel run "//:redis-cli" -- -p 6380 shutdown
sleep 1s
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/test/run_object_manager_tests.sh
|
Shell
|
#!/usr/bin/env bash
# Runs the object manager unit and stress tests against a local redis server.
# This needs to be run in the root directory.

# Cause the script to exit if a single command fails.
set -e
set -x

bazel build "//:object_manager_stress_test" "//:object_manager_test" "@plasma//:plasma_store_server"

# Get the directory in which this script is executing.
SCRIPT_DIR="`dirname \"$0\"`"
RAY_ROOT="$SCRIPT_DIR/../../.."
# Makes $RAY_ROOT an absolute path.
RAY_ROOT="`( cd \"$RAY_ROOT\" && pwd )`"
if [ -z "$RAY_ROOT" ] ; then
  exit 1
fi

# Ensure we're in the right directory.
if [ ! -d "$RAY_ROOT/python" ]; then
  echo "Unable to find root Ray directory. Has this script moved?"
  exit 1
fi

REDIS_MODULE="./bazel-bin/libray_redis_module.so"
LOAD_MODULE_ARGS="--loadmodule ${REDIS_MODULE}"
STORE_EXEC="./bazel-bin/external/plasma/plasma_store_server"

# Allow cleanup commands to fail.
bazel run //:redis-cli -- -p 6379 shutdown || true
sleep 1s
bazel run //:redis-server -- --loglevel warning ${LOAD_MODULE_ARGS} --port 6379 &
sleep 1s

# Run tests.
./bazel-bin/object_manager_stress_test $STORE_EXEC
sleep 1s
# Use timeout=1000ms for the Wait tests.
./bazel-bin/object_manager_test $STORE_EXEC 1000
bazel run //:redis-cli -- -p 6379 shutdown
sleep 1s

# Include raylet integration test once it's ready.
# ./bazel-bin/object_manager_integration_test $STORE_EXEC
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/test/run_object_manager_valgrind.sh
|
Shell
|
#!/usr/bin/env bash
# Same as run_object_manager_tests.sh but runs the tests under valgrind with
# full leak checking; any valgrind error fails the script (--error-exitcode=1).
# This needs to be run in the root directory.

# Cause the script to exit if a single command fails.
set -e
set -x

bazel build "//:object_manager_stress_test" "//:object_manager_test" "@plasma//:plasma_store_server"

# Get the directory in which this script is executing.
SCRIPT_DIR="`dirname \"$0\"`"
RAY_ROOT="$SCRIPT_DIR/../../.."
# Makes $RAY_ROOT an absolute path.
RAY_ROOT="`( cd \"$RAY_ROOT\" && pwd )`"
if [ -z "$RAY_ROOT" ] ; then
  exit 1
fi

# Ensure we're in the right directory.
if [ ! -d "$RAY_ROOT/python" ]; then
  echo "Unable to find root Ray directory. Has this script moved?"
  exit 1
fi

REDIS_MODULE="./bazel-bin/libray_redis_module.so"
LOAD_MODULE_ARGS="--loadmodule ${REDIS_MODULE}"
STORE_EXEC="./bazel-bin/external/plasma/plasma_store_server"

VALGRIND_CMD="valgrind --track-origins=yes --leak-check=full --show-leak-kinds=all --leak-check-heuristics=stdstring --error-exitcode=1"

# Allow cleanup commands to fail.
killall plasma_store || true
bazel run //:redis-cli -- -p 6379 shutdown || true
sleep 1s
bazel run //:redis-server -- --loglevel warning ${LOAD_MODULE_ARGS} --port 6379 &
sleep 1s

# Run tests. Use timeout=10000ms for the Wait tests since tests run slower
# in valgrind.
$VALGRIND_CMD ./bazel-bin/object_manager_test $STORE_EXEC 10000
sleep 1s
$VALGRIND_CMD ./bazel-bin/object_manager_stress_test $STORE_EXEC
bazel run //:redis-cli -- -p 6379 shutdown
sleep 1s

# Include raylet integration test once it's ready.
# $VALGRIND_CMD ./bazel-bin/object_manager_integration_test $STORE_EXEC
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/util/logging.cc
|
C++
|
#include "ray/util/logging.h"
#ifndef _WIN32
#include <execinfo.h>
#endif
#include <signal.h>
#include <stdlib.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#ifdef RAY_USE_GLOG
#include <sys/stat.h>
#include "glog/logging.h"
#endif
namespace ray {
#ifdef RAY_USE_GLOG
/// A glog Logger that writes messages to stdout instead of log files, so the
/// output lands in the process's redirected *.out file.
struct StdoutLogger : public google::base::Logger {
  virtual void Write(bool /* should flush */, time_t /* timestamp */, const char *message,
                     int length) {
    // note: always flush otherwise it never shows up in raylet.out
    std::cout << std::string(message, length) << std::flush;
  }

  virtual void Flush() { std::cout.flush(); }

  // File size is meaningless for a stream logger; report zero.
  virtual google::uint32 LogSize() { return 0; }
};

static StdoutLogger stdout_logger_singleton;
#endif
// This is the default implementation of ray log,
// which is independent of any libs.
// Messages accumulate via operator<< and are emitted to stderr; FATAL
// severity prints a backtrace and aborts in the destructor.
class CerrLog {
 public:
  CerrLog(RayLogLevel severity) : severity_(severity), has_logged_(false) {}

  virtual ~CerrLog() {
    if (has_logged_) {
      std::cerr << std::endl;
    }
    if (severity_ == RayLogLevel::FATAL) {
      PrintBackTrace();
      std::abort();
    }
  }

  std::ostream &Stream() {
    has_logged_ = true;
    return std::cerr;
  }

  template <class T>
  CerrLog &operator<<(const T &t) {
    // DEBUG messages are dropped entirely by this fallback logger.
    if (severity_ != RayLogLevel::DEBUG) {
      has_logged_ = true;
      std::cerr << t;
    }
    return *this;
  }

 protected:
  const RayLogLevel severity_;
  bool has_logged_;

  void PrintBackTrace() {
// backtrace()/backtrace_symbols_fd() come from <execinfo.h>, unavailable on
// Windows.
#if defined(_EXECINFO_H) || !defined(_WIN32)
    void *buffer[255];
    const int calls = backtrace(buffer, sizeof(buffer) / sizeof(void *));
    backtrace_symbols_fd(buffer, calls, 1);
#endif
  }
};
// The concrete logging backend: glog when available, CerrLog otherwise.
#ifdef RAY_USE_GLOG
typedef google::LogMessage LoggingProvider;
#else
typedef ray::CerrLog LoggingProvider;
#endif

// Defaults for the process-wide logging configuration; mutated by
// StartRayLog / InstallFailureSignalHandler.
RayLogLevel RayLog::severity_threshold_ = RayLogLevel::INFO;
std::string RayLog::app_name_ = "";
std::string RayLog::log_dir_ = "";
bool RayLog::is_failure_signal_handler_installed_ = false;

#ifdef RAY_USE_GLOG
using namespace google;

// Glog's severity map.
// DEBUG collapses to GLOG_INFO because glog has no DEBUG severity; DEBUG
// filtering happens in RayLog via is_enabled_ instead.
static int GetMappedSeverity(RayLogLevel severity) {
  switch (severity) {
  case RayLogLevel::DEBUG:
    return GLOG_INFO;
  case RayLogLevel::INFO:
    return GLOG_INFO;
  case RayLogLevel::WARNING:
    return GLOG_WARNING;
  case RayLogLevel::ERROR:
    return GLOG_ERROR;
  case RayLogLevel::FATAL:
    return GLOG_FATAL;
  default:
    RAY_LOG(FATAL) << "Unsupported logging level: " << static_cast<int>(severity);
    // This return won't be hit but compiler needs it.
    return GLOG_FATAL;
  }
}
#endif
/// Initialize process-wide logging. The RAY_BACKEND_LOG_LEVEL environment
/// variable, when set, overrides the `severity_threshold` argument. With
/// glog, logs go to stdout when `log_dir` is empty, otherwise to files under
/// `log_dir`.
void RayLog::StartRayLog(const std::string &app_name, RayLogLevel severity_threshold,
                         const std::string &log_dir) {
  const char *var_value = getenv("RAY_BACKEND_LOG_LEVEL");
  if (var_value != nullptr) {
    // Case-insensitive match of the environment override.
    std::string data = var_value;
    std::transform(data.begin(), data.end(), data.begin(), ::tolower);
    if (data == "debug") {
      severity_threshold = RayLogLevel::DEBUG;
    } else if (data == "info") {
      severity_threshold = RayLogLevel::INFO;
    } else if (data == "warning") {
      severity_threshold = RayLogLevel::WARNING;
    } else if (data == "error") {
      severity_threshold = RayLogLevel::ERROR;
    } else if (data == "fatal") {
      severity_threshold = RayLogLevel::FATAL;
    } else {
      RAY_LOG(WARNING) << "Unrecognized setting of RAY_BACKEND_LOG_LEVEL=" << var_value;
    }
    RAY_LOG(INFO) << "Set ray log level from environment variable RAY_BACKEND_LOG_LEVEL"
                  << " to " << static_cast<int>(severity_threshold);
  }
  severity_threshold_ = severity_threshold;
  // app_name_ must outlive InitGoogleLogging, which keeps the raw pointer.
  app_name_ = app_name;
  log_dir_ = log_dir;
#ifdef RAY_USE_GLOG
  google::InitGoogleLogging(app_name_.c_str());
  if (log_dir_.empty()) {
    // No log directory: errors to stderr, everything else to stdout via the
    // StdoutLogger singleton.
    google::SetStderrLogging(GetMappedSeverity(RayLogLevel::ERROR));
    int level = GetMappedSeverity(severity_threshold_);
    google::base::SetLogger(level, &stdout_logger_singleton);
  } else {
    // Enable log file if log_dir_ is not empty.
    auto dir_ends_with_slash = log_dir_;
    if (log_dir_[log_dir_.length() - 1] != '/') {
      dir_ends_with_slash += "/";
    }
    auto app_name_without_path = app_name;
    if (app_name.empty()) {
      app_name_without_path = "DefaultApp";
    } else {
      // Find the app name without the path.
      size_t pos = app_name.rfind('/');
      if (pos != app_name.npos && pos + 1 < app_name.length()) {
        app_name_without_path = app_name.substr(pos + 1);
      }
    }
    google::SetLogFilenameExtension(app_name_without_path.c_str());
    int level = GetMappedSeverity(severity_threshold_);
    google::SetLogDestination(level, dir_ends_with_slash.c_str());
  }
#endif
}
/// Restore the default disposition for the signals glog's failure handler
/// hooked; no-op if the handler was never installed (or glog is absent).
void RayLog::UninstallSignalAction() {
#ifdef RAY_USE_GLOG
  if (!is_failure_signal_handler_installed_) {
    return;
  }
  RAY_LOG(DEBUG) << "Uninstall signal handlers.";
  // This signal list comes from glog's signalhandler.cc.
  // https://github.com/google/glog/blob/master/src/signalhandler.cc#L58-L70
  std::vector<int> installed_signals({SIGSEGV, SIGILL, SIGFPE, SIGABRT, SIGTERM});
#ifdef _WIN32  // Do NOT use WIN32 (without the underscore); we want _WIN32 here
  for (int signal_num : installed_signals) {
    RAY_CHECK(signal(signal_num, SIG_DFL) != SIG_ERR);
  }
#else
  // POSIX: reset each signal's action to the default handler.
  struct sigaction sig_action;
  memset(&sig_action, 0, sizeof(sig_action));
  sigemptyset(&sig_action.sa_mask);
  sig_action.sa_handler = SIG_DFL;
  for (int signal_num : installed_signals) {
    RAY_CHECK(sigaction(signal_num, &sig_action, NULL) == 0);
  }
#endif
  is_failure_signal_handler_installed_ = false;
#endif
}
/// Tear down logging; pairs with StartRayLog.
void RayLog::ShutDownRayLog() {
#ifdef RAY_USE_GLOG
  UninstallSignalAction();
  // ShutdownGoogleLogging is only needed when file logging was configured.
  if (!log_dir_.empty()) {
    google::ShutdownGoogleLogging();
  }
#endif
}

/// Install glog's crash handler (prints a stack trace on fatal signals).
/// Idempotent; no-op without glog.
void RayLog::InstallFailureSignalHandler() {
#ifdef RAY_USE_GLOG
  if (is_failure_signal_handler_installed_) {
    return;
  }
  google::InstallFailureSignalHandler();
  is_failure_signal_handler_installed_ = true;
#endif
}

bool RayLog::IsLevelEnabled(RayLogLevel log_level) {
  return log_level >= severity_threshold_;
}

RayLog::RayLog(const char *file_name, int line_number, RayLogLevel severity)
    // glog does not have DEBUG level, we can handle it using is_enabled_.
    : logging_provider_(nullptr), is_enabled_(severity >= severity_threshold_) {
#ifdef RAY_USE_GLOG
  if (is_enabled_) {
    logging_provider_ =
        new google::LogMessage(file_name, line_number, GetMappedSeverity(severity));
  }
#else
  auto logging_provider = new CerrLog(severity);
  *logging_provider << file_name << ":" << line_number << ": ";
  logging_provider_ = logging_provider;
#endif
}

std::ostream &RayLog::Stream() {
  auto logging_provider = reinterpret_cast<LoggingProvider *>(logging_provider_);
#ifdef RAY_USE_GLOG
  // Before calling this function, user should check IsEnabled.
  // When IsEnabled == false, logging_provider_ will be empty.
  return logging_provider->stream();
#else
  return logging_provider->Stream();
#endif
}

bool RayLog::IsEnabled() const { return is_enabled_; }

RayLog::~RayLog() {
  // Deleting the provider emits the accumulated message (and, for FATAL,
  // aborts the process).
  if (logging_provider_ != nullptr) {
    delete reinterpret_cast<LoggingProvider *>(logging_provider_);
    logging_provider_ = nullptr;
  }
}
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/util/logging.h
|
C/C++ Header
|
#ifndef RAY_UTIL_LOGGING_H
#define RAY_UTIL_LOGGING_H
#include <iostream>
#include <string>
#if defined(_WIN32)
#ifndef _WINDOWS_
#ifndef WIN32_LEAN_AND_MEAN // Sorry for the inconvenience. Please include any related
// headers you need manually.
// (https://stackoverflow.com/a/8294669)
#define WIN32_LEAN_AND_MEAN // Prevent inclusion of WinSock2.h
#endif
#include <Windows.h> // Force inclusion of WinGDI here to resolve name conflict
#endif
#ifdef ERROR // Should be true unless someone else undef'd it already
#undef ERROR // Windows GDI defines this macro; make it a global enum so it doesn't
// conflict with our code
enum { ERROR = 0 };
#endif
#endif
namespace ray {
enum class RayLogLevel { DEBUG = -1, INFO = 0, WARNING = 1, ERROR = 2, FATAL = 3 };
// Creates a RayLog tagged with the current file and line.
#define RAY_LOG_INTERNAL(level) ::ray::RayLog(__FILE__, __LINE__, level)
// True when messages at `level` would actually be emitted.
#define RAY_LOG_ENABLED(level) ray::RayLog::IsLevelEnabled(ray::RayLogLevel::level)
// Main logging entry point; the guard makes disabled levels near-free since
// the RayLog object is never constructed.
#define RAY_LOG(level) \
  if (ray::RayLog::IsLevelEnabled(ray::RayLogLevel::level)) \
  RAY_LOG_INTERNAL(ray::RayLogLevel::level)

#define RAY_IGNORE_EXPR(expr) ((void)(expr))

// Always-on assertion: logs FATAL (which aborts) when `condition` is false.
#define RAY_CHECK(condition) \
  (condition) \
      ? RAY_IGNORE_EXPR(0) \
      : ::ray::Voidify() & ::ray::RayLog(__FILE__, __LINE__, ray::RayLogLevel::FATAL) \
            << " Check failed: " #condition " "

#ifdef NDEBUG
// Release builds: a failed debug check logs at ERROR and continues.
#define RAY_DCHECK(condition) \
  (condition) \
      ? RAY_IGNORE_EXPR(0) \
      : ::ray::Voidify() & ::ray::RayLog(__FILE__, __LINE__, ray::RayLogLevel::ERROR) \
            << " Debug check failed: " #condition " "
#else
#define RAY_DCHECK(condition) RAY_CHECK(condition)

#endif  // NDEBUG
// To make the logging lib plugable with other logging libs and make
// the implementation unawared by the user, RayLog is only a declaration
// which hide the implementation into logging.cc file.
// In logging.cc, we can choose different log libs using different macros.

// This is also a null log which does not output anything.
class RayLogBase {
 public:
  virtual ~RayLogBase(){};

  // By default, this class is a null log because it return false here.
  virtual bool IsEnabled() const { return false; };

  // Streams `t` into the underlying sink only when logging is enabled, so
  // operands of << are cheap for disabled levels.
  template <typename T>
  RayLogBase &operator<<(const T &t) {
    if (IsEnabled()) {
      Stream() << t;
    }
    return *this;
  }

 protected:
  virtual std::ostream &Stream() { return std::cerr; };
};
class RayLog : public RayLogBase {
 public:
  RayLog(const char *file_name, int line_number, RayLogLevel severity);

  virtual ~RayLog();

  /// Return whether or not current logging instance is enabled.
  ///
  /// \return True if logging is enabled and false otherwise.
  virtual bool IsEnabled() const;

  /// The init function of ray log for a program which should be called only once.
  ///
  /// \param appName The app name which starts the log.
  /// \param severity_threshold Logging threshold for the program.
  /// \param logDir Logging output file name. If empty, the log won't output to file.
  static void StartRayLog(const std::string &appName,
                          RayLogLevel severity_threshold = RayLogLevel::INFO,
                          const std::string &logDir = "");

  /// The shutdown function of ray log which should be used with StartRayLog as a pair.
  static void ShutDownRayLog();

  /// Uninstall the signal actions installed by InstallFailureSignalHandler.
  static void UninstallSignalAction();

  /// Return whether or not the log level is enabled in current setting.
  ///
  /// \param log_level The input log level to test.
  /// \return True if input log level is not lower than the threshold.
  static bool IsLevelEnabled(RayLogLevel log_level);

  /// Install the failure signal handler to output call stack when crash.
  /// If glog is not installed, this function won't do anything.
  static void InstallFailureSignalHandler();

  // Get the log level from environment variable.
  // NOTE(review): no definition is visible in logging.cc here — confirm this
  // is implemented somewhere before calling it.
  static RayLogLevel GetLogLevelFromEnv();

 private:
  // Hide the implementation of log provider by void *.
  // Otherwise, lib user may define the same macro to use the correct header file.
  void *logging_provider_;
  /// True if log messages should be logged and false if they should be ignored.
  bool is_enabled_;
  static RayLogLevel severity_threshold_;
  // In InitGoogleLogging, it simply keeps the pointer.
  // We need to make sure the app name passed to InitGoogleLogging exist.
  static std::string app_name_;
  /// The directory where the log files are stored.
  /// If this is empty, logs are printed to stdout.
  static std::string log_dir_;
  /// This flag is used to avoid calling UninstallSignalAction in ShutDownRayLog if
  /// InstallFailureSignalHandler was not called.
  static bool is_failure_signal_handler_installed_;

 protected:
  virtual std::ostream &Stream();
};
// Converts the result of the streaming expression in RAY_CHECK to void so
// that both arms of the conditional operator share the common type void.
// (This helper is copied from glog.)
class Voidify {
 public:
  Voidify() {}

  // operator& is used because its precedence sits below << but above ?:,
  // so it swallows the fully-built log expression in one bite.
  void operator&(RayLogBase &) {}
};
} // namespace ray
#endif // RAY_UTIL_LOGGING_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.