file_path
stringlengths 3
280
| file_language
stringclasses 66
values | content
stringlengths 1
1.04M
| repo_name
stringlengths 5
92
| repo_stars
int64 0
154k
| repo_description
stringlengths 0
402
| repo_primary_language
stringclasses 108
values | developer_username
stringlengths 1
25
| developer_name
stringlengths 0
30
| developer_company
stringlengths 0
82
|
|---|---|---|---|---|---|---|---|---|---|
python/ray/experimental/serve/tests/test_persistence.py
|
Python
|
import os
import subprocess
import tempfile
import ray
from ray.experimental import serve
def test_new_driver(serve_instance):
    """Register a serve endpoint from a fresh driver process and check it
    is visible from this driver's serve instance."""
    script = """
import ray
ray.init(address="auto")
from ray.experimental import serve
serve.init()
def function(flask_request):
    return "OK!"
serve.create_endpoint("driver", "/driver")
serve.create_backend(function, "driver:v1")
serve.link("driver", "driver:v1")
"""
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
        path = f.name
        f.write(script)

    try:
        proc = subprocess.Popen(["python", path])
        # The subprocess must finish cleanly within the timeout.
        return_code = proc.wait(timeout=10)
        assert return_code == 0

        handle = serve.get_handle("driver")
        assert ray.get(handle.remote()) == "OK!"
    finally:
        # Clean up the temp script even when an assertion above fails;
        # the original leaked the file on failure.
        os.remove(path)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/serve/tests/test_queue.py
|
Python
|
import pytest
import ray
from ray.experimental.serve.queues import RandomPolicyQueue
from ray.experimental.serve.queues import (RoundRobinPolicyQueue,
FixedPackingPolicyQueue)
@pytest.fixture(scope="session")
def task_runner_mock_actor():
    """Session-scoped Ray actor that records the last request it was sent.

    Queues push work to it via `_ray_serve_call`; tests read the captured
    request back with `get_recent_call`.
    """

    @ray.remote
    class TaskRunnerMock:
        def __init__(self):
            # Most recently received request item (None until first call).
            self.result = None

        def _ray_serve_call(self, request_item):
            self.result = request_item

        def get_recent_call(self):
            return self.result

    actor = TaskRunnerMock.remote()
    yield actor
def test_single_prod_cons_queue(serve_instance, task_runner_mock_actor):
    """Single producer/consumer: an enqueued request reaches the worker
    and the value written to its result object id reaches the caller."""
    q = RandomPolicyQueue()
    q.link("svc", "backend")
    result_object_id = q.enqueue_request("svc", 1, "kwargs", None)
    q.dequeue_request("backend", task_runner_mock_actor)
    got_work = ray.get(task_runner_mock_actor.get_recent_call.remote())
    assert got_work.request_args == 1
    assert got_work.request_kwargs == "kwargs"

    # Simulate the worker writing its result; the producer should see it.
    ray.worker.global_worker.put_object(2, got_work.result_object_id)
    assert ray.get(ray.ObjectID(result_object_id)) == 2
def test_slo(serve_instance, task_runner_mock_actor):
    """Requests with tighter SLOs must be dequeued first — the reverse of
    the enqueue order here, since SLO decreases as i grows."""
    q = RandomPolicyQueue()
    q.link("svc", "backend")
    for i in range(10):
        slo_ms = 1000 - 100 * i
        q.enqueue_request("svc", i, "kwargs", None, request_slo_ms=slo_ms)
    for i in range(10):
        q.dequeue_request("backend", task_runner_mock_actor)
        got_work = ray.get(task_runner_mock_actor.get_recent_call.remote())
        # Request 9 had the smallest SLO, so it comes out first.
        assert got_work.request_args == (9 - i)
def test_alter_backend(serve_instance, task_runner_mock_actor):
    """Retargeting a service's traffic to a new backend routes subsequent
    requests to that backend."""
    q = RandomPolicyQueue()

    # All traffic to backend-1.
    q.set_traffic("svc", {"backend-1": 1})
    result_object_id = q.enqueue_request("svc", 1, "kwargs", None)
    q.dequeue_request("backend-1", task_runner_mock_actor)
    got_work = ray.get(task_runner_mock_actor.get_recent_call.remote())
    assert got_work.request_args == 1
    ray.worker.global_worker.put_object(2, got_work.result_object_id)
    assert ray.get(ray.ObjectID(result_object_id)) == 2

    # Switch all traffic to backend-2 and verify it now receives requests.
    q.set_traffic("svc", {"backend-2": 1})
    result_object_id = q.enqueue_request("svc", 1, "kwargs", None)
    q.dequeue_request("backend-2", task_runner_mock_actor)
    got_work = ray.get(task_runner_mock_actor.get_recent_call.remote())
    assert got_work.request_args == 1
    ray.worker.global_worker.put_object(2, got_work.result_object_id)
    assert ray.get(ray.ObjectID(result_object_id)) == 2
def test_split_traffic(serve_instance, task_runner_mock_actor):
    """With a 50/50 traffic split, both backends should receive work."""
    q = RandomPolicyQueue()

    q.set_traffic("svc", {"backend-1": 0.5, "backend-2": 0.5})
    # Assume a 50% split: the probability that all 20 requests go to a
    # single queue is 0.5^20 ~ 1e-6, so both backends should have work.
    for _ in range(20):
        q.enqueue_request("svc", 1, "kwargs", None)
    q.dequeue_request("backend-1", task_runner_mock_actor)
    result_one = ray.get(task_runner_mock_actor.get_recent_call.remote())
    q.dequeue_request("backend-2", task_runner_mock_actor)
    result_two = ray.get(task_runner_mock_actor.get_recent_call.remote())
    got_work = [result_one, result_two]
    assert [g.request_args for g in got_work] == [1, 1]
def test_split_traffic_round_robin(serve_instance, task_runner_mock_actor):
    """Round-robin policy alternates backends on consecutive requests."""
    q = RoundRobinPolicyQueue()
    q.set_traffic("svc", {"backend-1": 0.5, "backend-2": 0.5})
    # Since the round robin policy is stateful, firing two queries
    # consecutively transfers them to two different backends.
    for _ in range(2):
        q.enqueue_request("svc", 1, "kwargs", None)
    q.dequeue_request("backend-1", task_runner_mock_actor)
    result_one = ray.get(task_runner_mock_actor.get_recent_call.remote())
    q.dequeue_request("backend-2", task_runner_mock_actor)
    result_two = ray.get(task_runner_mock_actor.get_recent_call.remote())
    got_work = [result_one, result_two]
    assert [g.request_args for g in got_work] == [1, 1]
def test_split_traffic_fixed_packing(serve_instance, task_runner_mock_actor):
    """Fixed-packing policy sends `packing_num` consecutive requests to one
    backend before moving on to the next."""
    packing_num = 4
    q = FixedPackingPolicyQueue(packing_num=packing_num)
    q.set_traffic("svc", {"backend-1": 0.5, "backend-2": 0.5})

    # fire twice the number of queries as the packing number
    for i in range(2 * packing_num):
        q.enqueue_request("svc", i, "kwargs", None)

    # both the backends will get equal number of queries
    # as it is packed round robin
    for _ in range(packing_num):
        q.dequeue_request("backend-1", task_runner_mock_actor)
        result_one = ray.get(task_runner_mock_actor.get_recent_call.remote())
    for _ in range(packing_num):
        q.dequeue_request("backend-2", task_runner_mock_actor)
        result_two = ray.get(task_runner_mock_actor.get_recent_call.remote())
    got_work = [result_one, result_two]
    # Last request each backend saw: backend-1 got 0..packing_num-1,
    # backend-2 got packing_num..2*packing_num-1.
    assert [g.request_args
            for g in got_work] == [packing_num - 1, 2 * packing_num - 1]
def test_queue_remove_replicas(serve_instance, task_runner_mock_actor):
    """Destroying a replica removes it from the queue's worker set."""
    q = RandomPolicyQueue()
    q.dequeue_request("backend", task_runner_mock_actor)
    # NOTE(review): "destory" is a typo in the queue API itself; the call
    # must stay spelled as the API defines it.
    q.remove_and_destory_replica("backend", task_runner_mock_actor)
    assert len(q.workers["backend"]) == 0
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/serve/tests/test_routing.py
|
Python
|
import os
import tempfile
from ray.experimental.serve.kv_store_service import (
InMemoryKVStore, RayInternalKVStore, SQLiteKVStore)
def test_default_in_memory_kv():
    """Put/get/overwrite round-trips through the in-memory KV store."""
    store = InMemoryKVStore("")
    store.put("1", 2)
    assert store.get("1") == 2
    # Overwriting the key replaces the old value.
    store.put("1", 3)
    assert store.get("1") == 3
    assert store.as_dict() == {"1": 3}
def test_ray_interal_kv(ray_instance):
    """Put/get/overwrite against Ray's internal KV store, in the default
    namespace and in a custom one.

    NOTE(review): "interal" is a typo for "internal"; the name is kept so
    pytest keeps collecting the same test id.
    """
    kv = RayInternalKVStore("")
    kv.put("1", 2)
    assert kv.get("1") == 2
    kv.put("1", 3)
    assert kv.get("1") == 3
    assert kv.as_dict() == {"1": 3}

    # Same behavior under a non-default namespace.
    kv = RayInternalKVStore("othernamespace")
    kv.put("1", 2)
    assert kv.get("1") == 2
    kv.put("1", 3)
    assert kv.get("1") == 3
    assert kv.as_dict() == {"1": 3}
def test_sqlite_kv():
    """Get/namespace/override semantics of the SQLite-backed KV store."""
    fd, path = tempfile.mkstemp()
    # mkstemp returns an open file descriptor; the original discarded it
    # without closing, leaking the fd. SQLite reopens the file by path.
    os.close(fd)
    try:
        # Test get
        kv = SQLiteKVStore("routing_table", db_path=path)
        kv.put("/api", "api-endpoint")
        assert kv.get("/api") == "api-endpoint"
        assert kv.get("not-exist") is None

        # Test namespace: tables in the same db file are independent.
        kv2 = SQLiteKVStore("other_table", db_path=path)
        kv2.put("/api", "api-endpoint-two")
        assert kv2.get("/api") == "api-endpoint-two"

        # Test as dict
        assert kv.as_dict() == {"/api": "api-endpoint"}

        # Test override
        kv.put("/api", "api-new")
        assert kv.get("/api") == "api-new"
    finally:
        # Remove the temp db even when an assertion above fails.
        os.remove(path)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/serve/tests/test_task_runner.py
|
Python
|
import pytest
import ray
import ray.experimental.serve.context as context
from ray.experimental.serve.queues import RoundRobinPolicyQueueActor
from ray.experimental.serve.task_runner import (
RayServeMixin, TaskRunner, TaskRunnerActor, wrap_to_ray_error)
def test_runner_basic():
    """TaskRunner simply forwards its call to the wrapped function."""

    def identity(value):
        return value

    runner = TaskRunner(identity)
    assert runner(1) == 1
def test_runner_wraps_error():
    """Arbitrary exceptions are wrapped into RayTaskError so callers can
    handle them uniformly via ray.get."""
    wrapped = wrap_to_ray_error(Exception())
    assert isinstance(wrapped, ray.exceptions.RayTaskError)
def test_runner_actor(serve_instance):
    """End-to-end: a TaskRunnerActor wired to a queue echoes each request's
    keyword argument back through the result object id."""
    q = RoundRobinPolicyQueueActor.remote()

    def echo(flask_request, i=None):
        return i

    CONSUMER_NAME = "runner"
    PRODUCER_NAME = "prod"
    runner = TaskRunnerActor.remote(echo)

    # Register the runner with the queue and start its fetch loop.
    runner._ray_serve_setup.remote(CONSUMER_NAME, q, runner)
    runner._ray_serve_fetch.remote()

    q.link.remote(PRODUCER_NAME, CONSUMER_NAME)

    for query in [333, 444, 555]:
        result_token = ray.ObjectID(
            ray.get(
                q.enqueue_request.remote(
                    PRODUCER_NAME,
                    request_args=None,
                    request_kwargs={"i": query},
                    request_context=context.TaskContext.Python)))
        assert ray.get(result_token) == query
def test_ray_serve_mixin(serve_instance):
    """A user class combined with RayServeMixin behaves like a serve
    backend: requests flow through the queue and return computed results."""
    q = RoundRobinPolicyQueueActor.remote()
    CONSUMER_NAME = "runner-cls"
    PRODUCER_NAME = "prod-cls"

    class MyAdder:
        def __init__(self, inc):
            self.increment = inc

        def __call__(self, flask_request, i=None):
            return i + self.increment

    @ray.remote
    class CustomActor(MyAdder, RayServeMixin):
        pass

    runner = CustomActor.remote(3)

    # Register with the queue and start pulling work.
    runner._ray_serve_setup.remote(CONSUMER_NAME, q, runner)
    runner._ray_serve_fetch.remote()

    q.link.remote(PRODUCER_NAME, CONSUMER_NAME)

    for query in [333, 444, 555]:
        result_token = ray.ObjectID(
            ray.get(
                q.enqueue_request.remote(
                    PRODUCER_NAME,
                    request_args=None,
                    request_kwargs={"i": query},
                    request_context=context.TaskContext.Python)))
        # The adder was constructed with increment=3.
        assert ray.get(result_token) == query + 3
def test_task_runner_check_context(serve_instance):
    """Touching flask_request fields outside a web context must surface as
    a RayTaskError when the result is fetched."""
    q = RoundRobinPolicyQueueActor.remote()

    def echo(flask_request, i=None):
        # Accessing the flask_request without web context should throw.
        return flask_request.args["i"]

    CONSUMER_NAME = "runner"
    PRODUCER_NAME = "producer"
    runner = TaskRunnerActor.remote(echo)

    runner._ray_serve_setup.remote(CONSUMER_NAME, q, runner)
    runner._ray_serve_fetch.remote()

    q.link.remote(PRODUCER_NAME, CONSUMER_NAME)
    result_token = ray.ObjectID(
        ray.get(
            q.enqueue_request.remote(
                PRODUCER_NAME,
                request_args=None,
                request_kwargs={"i": 42},
                request_context=context.TaskContext.Python)))

    # The error raised inside the runner propagates through ray.get.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(result_token)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/serve/tests/test_util.py
|
Python
|
import json
from ray.experimental.serve.utils import BytesEncoder
def test_bytes_encoder():
    """BytesEncoder decodes nested bytes values into utf-8 strings."""
    payload = {"inp": {"nest": b"bytes"}}
    expected = {"inp": {"nest": "bytes"}}
    round_tripped = json.loads(json.dumps(payload, cls=BytesEncoder))
    assert round_tripped == expected
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/serve/utils.py
|
Python
|
import json
import logging
import random
import string
import time
import io
import requests
from pygments import formatters, highlight, lexers
from ray.experimental.serve.context import FakeFlaskRequest, TaskContext
from ray.experimental.serve.http_util import build_flask_request
def parse_request_item(request_item):
    """Unpack a queued request into (args, kwargs, is_web_context,
    result_object_id) for the task runner.

    Web requests carry an ASGI scope plus raw body bytes and are rebuilt
    into a flask request; non-web requests get a FakeFlaskRequest
    placeholder plus the original kwargs.
    """
    if request_item.request_context == TaskContext.Web:
        is_web_context = True
        asgi_scope, body_bytes = request_item.request_args

        # Rebuild a flask request object from the raw ASGI data.
        flask_request = build_flask_request(asgi_scope, io.BytesIO(body_bytes))
        args = (flask_request, )
        kwargs = {}
    else:
        is_web_context = False
        args = (FakeFlaskRequest(), )
        kwargs = request_item.request_kwargs

    result_object_id = request_item.result_object_id
    return args, kwargs, is_web_context, result_object_id
def _get_logger():
logger = logging.getLogger("ray.serve")
# TODO(simon): Make logging level configurable.
logger.setLevel(logging.INFO)
return logger
logger = _get_logger()
class BytesEncoder(json.JSONEncoder):
    """Allow bytes to be part of the JSON document.

    BytesEncoder will walk the JSON tree and decode bytes with utf-8 codec.

    Example:
        >>> json.dumps({b'a': b'c'}, cls=BytesEncoder)
        '{"a":"c"}'
    """

    def default(self, o):  # pylint: disable=E0202
        # Only bytes get special handling; anything else defers to the
        # base class, which raises TypeError for unserializable objects.
        if not isinstance(o, bytes):
            return super().default(o)
        return o.decode("utf-8")
def pformat_color_json(d):
    """Pretty-format and colorize a dictionary as JSON using pygments."""
    plain = json.dumps(d, sort_keys=True, indent=4)
    colored = highlight(plain, lexers.JsonLexer(),
                        formatters.TerminalFormatter())
    return colored
def block_until_http_ready(http_endpoint, num_retries=5, backoff_time_s=1):
    """Poll http_endpoint until it answers with HTTP 200.

    Retries up to num_retries times with exponential backoff starting at
    backoff_time_s seconds. Raises Exception when the endpoint is still
    not ready after all retries.
    """
    retries = num_retries
    while True:
        try:
            resp = requests.get(http_endpoint)
            assert resp.status_code == 200
            # Server is ready; the original slept one extra backoff period
            # even after a successful probe — return immediately instead.
            return
        except Exception:
            pass
        retries -= 1
        if retries == 0:
            raise Exception(
                "HTTP server not ready after {} retries.".format(num_retries))
        # Exponential backoff between attempts.
        time.sleep(backoff_time_s)
        backoff_time_s *= 2
def get_random_letters(length=6):
    """Return a random string of ASCII letters of the given length."""
    picks = (random.choice(string.ascii_letters) for _ in range(length))
    return "".join(picks)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/examples/cifar_pytorch_example.py
|
Python
|
import os
import torch
import torch.nn as nn
import argparse
from ray import tune
import torch.utils.data
from torch import distributed
from torch.utils.data.distributed import DistributedSampler
import torchvision
import torchvision.transforms as transforms
import ray
from ray.experimental.sgd.pytorch import (PyTorchTrainer, PyTorchTrainable)
from ray.experimental.sgd.pytorch.resnet import ResNet18
def initialization_hook(runner):
    """Set NCCL environment variables on each training worker."""
    print("NCCL DEBUG SET")
    # Need this for avoiding a connection restart issue
    os.environ.update({
        "NCCL_SOCKET_IFNAME": "^docker0,lo",
        "NCCL_LL_THRESHOLD": "0",
        "NCCL_DEBUG": "INFO",
    })
def train(model, train_iterator, criterion, optimizer, config):
    """Run one training pass over train_iterator; return mean loss/accuracy.

    NOTE(review): the update order is backward() -> step() -> zero_grad(),
    i.e. gradients are cleared after each step rather than before the
    next backward — equivalent for plain SGD, but worth confirming.
    """
    model.train()
    train_loss, total_num, correct = 0, 0, 0
    for batch_idx, (data, target) in enumerate(train_iterator):
        # In test mode only the first batch is used, to keep CI fast.
        if config.get("test_mode") and batch_idx > 0:
            break
        # get small model update
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        # Weight the loss sum by batch size for a correct overall mean.
        train_loss += loss.item() * target.size(0)
        total_num += target.size(0)
        _, predicted = output.max(1)
        correct += predicted.eq(target).sum().item()
        optimizer.step()
        optimizer.zero_grad()
    stats = {
        "train_loss": train_loss / total_num,
        "train_acc": correct / total_num
    }
    return stats
def validate(model, val_iterator, criterion, config):
    """Evaluate the model over val_iterator; return mean accuracy/loss."""
    # switch to evaluate mode
    model.eval()
    correct = 0
    total = 0
    total_loss = 0
    # No gradients are needed during evaluation.
    with torch.no_grad():
        for batch_idx, (features, target) in enumerate(val_iterator):
            # In test mode only the first few batches are evaluated.
            if config.get("test_mode") and batch_idx > 10:
                break
            if torch.cuda.is_available():
                features = features.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)

            # compute output
            output = model(features)
            loss = criterion(output, target)
            total_loss += loss.item() * target.size(0)
            _, predicted = torch.max(output.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()

    stats = {"mean_accuracy": correct / total, "mean_loss": total_loss / total}
    return stats
def cifar_creator(batch_size, config):
    """Download CIFAR10 and return (train_loader, validation_loader).

    When torch.distributed is initialized, both loaders shard their
    dataset with DistributedSampler and disable shuffling (the sampler
    handles shuffling itself).
    """
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])  # meanstd transformation

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    # The train split downloads the data; the test split reuses the cache.
    train_dataset = torchvision.datasets.CIFAR10(
        root="~/data", train=True, download=True, transform=transform_train)
    validation_dataset = torchvision.datasets.CIFAR10(
        root="~/data", train=False, download=False, transform=transform_test)

    train_sampler = None
    if distributed.is_initialized():
        train_sampler = DistributedSampler(train_dataset)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=(train_sampler is None),
        num_workers=2,
        pin_memory=False,
        sampler=train_sampler)

    validation_sampler = None
    if distributed.is_initialized():
        validation_sampler = DistributedSampler(validation_dataset)

    validation_loader = torch.utils.data.DataLoader(
        validation_dataset,
        batch_size=batch_size,
        shuffle=(validation_sampler is None),
        num_workers=2,
        pin_memory=False,
        sampler=validation_sampler)

    return train_loader, validation_loader
def optimizer_creator(model, config):
    """Returns optimizer (SGD; learning rate from config, default 0.1)."""
    learning_rate = config.get("lr", 0.1)
    return torch.optim.SGD(model.parameters(), lr=learning_rate)
def train_example(num_replicas=1, use_gpu=False, test_mode=False):
    """Train ResNet18 on CIFAR10 with PyTorchTrainer for 5 epochs."""
    config = {"test_mode": test_mode}
    trainer1 = PyTorchTrainer(
        ResNet18,
        cifar_creator,
        optimizer_creator,
        lambda config: nn.CrossEntropyLoss(),
        initialization_hook=initialization_hook,
        train_function=train,
        validation_function=validate,
        num_replicas=num_replicas,
        config=config,
        use_gpu=use_gpu,
        # Small batches keep the smoke test fast.
        batch_size=16 if test_mode else 512,
        # NCCL needs GPUs; fall back to gloo on CPU.
        backend="nccl" if use_gpu else "gloo")
    for i in range(5):
        stats = trainer1.train()
        print(stats)

    print(trainer1.validate())
    trainer1.shutdown()
    print("success!")
def tune_example(num_replicas=1, use_gpu=False, test_mode=False):
    """Run a small Tune sweep over the learning rate; return best config."""
    config = {
        "model_creator": ResNet18,
        "data_creator": cifar_creator,
        "optimizer_creator": optimizer_creator,
        "loss_creator": lambda config: nn.CrossEntropyLoss(),
        "train_function": train,
        "validation_function": validate,
        "num_replicas": num_replicas,
        "initialization_hook": initialization_hook,
        "use_gpu": use_gpu,
        "batch_size": 16 if test_mode else 512,
        # Inner config is forwarded to the creators/train functions.
        "config": {
            "lr": tune.choice([1e-4, 1e-3, 5e-3, 1e-2]),
            "test_mode": test_mode
        },
        "backend": "nccl" if use_gpu else "gloo"
    }

    analysis = tune.run(
        PyTorchTrainable,
        num_samples=2,
        config=config,
        stop={"training_iteration": 2},
        verbose=2)

    return analysis.get_best_config(metric="mean_accuracy", mode="max")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ray-redis-address",
        required=False,
        type=str,
        help="the address to use for Redis")
    parser.add_argument(
        "--num-replicas",
        "-n",
        type=int,
        default=1,
        help="Sets number of replicas for training.")
    parser.add_argument(
        "--use-gpu",
        action="store_true",
        default=False,
        help="Enables GPU training")
    parser.add_argument(
        "--smoke-test",
        action="store_true",
        default=False,
        help="Finish quickly for testing.")
    parser.add_argument(
        "--tune", action="store_true", default=False, help="Tune training")

    # parse_known_args lets unrelated launcher flags pass through.
    args, _ = parser.parse_known_args()
    ray.init(address=args.ray_redis_address, log_to_driver=False)

    if args.tune:
        tune_example(
            num_replicas=args.num_replicas,
            use_gpu=args.use_gpu,
            test_mode=args.smoke_test)
    else:
        train_example(
            num_replicas=args.num_replicas,
            use_gpu=args.use_gpu,
            test_mode=args.smoke_test)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/examples/cifar_tf_example.py
|
Python
|
"""
#Train a simple deep CNN on the CIFAR10 small images dataset.
It gets to 75% validation accuracy in 25 epochs, and 79% after 50 epochs.
(it"s still underfitting at that point, though).
"""
import argparse
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import os
from filelock import FileLock
import ray
from ray.experimental.sgd.tf.tf_trainer import TFTrainer
num_classes = 10
def fetch_keras_data():
    """Load CIFAR10, one-hot encode labels, scale pixels to [0, 1].

    A file lock guards the download so multiple workers on one machine do
    not race on the dataset cache.
    """
    import tensorflow as tf

    # The data, split between train and test sets:
    with FileLock(os.path.expanduser("~/.cifar.lock")):
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # Convert class vectors to binary class matrices.
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)
    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    x_train /= 255
    x_test /= 255
    return (x_train, y_train), (x_test, y_test)
# Materialize the dataset once at import time so input_shape is known
# before any model is constructed.
(x_train, y_train), (x_test, y_test) = fetch_keras_data()
input_shape = x_train.shape[1:]
def create_model(config):
    """Build and compile the small CNN used for CIFAR10 classification."""
    import tensorflow as tf
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation("softmax"))

    # initiate RMSprop optimizer
    # NOTE(review): `lr`/`decay` are legacy Keras argument names — confirm
    # the pinned TF version still accepts them.
    opt = tf.keras.optimizers.RMSprop(lr=0.001, decay=1e-6)

    # Let"s train the model using RMSprop
    model.compile(
        loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model
def data_creator(config):
    """Return (train_dataset, test_dataset) tf.data pipelines."""
    import tensorflow as tf
    batch_size = config["batch_size"]
    (x_train, y_train), (x_test, y_test) = fetch_keras_data()
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))

    # Repeat is needed so the pipelines do not run out of data when the
    # trainer requests a fixed number of steps per epoch.
    train_dataset = train_dataset.repeat().shuffle(
        len(x_train)).batch(batch_size)
    test_dataset = test_dataset.repeat().batch(batch_size)
    return train_dataset, test_dataset
def _make_generator(x_train, y_train, batch_size):
    """Return a Keras generator yielding augmented CIFAR10 batches."""
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        # divide inputs by std of the dataset
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        # randomly rotate images in the range (degrees, 0 to 180)
        rotation_range=0,
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.1,
        shear_range=0.,  # set range for random shear
        zoom_range=0.,  # set range for random zoom
        channel_shift_range=0.,  # set range for random channel shifts
        # set mode for filling points outside the input boundaries
        fill_mode="nearest",
        cval=0.,  # value used for fill_mode = "constant"
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    return datagen.flow(x_train, y_train, batch_size=batch_size)
def data_augmentation_creator(config):
    """Return (train, test) pipelines where training data is augmented by
    the Keras ImageDataGenerator wrapped in a tf.data generator."""
    import tensorflow as tf
    batch_size = config["batch_size"]
    (x_train, y_train), (x_test, y_test) = fetch_keras_data()
    trainset = tf.data.Dataset.from_generator(
        lambda: _make_generator(x_train, y_train, batch_size),
        output_types=(tf.float32, tf.float32),
        # Shapes left fully dynamic to work around:
        # https://github.com/tensorflow/tensorflow/issues/24520
        output_shapes=(tf.TensorShape((None, None, None, None)),
                       tf.TensorShape((None, 10))))
    trainset = trainset.repeat()
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    test_dataset = test_dataset.repeat().batch(batch_size)
    return trainset, test_dataset
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--address",
        required=False,
        type=str,
        help="the address to use for Ray")
    parser.add_argument(
        "--num-replicas",
        "-n",
        type=int,
        default=1,
        help="Sets number of replicas for training.")
    parser.add_argument(
        "--batch-size", type=int, default=32, help="Sets batch size.")
    parser.add_argument(
        "--use-gpu",
        action="store_true",
        default=False,
        help="Enables GPU training")
    parser.add_argument(
        "--augment-data",
        action="store_true",
        default=False,
        help="Sets data augmentation.")
    parser.add_argument(
        "--smoke-test",
        action="store_true",
        default=False,
        help="Finish quickly for testing. Assume False for users.")

    args, _ = parser.parse_known_args()
    ray.init(address=args.address)

    # CIFAR10 split sizes; step counts are cut to 10 for the smoke test.
    data_size = 60000
    test_size = 10000
    batch_size = args.batch_size

    num_train_steps = 10 if args.smoke_test else data_size // batch_size
    num_eval_steps = 10 if args.smoke_test else test_size // batch_size

    trainer = TFTrainer(
        model_creator=create_model,
        data_creator=(data_augmentation_creator
                      if args.augment_data else data_creator),
        num_replicas=args.num_replicas,
        use_gpu=args.use_gpu,
        verbose=True,
        config={
            "batch_size": batch_size,
            "fit_config": {
                "steps_per_epoch": num_train_steps,
            },
            "evaluate_config": {
                "steps": num_eval_steps,
            }
        })

    for i in range(3):
        # Trains num epochs
        train_stats1 = trainer.train()
        train_stats1.update(trainer.validate())
        print("iter {}:".format(i), train_stats1)

    # Keep training the exported model locally after shutting Ray down.
    model = trainer.get_model()
    trainer.shutdown()
    dataset, test_dataset = data_augmentation_creator(
        dict(batch_size=batch_size))
    model.fit(dataset, steps_per_epoch=num_train_steps, epochs=1)
    scores = model.evaluate(test_dataset, steps=num_eval_steps)
    print("Test loss:", scores[0])
    print("Test accuracy:", scores[1])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/examples/tensorflow_train_example.py
|
Python
|
import argparse
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np
import ray
from ray import tune
from ray.experimental.sgd.tf.tf_trainer import TFTrainer, TFTrainable
# Sizes of the synthetic train/test splits.
NUM_TRAIN_SAMPLES = 1000
NUM_TEST_SAMPLES = 400


def create_config(batch_size):
    """Build the TFTrainer config so each epoch covers the whole split."""
    steps_per_epoch = NUM_TRAIN_SAMPLES // batch_size
    eval_steps = NUM_TEST_SAMPLES // batch_size
    return {
        "batch_size": batch_size,
        "fit_config": {
            "steps_per_epoch": steps_per_epoch
        },
        "evaluate_config": {
            "steps": eval_steps,
        }
    }
def linear_dataset(a=2, size=1000):
    """Generate a toy linear dataset as (size, 1) column vectors.

    NOTE(review): the slope parameter `a` is never used — targets are
    always x / 2. Kept as-is to preserve the example's behavior.
    """
    x = np.random.rand(size)
    y = x / 2
    return x.reshape((-1, 1)), y.reshape((-1, 1))
def simple_dataset(config):
    """Return repeating, batched (train, test) tf.data pipelines over the
    synthetic linear dataset."""
    batch_size = config["batch_size"]
    x_train, y_train = linear_dataset(size=NUM_TRAIN_SAMPLES)
    x_test, y_test = linear_dataset(size=NUM_TEST_SAMPLES)

    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    # Shuffle + repeat so training can draw an unbounded stream of batches.
    train_dataset = train_dataset.shuffle(NUM_TRAIN_SAMPLES).repeat().batch(
        batch_size)
    test_dataset = test_dataset.repeat().batch(batch_size)

    return train_dataset, test_dataset
def simple_model(config):
    """Two-layer dense regression model compiled with SGD + MSE loss."""
    model = Sequential([Dense(10, input_shape=(1, )), Dense(1)])

    model.compile(
        optimizer="sgd",
        loss="mean_squared_error",
        metrics=["mean_squared_error"])

    return model
def train_example(num_replicas=1, batch_size=128, use_gpu=False):
    """Train the simple model with TFTrainer, validating after each call."""
    trainer = TFTrainer(
        model_creator=simple_model,
        data_creator=simple_dataset,
        num_replicas=num_replicas,
        use_gpu=use_gpu,
        verbose=True,
        config=create_config(batch_size))

    # Two train/validate rounds, printing stats after each.
    train_stats1 = trainer.train()
    train_stats1.update(trainer.validate())
    print(train_stats1)

    train_stats2 = trainer.train()
    train_stats2.update(trainer.validate())
    print(train_stats2)

    val_stats = trainer.validate()
    print(val_stats)
    print("success!")
def tune_example(num_replicas=1, use_gpu=False):
    """Run a 2-sample Tune experiment over TFTrainable; return the config
    minimizing validation_loss."""
    config = {
        "model_creator": tune.function(simple_model),
        "data_creator": tune.function(simple_dataset),
        "num_replicas": num_replicas,
        "use_gpu": use_gpu,
        # Trainer-level settings (batching, fit/evaluate step counts).
        "trainer_config": create_config(batch_size=128)
    }

    analysis = tune.run(
        TFTrainable,
        num_samples=2,
        config=config,
        stop={"training_iteration": 2},
        verbose=1)

    return analysis.get_best_config(metric="validation_loss", mode="min")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--address",
        required=False,
        type=str,
        help="the address to use for Ray")
    parser.add_argument(
        "--num-replicas",
        "-n",
        type=int,
        default=1,
        help="Sets number of replicas for training.")
    parser.add_argument(
        "--use-gpu",
        action="store_true",
        default=False,
        help="Enables GPU training")
    parser.add_argument(
        "--tune", action="store_true", default=False, help="Tune training")

    # Ignore unknown flags so this composes with external launchers.
    args, _ = parser.parse_known_args()
    ray.init(address=args.address)

    if args.tune:
        tune_example(num_replicas=args.num_replicas, use_gpu=args.use_gpu)
    else:
        train_example(num_replicas=args.num_replicas, use_gpu=args.use_gpu)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/examples/train_example.py
|
Python
|
"""
This file holds code for a Training guide for PytorchSGD in the documentation.
It ignores yapf because yapf doesn't allow comments right after code blocks,
but we put comments right after code blocks to prevent large white spaces
in the documentation.
"""
# yapf: disable
# __torch_train_example__
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch import distributed
from torch.utils.data.distributed import DistributedSampler
from ray.experimental.sgd.pytorch.pytorch_trainer import PyTorchTrainer
class LinearDataset(torch.utils.data.Dataset):
    """Synthetic dataset of points on the line y = a * x + b."""

    def __init__(self, a, b, size=1000):
        # Evenly spaced inputs over [0, 10).
        inputs = np.arange(0, 10, 10 / size, dtype=np.float32)
        self.x = torch.from_numpy(inputs)
        self.y = torch.from_numpy(a * inputs + b)

    def __getitem__(self, index):
        # Each item is a (1,)-shaped input/target pair.
        return self.x[index, None], self.y[index, None]

    def __len__(self):
        return len(self.x)
def model_creator(config):
    """Return the single-parameter linear model to train."""
    return nn.Linear(in_features=1, out_features=1)
def optimizer_creator(model, config):
    """Returns optimizer."""
    # Fixed learning rate; this example does not tune it.
    return torch.optim.SGD(model.parameters(), lr=0.01)
def _make_loader(dataset, batch_size):
    """Build a DataLoader, sharding with DistributedSampler when torch
    distributed is initialized (the sampler then owns shuffling)."""
    sampler = None
    if distributed.is_initialized():
        sampler = DistributedSampler(dataset)
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(sampler is None),
        sampler=sampler)


def data_creator(batch_size, config):
    """Returns training dataloader, validation dataloader.

    The duplicated sampler/loader construction is factored into
    _make_loader so both splits are built the same way.
    """
    train_dataset = LinearDataset(2, 5)
    validation_dataset = LinearDataset(2, 5, size=400)
    train_loader = _make_loader(train_dataset, batch_size)
    validation_loader = _make_loader(validation_dataset, batch_size)
    return train_loader, validation_loader
def train_example(num_replicas=1, use_gpu=False):
    """Train the linear model for 5 epochs and print learned weights."""
    trainer1 = PyTorchTrainer(
        model_creator,
        data_creator,
        optimizer_creator,
        loss_creator=lambda config: nn.MSELoss(),
        num_replicas=num_replicas,
        use_gpu=use_gpu,
        # Scale the global batch size with the number of replicas.
        batch_size=num_replicas * 4,
        backend="gloo")
    for i in range(5):
        stats = trainer1.train()
        print(stats)

    print(trainer1.validate())
    m = trainer1.get_model()
    # Expect weight ~2 and bias ~5, matching LinearDataset(2, 5).
    print("trained weight: % .2f, bias: % .2f" % (
        m.weight.item(), m.bias.item()))
    trainer1.shutdown()
    print("success!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--address",
        required=False,
        type=str,
        help="the address to use for Ray")
    parser.add_argument(
        "--num-replicas",
        "-n",
        type=int,
        default=1,
        help="Sets number of replicas for training.")
    parser.add_argument(
        "--use-gpu",
        action="store_true",
        default=False,
        help="Enables GPU training")
    parser.add_argument(
        "--tune", action="store_true", default=False, help="Tune training")
    args, _ = parser.parse_known_args()

    # Deferred import keeps the documented example code above
    # self-contained for the docs build.
    import ray

    ray.init(address=args.address)
    train_example(num_replicas=args.num_replicas, use_gpu=args.use_gpu)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/examples/tune_example.py
|
Python
|
# yapf: disable
"""
This file holds code for a Distributed Pytorch + Tune page in the docs.
It ignores yapf because yapf doesn't allow comments right after code blocks,
but we put comments right after code blocks to prevent large white spaces
in the documentation.
"""
# __torch_tune_example__
import numpy as np
import torch
import torch.nn as nn
from torch import distributed
from torch.utils.data.distributed import DistributedSampler
import ray
from ray import tune
from ray.experimental.sgd.pytorch.pytorch_trainer import PyTorchTrainable
class LinearDataset(torch.utils.data.Dataset):
    """Synthetic dataset of points on the line y = a * x + b.

    Inputs are evenly spaced over [0, 10).
    """

    def __init__(self, a, b, size=1000):
        # The original drew random x values and then immediately
        # overwrote them with arange; the dead assignment is removed.
        x = np.arange(0, 10, 10 / size, dtype=np.float32)
        self.x = torch.from_numpy(x)
        self.y = torch.from_numpy(a * x + b)

    def __getitem__(self, index):
        return self.x[index, None], self.y[index, None]

    def __len__(self):
        return len(self.x)
def model_creator(config):
    """Build the 1-in/1-out linear model; ``config`` is unused."""
    return nn.Linear(in_features=1, out_features=1)
def optimizer_creator(model, config):
    """Create an SGD optimizer for *model*; ``config["lr"]`` overrides 1e-4."""
    learning_rate = config.get("lr", 1e-4)
    return torch.optim.SGD(model.parameters(), lr=learning_rate)
def data_creator(batch_size, config):
    """Returns training dataloader, validation dataloader."""

    def _make_loader(dataset):
        # Shard across workers when torch.distributed is up; otherwise
        # fall back to in-process shuffling.
        sampler = None
        if distributed.is_initialized():
            sampler = DistributedSampler(dataset)
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=(sampler is None),
            sampler=sampler)

    train_loader = _make_loader(LinearDataset(2, 5))
    validation_loader = _make_loader(LinearDataset(2, 5, size=400))
    return train_loader, validation_loader
def tune_example(num_replicas=1, use_gpu=False):
    """Run a short Tune sweep over PyTorchTrainable.

    Returns the config achieving the minimum ``validation_loss``.
    """
    trainable_config = {
        "model_creator": tune.function(model_creator),
        "data_creator": tune.function(data_creator),
        "optimizer_creator": tune.function(optimizer_creator),
        "loss_creator": tune.function(lambda config: nn.MSELoss()),
        "num_replicas": num_replicas,
        "use_gpu": use_gpu,
        "batch_size": 512,
        "backend": "gloo",
    }
    analysis = tune.run(
        PyTorchTrainable,
        num_samples=12,
        config=trainable_config,
        stop={"training_iteration": 2},
        verbose=1)
    return analysis.get_best_config(metric="validation_loss", mode="min")
if __name__ == "__main__":
    # Script entry point: parse CLI flags, connect to Ray, and launch
    # the Tune sweep.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--address",
        type=str,
        help="the address to use for Ray")
    parser.add_argument(
        "--num-replicas",
        "-n",
        type=int,
        default=1,
        help="Sets number of replicas for training.")
    parser.add_argument(
        "--use-gpu",
        action="store_true",
        default=False,
        help="Enables GPU training")
    parser.add_argument(
        "--tune", action="store_true", default=False, help="Tune training")
    # NOTE(review): --tune is parsed but never read below; this script
    # always runs tune_example.
    args, _ = parser.parse_known_args()
    ray.init(address=args.address)
    tune_example(num_replicas=args.num_replicas, use_gpu=args.use_gpu)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/pytorch/__init__.py
|
Python
|
from ray.experimental.sgd.pytorch.pytorch_trainer import (PyTorchTrainer,
PyTorchTrainable)
__all__ = ["PyTorchTrainer", "PyTorchTrainable"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/pytorch/distributed_pytorch_runner.py
|
Python
|
import collections
from filelock import FileLock
import logging
import os
import torch.nn as nn
import torch.distributed as dist
import torch.utils.data
from torch.nn.parallel import DistributedDataParallel
from ray.experimental.sgd.pytorch.pytorch_runner import PyTorchRunner
logger = logging.getLogger(__name__)
class DistributedPyTorchRunner(PyTorchRunner):
    """Manages a distributed PyTorch model replica."""

    def __init__(self, *args, backend="gloo", **kwargs):
        """Initializes the runner.

        Args:
            args: Arguments for the PyTorchRunner.
            kwargs: Keyword arguments for the PyTorchRunner.
            backend (string): backend used by distributed PyTorch.
        """
        super(DistributedPyTorchRunner, self).__init__(*args, **kwargs)
        self.backend = backend

    def setup(self, url, world_rank, world_size):
        """Connects to the distributed PyTorch backend and initializes the model.

        Args:
            url (str): the URL used to connect to distributed PyTorch.
            world_rank (int): the index of the runner.
            world_size (int): the total number of runners.
        """
        self._setup_distributed_pytorch(url, world_rank, world_size)
        self._setup_training()

    def _setup_distributed_pytorch(self, url, world_rank, world_size):
        # Join the process group; timed under the "setup_proc" timer.
        with self._timers["setup_proc"]:
            self.world_rank = world_rank
            logger.debug(
                "Connecting to {} world_rank: {} world_size: {}".format(
                    url, world_rank, world_size))
            logger.debug("using {}".format(self.backend))
            dist.init_process_group(
                backend=self.backend,
                init_method=url,
                rank=world_rank,
                world_size=world_size)

    def _setup_training(self):
        # Fix: collections.Iterable was deprecated in Python 3.3 and
        # removed in 3.10; use the collections.abc alias instead.
        from collections.abc import Iterable

        logger.debug("Creating model")
        self.models = self.model_creator(self.config)
        if not isinstance(self.models, Iterable):
            self.models = [self.models]
        assert all(isinstance(model, nn.Module) for model in self.models), (
            "All models must be PyTorch models: {}.".format(self.models))
        if torch.cuda.is_available():
            self.models = [model.cuda() for model in self.models]
        # Wrap each model so gradients are all-reduced across replicas.
        self.models = [DistributedDataParallel(model) for model in self.models]

        logger.debug("Creating optimizer.")
        self.optimizers = self.optimizer_creator(self.given_models,
                                                 self.config)
        if not isinstance(self.optimizers, Iterable):
            self.optimizers = [self.optimizers]

        self.criterion = self.loss_creator(self.config)
        if torch.cuda.is_available():
            self.criterion = self.criterion.cuda()

        logger.debug("Creating dataset.")
        # FileLock guards dataset setup when several workers share a node.
        with FileLock(os.path.expanduser("~/.ray_data.lock")):
            data_loaders = self.data_creator(self.batch_size, self.config)
        self.train_loader, self.validation_loader = self._validate_loaders(
            data_loaders)

    def step(self):
        """Runs a training epoch and updates the model parameters.

        Automatically sets epoch of sampler if possible.
        """
        logger.debug("Starting step")
        # DistributedSampler reshuffles per epoch only when told the epoch.
        if hasattr(self.train_loader.sampler, "set_epoch"):
            self.train_loader.sampler.set_epoch(self.epoch)
        return super(DistributedPyTorchRunner, self).step()

    def get_state(self):
        """Returns the state of the runner."""
        # This is so that we create a duplicate of weights into CPU rather than
        # move the model weights entirely out of the GPU, so that we can
        # resume training while saving intermediate checkpoints.
        cpu_state_dicts = []
        for model in self.models:
            # .module unwraps the DistributedDataParallel wrapper so the
            # checkpoint keys match a plain model.
            state_dict = model.module.state_dict()
            for k, v in state_dict.items():
                state_dict[k] = v.cpu()
            cpu_state_dicts += [state_dict]
        return {
            "epoch": self.epoch,
            "models": cpu_state_dicts,
            "optimizers": [opt.state_dict() for opt in self.optimizers],
            "stats": self.stats()
        }

    def set_state(self, state):
        """Sets the state of the model."""
        # TODO: restore timer stats
        for model, model_state_dict in zip(self.models, state["models"]):
            model.module.load_state_dict(model_state_dict)
        for optimizer, opt_state_dict in zip(self.optimizers,
                                             state["optimizers"]):
            optimizer.load_state_dict(opt_state_dict)
        self.epoch = state["stats"]["epoch"]

    def shutdown(self):
        """Attempts to shut down the worker."""
        super(DistributedPyTorchRunner, self).shutdown()
        dist.destroy_process_group()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/pytorch/examples/dcgan.py
|
Python
|
#!/usr/bin/env python
import argparse
import os
import torch
import torch.nn as nn
from torch import distributed
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
from torch.autograd import Variable
from torch.nn import functional as F
from torch.utils.data.distributed import DistributedSampler
from scipy.stats import entropy
import ray
from ray.experimental.sgd.pytorch import PyTorchTrainer
# Training parameters
# TRAIN_BATCHES caps batches per epoch when config["test_mode"] is set
# (see train() below).
TRAIN_BATCHES = 5
# Number of channels in the training images. For color images this is 3
num_channels = 1
# Size of z latent vector (i.e. size of generator input)
latent_vector_size = 100
# Size of feature maps in generator
features_g = 32
# Size of feature maps in discriminator
features_d = 32
def data_creator(batch_size, config):
    """Build the MNIST training dataloader; there is no validation set.

    Images are resized to 32x32 and normalized with mean/std 0.5.
    Returns a ``(train_loader, None)`` pair.
    """
    transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.5, ), (0.5, )),
    ])
    dataset = dset.MNIST(root="~/mnist/", download=True, transform=transform)
    # Shard the dataset across replicas when distributed training is active.
    sampler = None
    if distributed.is_initialized():
        sampler = DistributedSampler(dataset)
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=3,
        shuffle=(sampler is None),
        sampler=sampler)
    return loader, None
def weights_init(m):
    """DCGAN weight init: N(0, 0.02) for conv layers, N(1, 0.02) weight
    plus zero bias for batch-norm layers; other modules are untouched."""
    layer_type = type(m).__name__
    if "Conv" in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class Generator(nn.Module):
    """DCGAN generator: maps a (latent_vector_size, 1, 1) noise tensor to
    a (num_channels, 32, 32) image with Tanh-bounded values."""

    def __init__(self):
        super(Generator, self).__init__()
        self.main = nn.Sequential(
            # input is Z, going into a convolution; 1x1 -> 4x4
            nn.ConvTranspose2d(
                latent_vector_size, features_g * 4, 4, 1, 0, bias=False),
            nn.BatchNorm2d(features_g * 4),
            nn.ReLU(True),
            # 4x4 -> 8x8 (kernel 4, stride 2, pad 1 doubles each side)
            nn.ConvTranspose2d(
                features_g * 4, features_g * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(features_g * 2),
            nn.ReLU(True),
            # 8x8 -> 16x16
            nn.ConvTranspose2d(
                features_g * 2, features_g, 4, 2, 1, bias=False),
            nn.BatchNorm2d(features_g),
            nn.ReLU(True),
            # 16x16 -> 32x32, squashed into [-1, 1] by Tanh
            nn.ConvTranspose2d(features_g, num_channels, 4, 2, 1, bias=False),
            nn.Tanh())

    def forward(self, input):
        return self.main(input)
class Discriminator(nn.Module):
    """DCGAN discriminator: maps a (num_channels, 32, 32) image to a
    single real/fake probability (Sigmoid output)."""

    def __init__(self):
        super(Discriminator, self).__init__()
        self.main = nn.Sequential(
            # 32x32 -> 16x16 (kernel 4, stride 2, pad 1 halves each side)
            nn.Conv2d(num_channels, features_d, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # 16x16 -> 8x8
            nn.Conv2d(features_d, features_d * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(features_d * 2), nn.LeakyReLU(0.2, inplace=True),
            # 8x8 -> 4x4
            nn.Conv2d(features_d * 2, features_d * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(features_d * 4), nn.LeakyReLU(0.2, inplace=True),
            # 4x4 -> 1x1 probability map
            nn.Conv2d(features_d * 4, 1, 4, 1, 0, bias=False), nn.Sigmoid())

    def forward(self, input):
        return self.main(input)
class Net(nn.Module):
    """LeNet for MNist classification, used for inception_score."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # 320 = 20 channels * 4 * 4 spatial, assuming a 28x28 input
        # (inception_score upsamples to 28x28 before calling this net).
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Returns per-class log-probabilities over the 10 digits.
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
def inception_score(imgs, batch_size=32, splits=1):
    """Compute the inception score of ``imgs`` with a pretrained MNIST
    classifier.

    Args:
        imgs: indexable collection of image tensors; fed through a
            DataLoader in batches.
        batch_size (int): evaluation batch size.
        splits (int): number of splits the score is averaged over.

    Returns:
        (mean, std) of exp(mean KL(p(y|x) || p(y))) across the splits.
    """
    N = len(imgs)
    dtype = torch.FloatTensor
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)
    cm = Net()
    # NOTE(review): ``model_path`` is a module-level global assigned in
    # __main__; calling this function without it set raises NameError.
    cm.load_state_dict(torch.load(model_path))
    cm.eval()
    up = nn.Upsample(size=(28, 28), mode="bilinear").type(dtype)

    def get_pred(x):
        x = up(x)
        x = cm(x)
        # Fix: pass dim explicitly — implicit-dim softmax is deprecated.
        # dim=1 matches the old implicit behavior for 2-D class logits.
        return F.softmax(x, dim=1).data.cpu().numpy()

    preds = np.zeros((N, 10))
    for i, batch in enumerate(dataloader, 0):
        batch = batch.type(dtype)
        batchv = Variable(batch)
        batch_size_i = batch.size()[0]
        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv)

    # Now compute the mean kl-div
    split_scores = []
    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
def model_creator(config):
    """Instantiate the DCGAN (discriminator, generator) pair, applying
    the DCGAN weight initialization to each; ``config`` is unused."""
    discriminator = Discriminator()
    discriminator.apply(weights_init)
    generator = Generator()
    generator.apply(weights_init)
    return discriminator, generator
def train(models, dataloader, criterion, optimizers, config):
    """Run one DCGAN training epoch.

    Args:
        models: (discriminator, generator) pair.
        dataloader: loader yielding (images, labels) batches.
        criterion: loss applied to discriminator outputs (BCELoss here).
        optimizers: (discriminator_optimizer, generator_optimizer) pair.
        config (dict): if config["test_mode"] is truthy, only
            TRAIN_BATCHES batches are processed.

    Returns:
        dict with the final generator loss, discriminator loss, and the
        inception score of the last fake batch.
    """
    netD, netG = models
    optimD, optimG = optimizers
    real_label = 1
    fake_label = 0
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    for i, data in enumerate(dataloader, 0):
        if i >= TRAIN_BATCHES and config.get("test_mode"):
            break
        # --- Discriminator update on real then fake images.
        netD.zero_grad()
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        label = torch.full((b_size, ), real_label, device=device)
        output = netD(real_cpu).view(-1)
        errD_real = criterion(output, label)
        errD_real.backward()
        noise = torch.randn(b_size, latent_vector_size, 1, 1, device=device)
        fake = netG(noise)
        label.fill_(fake_label)
        # detach() so the discriminator loss does not backprop into netG.
        output = netD(fake.detach()).view(-1)
        errD_fake = criterion(output, label)
        errD_fake.backward()
        errD = errD_real + errD_fake
        optimD.step()
        # --- Generator update: use real labels so netG maximizes D(G(z)).
        netG.zero_grad()
        label.fill_(real_label)
        output = netD(fake).view(-1)
        errG = criterion(output, label)
        errG.backward()
        optimG.step()
    # NOTE(review): if the dataloader yields no batches, `fake` and
    # `errG` below are unbound and this raises NameError — confirm
    # loaders are never empty.
    is_score, is_std = inception_score(fake)
    return {
        "loss_g": errG.item(),
        "loss_d": errD.item(),
        "inception": is_score
    }
def optimizer_creator(models, config):
    """Build Adam optimizers (betas 0.5/0.999) for the (discriminator,
    generator) pair; ``config["lr"]`` overrides the 0.01 default."""
    net_d, net_g = models
    lr = config.get("lr", 0.01)
    d_optimizer = optim.Adam(net_d.parameters(), lr=lr, betas=(0.5, 0.999))
    g_optimizer = optim.Adam(net_g.parameters(), lr=lr, betas=(0.5, 0.999))
    return d_optimizer, g_optimizer
def train_example(num_replicas=1, use_gpu=False, test_mode=False):
    """Train the DCGAN for ten epochs and return the (still live) trainer."""
    batch_size = 16 if test_mode else 512
    backend = "nccl" if use_gpu else "gloo"
    trainer = PyTorchTrainer(
        model_creator,
        data_creator,
        optimizer_creator,
        lambda config: nn.BCELoss(),
        train_function=train,
        validation_function=False,
        num_replicas=num_replicas,
        config={"test_mode": test_mode},
        use_gpu=use_gpu,
        batch_size=batch_size,
        backend=backend)
    for _ in range(10):
        print(trainer.train(max_retries=3))
    return trainer
if __name__ == "__main__":
    # Script entry point: parse CLI flags, connect to Ray, locate the
    # pretrained MNIST classifier, and train the DCGAN.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    parser.add_argument(
        "--address",
        required=False,
        type=str,
        help="the address to use for Redis")
    parser.add_argument(
        "--num-replicas",
        "-n",
        type=int,
        default=1,
        help="Sets number of replicas for training.")
    parser.add_argument(
        "--use-gpu",
        action="store_true",
        default=False,
        help="Enables GPU training")
    args, _ = parser.parse_known_args()
    ray.init(address=args.address)
    path = os.path.dirname(ray.__file__)
    # Module-level global read by inception_score() at call time.
    model_path = os.path.join(
        path, "experimental/sgd/pytorch/examples/mnist_cnn.pt")
    # load the pretrained mnist classification model for inception_score
    trainer = train_example(
        num_replicas=args.num_replicas,
        use_gpu=args.use_gpu,
        test_mode=args.smoke_test)
    models = trainer.get_model()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/pytorch/pytorch_runner.py
|
Python
|
import collections
from filelock import FileLock
import logging
import os
import torch
import torch.utils.data
from torch.utils.data import DataLoader
import ray
from ray.experimental.sgd.pytorch import utils as pytorch_utils
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
class PyTorchRunner:
    """Manages a PyTorch model for training."""

    def __init__(self,
                 model_creator,
                 data_creator,
                 optimizer_creator,
                 loss_creator,
                 train_function=None,
                 validation_function=None,
                 config=None,
                 batch_size=16):
        """Initializes the runner.

        Args:
            model_creator (dict -> torch.nn.Module): see pytorch_trainer.py
            data_creator (int, dict -> DataLoader, DataLoader): see
                pytorch_trainer.py.
            optimizer_creator (torch.nn.Module, dict -> loss, optimizer):
                see pytorch_trainer.py.
            loss_creator (dict -> loss): see pytorch_trainer.py.
            train_function: see pytorch_trainer.py
            validation_function: see pytorch_trainer.py
            config (dict): see pytorch_trainer.py.
            batch_size (int): see pytorch_trainer.py.
        """
        self.model_creator = model_creator
        self.data_creator = data_creator
        self.optimizer_creator = optimizer_creator
        self.loss_creator = loss_creator
        self.config = {} if config is None else config
        # Fall back to the default train/validate loops when the user
        # does not supply custom ones.
        self.train_function = train_function or pytorch_utils.train
        self.validation_function = (validation_function
                                    or pytorch_utils.validate)
        self.batch_size = batch_size
        self.verbose = True
        self.epoch = 0
        self._timers = {
            k: utils.TimerStat(window_size=1)
            for k in [
                "setup_proc", "setup_model", "get_state", "set_state",
                "validation", "training"
            ]
        }
        self.models = None
        self.optimizers = None
        self.criterion = None
        self.train_loader = None
        self.validation_loader = None

    def _validate_loaders(self, data_loaders):
        """Normalize data_creator output to a (train, validation) pair."""
        assert data_loaders, "Dataloaders need to be returned in data_creator."
        if isinstance(data_loaders, DataLoader):
            return data_loaders, None
        elif len(data_loaders) == 2 and isinstance(data_loaders[0],
                                                   DataLoader):
            return data_loaders
        else:
            raise ValueError(
                "Dataloaders must be <= 2. Got {}".format(data_loaders))

    def setup(self):
        """Initializes the model."""
        # Fix: collections.Iterable was deprecated in Python 3.3 and
        # removed in 3.10; use the collections.abc alias instead.
        from collections.abc import Iterable

        logger.debug("Creating model")
        self.models = self.model_creator(self.config)
        if not isinstance(self.models, Iterable):
            self.models = [self.models]
        if torch.cuda.is_available():
            self.models = [model.cuda() for model in self.models]

        logger.debug("Creating optimizer")
        self.optimizers = self.optimizer_creator(self.given_models,
                                                 self.config)
        if not isinstance(self.optimizers, Iterable):
            self.optimizers = [self.optimizers]

        self.criterion = self.loss_creator(self.config)
        if torch.cuda.is_available():
            self.criterion = self.criterion.cuda()

        logger.debug("Creating dataset")
        # FileLock guards dataset setup when several workers share a node.
        with FileLock(os.path.expanduser("~/.ray_data.lock")):
            dataloaders = self.data_creator(self.batch_size, self.config)
        self.train_loader, self.validation_loader = self._validate_loaders(
            dataloaders)

    def get_node_ip(self):
        """Returns the IP address of the current node."""
        return ray.services.get_node_ip_address()

    def find_free_port(self):
        """Finds a free port on the current node."""
        return utils.find_free_port()

    def step(self):
        """Runs a training epoch and updates the model parameters."""
        logger.debug("Begin Training Epoch {}".format(self.epoch + 1))
        with self._timers["training"]:
            train_stats = self.train_function(
                self.given_models, self.train_loader, self.criterion,
                self.given_optimizers, self.config)
            train_stats["epoch"] = self.epoch
        self.epoch += 1
        train_stats.update(self.stats())
        return train_stats

    def validate(self):
        """Evaluates the model on the validation data set."""
        if self.validation_loader is None:
            raise ValueError("No validation dataloader provided.")
        with self._timers["validation"]:
            validation_stats = self.validation_function(
                self.given_models, self.validation_loader, self.criterion,
                self.config)
        validation_stats.update(self.stats())
        return validation_stats

    def stats(self):
        """Returns a dictionary of statistics collected."""
        stats = {"epoch": self.epoch}
        for k, t in self._timers.items():
            stats[k + "_time_mean"] = t.mean
            stats[k + "_time_total"] = t.sum
            t.reset()
        return stats

    def get_state(self):
        """Returns the state of the runner."""
        # Fix: the original used model.cpu().state_dict(), which moves the
        # live model off the GPU as a side effect. Copy the weights to CPU
        # instead, matching DistributedPyTorchRunner.get_state, so training
        # can continue on GPU after an intermediate checkpoint.
        cpu_state_dicts = []
        for model in self.models:
            state_dict = model.state_dict()
            for k, v in state_dict.items():
                state_dict[k] = v.cpu()
            cpu_state_dicts.append(state_dict)
        return {
            "epoch": self.epoch,
            "models": cpu_state_dicts,
            "optimizers": [opt.state_dict() for opt in self.optimizers],
            "stats": self.stats()
        }

    def set_state(self, state):
        """Sets the state of the model."""
        # TODO: restore timer stats
        for model, state_dict in zip(self.models, state["models"]):
            model.load_state_dict(state_dict)
        for optimizer, state_dict in zip(self.optimizers, state["optimizers"]):
            optimizer.load_state_dict(state_dict)
        self.epoch = state["stats"]["epoch"]

    def apply_fn(self, fn):
        # Run an arbitrary function against this runner (used for
        # initialization hooks).
        return fn(self)

    def shutdown(self):
        """Attempts to shut down the worker."""
        del self.validation_loader
        del self.train_loader
        del self.criterion
        del self.optimizers
        del self.models
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    @property
    def given_optimizers(self):
        # Unwrap the single-optimizer case for user-facing functions.
        if len(self.optimizers) > 1:
            return self.optimizers
        else:
            return self.optimizers[0]

    @property
    def given_models(self):
        # Unwrap the single-model case for user-facing functions.
        if len(self.models) > 1:
            return self.models
        else:
            return self.models[0]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/pytorch/pytorch_trainer.py
|
Python
|
import numpy as np
import os
import torch
import torch.distributed as dist
import logging
import numbers
import tempfile
import time
import ray
from ray.tune import Trainable
from ray.tune.trial import Resources
from ray.experimental.sgd.pytorch.distributed_pytorch_runner import (
DistributedPyTorchRunner)
from ray.experimental.sgd import utils
from ray.experimental.sgd.pytorch.pytorch_runner import PyTorchRunner
logger = logging.getLogger(__name__)
RESIZE_COOLDOWN_S = 10
class PyTorchTrainer:
    """Train a PyTorch model using distributed PyTorch.

    Launches a set of actors which connect via distributed PyTorch and
    coordinate gradient updates to train the provided model.
    """

    def __init__(self,
                 model_creator,
                 data_creator,
                 optimizer_creator,
                 loss_creator,
                 train_function=None,
                 validation_function=None,
                 initialization_hook=None,
                 config=None,
                 num_replicas=1,
                 use_gpu=False,
                 batch_size=16,
                 backend="auto"):
        """Sets up the PyTorch trainer.

        Args:
            model_creator (dict -> torch.nn.Module): creates the model
                using the config.
            data_creator (int, dict -> DataLoader, DataLoader): Function that
                takes in (batch_size, config) and returns two Torch DataLoader
                objects.
            optimizer_creator (torch.nn.Module, dict -> optimizer):
                creates the loss and optimizer using the model and the config.
            loss_creator (dict -> loss): Creates the loss function/criterion
                using the config.
            train_function: Trains a model for a epoch. This takes in (
                model, train_dataloader, criterion, optimizer, config), and
                returns a dict of training stats.
            validation_function: Runs validation. This takes in (
                model, val_dataloader, criterion, config) and returns a dict of
                validation stats.
            initialization_hook: function run on every worker before
                training starts.
            config (dict): configuration passed to "model_creator",
                "data_creator", "optimizer_creator", and "loss_creator".
            num_replicas (int): the number of workers used in distributed
                training.
            use_gpu (bool): Sets resource allocation for workers to 1 GPU
                if true.
            batch_size (int): batch size for an update.
            backend (string): backend used by distributed PyTorch.
        """
        # TODO: add support for mixed precision
        # TODO: add support for callbacks
        if num_replicas > 1 and not dist.is_available():
            raise ValueError(
                ("Distributed PyTorch is not supported on macOS. "
                 "To run without distributed PyTorch, set 'num_replicas=1'. "
                 "For more information, see "
                 "https://github.com/pytorch/examples/issues/467."))

        self.model_creator = model_creator
        self.data_creator = data_creator
        self.train_function = train_function
        self.optimizer_creator = optimizer_creator
        self.loss_creator = loss_creator
        self.validation_function = validation_function
        self.initialization_hook = initialization_hook
        self.config = {} if config is None else config
        self.optimizer_timer = utils.TimerStat(window_size=1)

        if backend == "auto":
            backend = "nccl" if use_gpu else "gloo"
        logger.info("Using {} as backend.".format(backend))
        self.backend = backend
        self.use_gpu = use_gpu
        self.batch_size = batch_size
        self.max_replicas = num_replicas
        self.temp_dir = tempfile.mkdtemp(prefix="raysgd")
        self._num_failures = 0
        self._last_resize = float("-inf")
        self._start_workers(self.max_replicas)

    def _start_workers(self, num_replicas):
        # Fix: the original mixed an f-string with %-formatting
        # (f"..." % num_replicas); use lazy logger arguments instead.
        logger.info("start_workers: Setting %d replicas.", num_replicas)
        if num_replicas == 1:
            # Generate actor class
            Runner = ray.remote(
                num_cpus=1, num_gpus=int(self.use_gpu))(PyTorchRunner)
            # Start workers
            self.workers = [
                Runner.remote(
                    self.model_creator,
                    self.data_creator,
                    self.optimizer_creator,
                    self.loss_creator,
                    train_function=self.train_function,
                    validation_function=self.validation_function,
                    config=self.config,
                    batch_size=self.batch_size)
            ]
            if self.initialization_hook:
                self.apply_all_workers(self.initialization_hook)
            # Get setup tasks in order to throw errors on failure
            ray.get(self.workers[0].setup.remote())
        else:
            # Generate actor class
            Runner = ray.remote(
                num_cpus=1,
                num_gpus=int(self.use_gpu))(DistributedPyTorchRunner)
            # Compute batch size per replica
            batch_size_per_replica = self.batch_size // num_replicas
            if self.batch_size % num_replicas > 0:
                new_batch_size = batch_size_per_replica * num_replicas
                logger.warning(
                    ("Changing batch size from {old_batch_size} to "
                     "{new_batch_size} to evenly distribute batches across "
                     "{num_replicas} replicas.").format(
                         old_batch_size=self.batch_size,
                         new_batch_size=new_batch_size,
                         num_replicas=num_replicas))
            # Start workers
            self.workers = [
                Runner.remote(
                    self.model_creator,
                    self.data_creator,
                    self.optimizer_creator,
                    self.loss_creator,
                    backend=self.backend,
                    train_function=self.train_function,
                    validation_function=self.validation_function,
                    config=self.config,
                    batch_size=batch_size_per_replica)
                for i in range(num_replicas)
            ]
            if self.initialization_hook:
                self.apply_all_workers(self.initialization_hook)
            # Compute URL for initializing distributed PyTorch
            ip = ray.get(self.workers[0].get_node_ip.remote())
            port = ray.get(self.workers[0].find_free_port.remote())
            address = "tcp://{ip}:{port}".format(ip=ip, port=port)
            # Get setup tasks in order to throw errors on failure
            ray.get([
                worker.setup.remote(address, i, len(self.workers))
                for i, worker in enumerate(self.workers)
            ])

    def train(self, max_retries=10, checkpoint="auto"):
        """Runs a training epoch.

        Runs an average over all values returned from workers. Set
        `max_retries` to enable fault handling in case of instance preemption.

        Args:
            max_retries (int): Must be non-negative. If set to N, will
                kill all current workers, query the Ray global state for
                total available resources, and re-launch up to the
                available resources. Behavior is not well-defined
                in case of shared cluster usage.
            checkpoint (str): Path to checkpoint to restore from if retrying.
                If max_retries is set and checkpoint == "auto", PyTorchTrainer
                will save a checkpoint before starting to train.
        """
        assert max_retries >= 0, "`max_retries` must be non-negative."
        if max_retries:
            if checkpoint == "auto":
                logger.debug("Retrying detected. Automatically checkpointing.")
                checkpoint = self.save(
                    os.path.join(self.temp_dir, "tmp_checkpoint"))
            elif not checkpoint:
                raise ValueError("Cannot retry from empty checkpoint.")

        if checkpoint and self._should_resize():
            logger.info("Resize opportunity detected. Attempting to scale up.")
            self._resize_workers(checkpoint=checkpoint)

        with self.optimizer_timer:
            success, worker_stats = self._train_step()

        # Fault handling
        for i in range(max_retries):
            if success:
                break
            else:
                self._num_failures += 1
            self._resize_workers(checkpoint=checkpoint)
            logger.info("Retrying training step with %d workers." % len(
                self.workers))
            success, worker_stats = self._train_step()
        if not success:
            raise RuntimeError("Training run failed.")

        worker_stats = ray.get(worker_stats)

        train_stats = {}
        for stat_key in worker_stats[0]:
            # Fix: the original tested isinstance(worker_stats[0], Number),
            # which is always False for a stats dict, so numeric stats were
            # never averaged across workers. Test the value instead.
            if isinstance(worker_stats[0][stat_key], numbers.Number):
                train_stats[stat_key] = np.nanmean(
                    [s.get(stat_key, np.nan) for s in worker_stats])
            else:
                train_stats[stat_key] = worker_stats[0][stat_key]
        return train_stats

    def _train_step(self):
        worker_stats = [w.step.remote() for w in self.workers]
        success = utils.check_for_failure(worker_stats)
        return success, worker_stats

    def apply_all_workers(self, fn):
        # Run fn on every worker and wait for completion.
        return ray.get([w.apply_fn.remote(fn) for w in self.workers])

    def validate(self):
        """Evaluates the model on the validation data set."""
        if self.validation_function is False:
            return {}
        worker_stats = ray.get([w.validate.remote() for w in self.workers])
        validation_stats = {}
        for stat_key in worker_stats[0]:
            validation_stats[stat_key] = np.nanmean(
                [s.get(stat_key, np.nan) for s in worker_stats])
        return validation_stats

    def get_model(self):
        """Returns the learned model(s)."""
        models = self.model_creator(self.config)
        state = ray.get(self.workers[0].get_state.remote())
        if len(state["models"]) == 1:
            models.load_state_dict(state["models"][0])
        else:
            for model, state_dict in zip(models, state["models"]):
                model.load_state_dict(state_dict)
        return models

    def save(self, checkpoint):
        """Saves the model(s) to the provided checkpoint.

        Args:
            checkpoint (str): Path to target checkpoint file.
        """
        state = ray.get(self.workers[0].get_state.remote())
        torch.save(state, checkpoint)
        return checkpoint

    def restore(self, checkpoint):
        """Restores the model from the provided checkpoint.

        Args:
            checkpoint (str): Path to target checkpoint file.
        """
        state = torch.load(checkpoint)
        state_id = ray.put(state)
        ray.get([worker.set_state.remote(state_id) for worker in self.workers])

    def shutdown(self, force=False):
        """Shuts down workers and releases resources."""
        for worker in self.workers:
            if not force:
                worker.shutdown.remote()
                worker.__ray_terminate__.remote()
            else:
                logger.warning("Killing worker {}.".format(worker))
                worker.__ray_kill__()

        self.workers = []

    def _resize_workers(self, checkpoint, max_retries=10):
        # check available resources
        self.shutdown(force=True)
        assert checkpoint, "Cannot restore without checkpoint."

        time.sleep(1)
        for i in range(max_retries):
            resources = ray.available_resources()
            new_workers = min(resources.get("CPU", 0), self.max_replicas)
            if self.use_gpu:
                new_workers = min(resources.get("GPU", 0), new_workers)
            if new_workers:
                self._last_resize = time.time()
                self._start_workers(int(new_workers))
                self.restore(checkpoint)
                return
            else:
                # Exponential backoff before re-querying the cluster.
                delay = 2**i
                logger.info("Resources: {}".format(resources))
                logger.warning(
                    "No new workers found. Retrying in %d sec." % delay)
                time.sleep(delay)
        raise RuntimeError("Exceeded max_retries for relaunching workers.")

    def _should_resize(self):
        """Returns True if past cooldown and exists resources to scale up."""
        worker_gap = self.max_replicas - len(self.workers)
        past_cooldown = (time.time() - self._last_resize) > RESIZE_COOLDOWN_S
        if past_cooldown and worker_gap:
            resources = ray.available_resources()
            potential_workers = min(resources.get("CPU", 0), self.max_replicas)
            if self.use_gpu:
                potential_workers = min(
                    resources.get("GPU", 0), potential_workers)
            return potential_workers > 0
        return False
class PyTorchTrainable(Trainable):
    """Tune Trainable that delegates to a PyTorchTrainer."""

    @classmethod
    def default_resource_request(cls, config):
        # Reserve one extra CPU (and optionally one GPU) per replica.
        replicas = config["num_replicas"]
        gpus_per_replica = int(config["use_gpu"])
        return Resources(
            cpu=0,
            gpu=0,
            extra_cpu=replicas,
            extra_gpu=gpus_per_replica * replicas)

    def _setup(self, config):
        # The Tune config is exactly the PyTorchTrainer kwargs.
        self._trainer = PyTorchTrainer(**config)

    def _train(self):
        # One training epoch followed by validation; merge the stats.
        stats = self._trainer.train()
        stats.update(self._trainer.validate())
        # output {"mean_loss": test_loss, "mean_accuracy": accuracy}
        return stats

    def _save(self, checkpoint_dir):
        return self._trainer.save(os.path.join(checkpoint_dir, "model.pth"))

    def _restore(self, checkpoint_path):
        return self._trainer.restore(checkpoint_path)

    def _stop(self):
        self._trainer.shutdown()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/pytorch/resnet.py
|
Python
|
"""ResNet in PyTorch.
Copied from https://github.com/kuangliu/pytorch-cifar/
blob/ab908327d44bf9b1d22cd333a4466e85083d3f21/models/resnet.py
"""
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Residual block with two 3x3 convolutions.

    `expansion` is the output-channel multiplier (1 for BasicBlock).
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Identity shortcut, replaced by a strided 1x1 projection when the
        # spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_planes,
                    self.expansion * planes,
                    kernel_size=1,
                    stride=stride,
                    bias=False), nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual connection: add the (possibly projected) input.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    """Residual bottleneck block: 1x1 reduce, 3x3, 1x1 expand.

    `expansion` is the output-channel multiplier (4 for Bottleneck).
    """
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(
            planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        # Identity shortcut, replaced by a strided 1x1 projection when the
        # spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_planes,
                    self.expansion * planes,
                    kernel_size=1,
                    stride=stride,
                    bias=False), nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Residual connection: add the (possibly projected) input.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet over 3-channel inputs.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        num_blocks: number of blocks in each of the four stages.
        num_classes (int): size of the final classification layer.

    The final 4x4 average pool assumes 32x32 inputs (32 halved by the
    three stride-2 stages).
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(
            3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest keep
        # stride 1. self.in_planes tracks the running channel count.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Global 4x4 average pool, flatten, then classify.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNet18(_):
    """ResNet-18: BasicBlock, stage depths 2-2-2-2. The argument is ignored."""
    depths = [2, 2, 2, 2]
    return ResNet(BasicBlock, depths)


def ResNet34(_):
    """ResNet-34: BasicBlock, stage depths 3-4-6-3. The argument is ignored."""
    depths = [3, 4, 6, 3]
    return ResNet(BasicBlock, depths)


def ResNet50(_):
    """ResNet-50: Bottleneck, stage depths 3-4-6-3. The argument is ignored."""
    depths = [3, 4, 6, 3]
    return ResNet(Bottleneck, depths)


def ResNet101(_):
    """ResNet-101: Bottleneck, stage depths 3-4-23-3. The argument is ignored."""
    depths = [3, 4, 23, 3]
    return ResNet(Bottleneck, depths)


def ResNet152(_):
    """ResNet-152: Bottleneck, stage depths 3-8-36-3. The argument is ignored."""
    depths = [3, 8, 36, 3]
    return ResNet(Bottleneck, depths)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/pytorch/utils.py
|
Python
|
import collections
import collections.abc
import time

import torch

from ray.experimental.sgd.utils import TimerStat
def train(model, train_iterator, criterion, optimizer, config):
    """Runs 1 training epoch.

    Args:
        model (torch.nn.Module): a single model (iterables are rejected).
        train_iterator: iterable of (features, target) batches.
        criterion: loss function.
        optimizer: a single optimizer (iterables are rejected).
        config (dict): currently unused here; kept for API symmetry.

    Returns:
        dict of averaged timing/loss statistics for the epoch.
    """
    # Fix: collections.Iterable was removed in Python 3.10 (deprecated
    # since 3.3); the ABC lives in collections.abc.
    if isinstance(model, collections.abc.Iterable) or isinstance(
            optimizer, collections.abc.Iterable):
        raise ValueError(
            "Need to provide custom training function if using multi-model "
            "or multi-optimizer training.")
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # Per-phase timers: device transfer, forward, backward, optimizer step.
    timers = {k: TimerStat() for k in ["d2h", "fwd", "grad", "apply"]}

    # switch to train mode
    model.train()

    end = time.time()

    for i, (features, target) in enumerate(train_iterator):
        # measure data loading time
        data_time.update(time.time() - end)

        # Create non_blocking tensors for distributed training
        with timers["d2h"]:
            if torch.cuda.is_available():
                features = features.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)

        # compute output
        with timers["fwd"]:
            output = model(features)
            loss = criterion(output, target)

        # record loss, weighted by batch size
        losses.update(loss.item(), features.size(0))

        with timers["grad"]:
            # compute gradients in a backward pass
            optimizer.zero_grad()
            loss.backward()

        with timers["apply"]:
            # Call step of optimizer to update model params
            optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    stats = {
        "batch_time": batch_time.avg,
        "batch_processed": losses.count,
        "train_loss": losses.avg,
        "data_time": data_time.avg,
    }
    stats.update({k: t.mean for k, t in timers.items()})
    return stats
def validate(model, val_iterator, criterion, config):
    """Runs one evaluation pass over the validation set.

    Args:
        model (torch.nn.Module): a single model (iterables are rejected).
        val_iterator: iterable of (features, target) batches; must be
            non-empty (accuracy divides by the sample count).
        criterion: loss function.
        config (dict): currently unused here; kept for API symmetry.

    Returns:
        dict with "batch_time", "validation_loss" and "mean_accuracy".
    """
    # Fix: collections.Iterable was removed in Python 3.10 (deprecated
    # since 3.3); the ABC lives in collections.abc.
    if isinstance(model, collections.abc.Iterable):
        raise ValueError(
            "Need to provide custom validation function if using multi-model "
            "training.")
    batch_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        end = time.time()
        for i, (features, target) in enumerate(val_iterator):
            if torch.cuda.is_available():
                features = features.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)

            # compute output
            output = model(features)
            loss = criterion(output, target)
            # Accuracy via argmax over class scores.
            _, predicted = torch.max(output.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()

            # measure accuracy and record loss
            losses.update(loss.item(), features.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

    stats = {"batch_time": batch_time.avg, "validation_loss": losses.avg}
    stats.update(mean_accuracy=correct / total)
    return stats
class AverageMeter:
    """Tracks the latest value plus a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clears all accumulated statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Records `val` observed `n` times and refreshes the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/tests/test_pytorch.py
|
Python
|
import os
import tempfile
from unittest.mock import patch
import pytest
import time
import torch
import torch.nn as nn
import torch.distributed as dist
import ray
from ray import tune
from ray.tests.conftest import ray_start_2_cpus # noqa: F401
from ray.experimental.sgd.pytorch import PyTorchTrainer, PyTorchTrainable
from ray.experimental.sgd.pytorch.utils import train
from ray.experimental.sgd.utils import check_for_failure
from ray.experimental.sgd.examples.train_example import (
model_creator, optimizer_creator, data_creator, LinearDataset)
@pytest.mark.parametrize("num_replicas", [1, 2]
                         if dist.is_available() else [1])
def test_train(ray_start_2_cpus, num_replicas):  # noqa: F811
    """Losses should not increase across two consecutive train/validate rounds."""
    trainer = PyTorchTrainer(
        model_creator,
        data_creator,
        optimizer_creator,
        loss_creator=lambda config: nn.MSELoss(),
        num_replicas=num_replicas)
    first_train = trainer.train()["train_loss"]
    first_val = trainer.validate()["validation_loss"]
    second_train = trainer.train()["train_loss"]
    second_val = trainer.validate()["validation_loss"]

    print(first_train, second_train)
    print(first_val, second_val)
    assert second_train <= first_train
    assert second_val <= first_val
@pytest.mark.parametrize("num_replicas", [1, 2]
                         if dist.is_available() else [1])
def test_multi_model(ray_start_2_cpus, num_replicas):  # noqa: F811
    """Save/restore round-trip for multi-model training.

    Trains two linear models with a custom train function, checkpoints,
    then verifies that a fresh trainer restored from the checkpoint holds
    identical weights for every model.
    """

    def custom_train(models, dataloader, criterion, optimizers, config):
        # Trains each (model, optimizer) pair independently on the same data.
        result = {}
        for i, (model, optimizer) in enumerate(zip(models, optimizers)):
            result["model_{}".format(i)] = train(model, dataloader, criterion,
                                                 optimizer, config)
        return result

    def multi_model_creator(config):
        return nn.Linear(1, 1), nn.Linear(1, 1)

    def multi_optimizer_creator(models, config):
        opts = [
            torch.optim.SGD(model.parameters(), lr=0.0001) for model in models
        ]
        return opts[0], opts[1]

    trainer1 = PyTorchTrainer(
        multi_model_creator,
        data_creator,
        multi_optimizer_creator,
        loss_creator=lambda config: nn.MSELoss(),
        train_function=custom_train,
        num_replicas=num_replicas)
    trainer1.train()

    filename = os.path.join(tempfile.mkdtemp(), "checkpoint")
    trainer1.save(filename)

    models1 = trainer1.get_model()

    # Shut the first trainer down before starting the second so it can
    # claim the (2-CPU) cluster's resources.
    trainer1.shutdown()

    # The restore path does not need the custom train function; only
    # weights are compared below.
    trainer2 = PyTorchTrainer(
        multi_model_creator,
        data_creator,
        multi_optimizer_creator,
        loss_creator=lambda config: nn.MSELoss(),
        num_replicas=num_replicas)
    trainer2.restore(filename)

    os.remove(filename)

    models2 = trainer2.get_model()

    # Every parameter tensor of every model must round-trip exactly.
    for model_1, model_2 in zip(models1, models2):
        model1_state_dict = model_1.state_dict()
        model2_state_dict = model_2.state_dict()
        assert set(model1_state_dict.keys()) == set(model2_state_dict.keys())
        for k in model1_state_dict:
            assert torch.equal(model1_state_dict[k], model2_state_dict[k])
@pytest.mark.parametrize("num_replicas", [1, 2]
                         if dist.is_available() else [1])
def test_tune_train(ray_start_2_cpus, num_replicas):  # noqa: F811
    """Each Tune trial's train/validation losses must be non-increasing."""
    config = {
        "model_creator": model_creator,
        "data_creator": data_creator,
        "optimizer_creator": optimizer_creator,
        "loss_creator": lambda config: nn.MSELoss(),
        "num_replicas": num_replicas,
        "use_gpu": False,
        "batch_size": 512,
        "backend": "gloo"
    }

    analysis = tune.run(
        PyTorchTrainable,
        num_samples=2,
        config=config,
        stop={"training_iteration": 2},
        verbose=1)

    # Check that losses did not increase between the two iterations of
    # every trial.
    for path, df in analysis.trial_dataframes.items():
        assert df.loc[1, "train_loss"] <= df.loc[0, "train_loss"]
        assert df.loc[1, "validation_loss"] <= df.loc[0, "validation_loss"]
@pytest.mark.parametrize("num_replicas", [1, 2]
                         if dist.is_available() else [1])
def test_save_and_restore(ray_start_2_cpus, num_replicas):  # noqa: F811
    """A restored trainer must hold weights identical to the saved one."""
    trainer1 = PyTorchTrainer(
        model_creator,
        data_creator,
        optimizer_creator,
        loss_creator=lambda config: nn.MSELoss(),
        num_replicas=num_replicas)
    trainer1.train()

    filename = os.path.join(tempfile.mkdtemp(), "checkpoint")
    trainer1.save(filename)

    saved_model = trainer1.get_model()
    trainer1.shutdown()

    trainer2 = PyTorchTrainer(
        model_creator,
        data_creator,
        optimizer_creator,
        loss_creator=lambda config: nn.MSELoss(),
        num_replicas=num_replicas)
    trainer2.restore(filename)
    os.remove(filename)

    restored_model = trainer2.get_model()

    saved_state = saved_model.state_dict()
    restored_state = restored_model.state_dict()
    assert set(saved_state.keys()) == set(restored_state.keys())
    for key in saved_state:
        assert torch.equal(saved_state[key], restored_state[key])
def test_fail_with_recover(ray_start_2_cpus):  # noqa: F811
    """Training must raise once retries are exhausted under repeated failures.

    Patches the trainer's _train_step to kill worker 0 on the first three
    steps; with max_retries=1 the trainer cannot recover and should raise.
    """
    if not dist.is_available():
        return

    def single_loader(batch_size, config):
        # Large dataset so a training step is long enough to be interrupted.
        train_dataset = LinearDataset(2, 5, size=1000000)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=batch_size)
        return train_loader

    def step_with_fail(self):
        worker_stats = [w.step.remote() for w in self.workers]
        if self._num_failures < 3:
            # Give the in-flight batch time to start so the kill lands
            # mid-step and the failure is detected correctly.
            time.sleep(1)
            self.workers[0].__ray_kill__()
        success = check_for_failure(worker_stats)
        return success, worker_stats

    with patch.object(PyTorchTrainer, "_train_step", step_with_fail):
        trainer1 = PyTorchTrainer(
            model_creator,
            single_loader,
            optimizer_creator,
            batch_size=100000,
            loss_creator=lambda config: nn.MSELoss(),
            num_replicas=2)

        # 3 injected failures > 1 allowed retry -> training must give up.
        with pytest.raises(RuntimeError):
            trainer1.train(max_retries=1)
def test_resize(ray_start_2_cpus):  # noqa: F811
    """After a worker dies and no spare CPU is available, the trainer
    should shrink to the surviving worker and finish training.
    """
    if not dist.is_available():
        return

    def single_loader(batch_size, config):
        # Large dataset so a training step is long enough to be interrupted.
        train_dataset = LinearDataset(2, 5, size=1000000)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=batch_size)
        return train_loader

    def step_with_fail(self):
        worker_stats = [w.step.remote() for w in self.workers]
        if self._num_failures < 1:
            # Give the in-flight batch time to start so the kill lands
            # mid-step and the failure is detected correctly.
            time.sleep(1)
            self.workers[0].__ray_kill__()
        success = check_for_failure(worker_stats)
        return success, worker_stats

    with patch.object(PyTorchTrainer, "_train_step", step_with_fail):
        trainer1 = PyTorchTrainer(
            model_creator,
            single_loader,
            optimizer_creator,
            batch_size=100000,
            loss_creator=lambda config: nn.MSELoss(),
            num_replicas=2)

        # Occupy the freed CPU with a long-running task so the trainer
        # cannot re-create the killed worker and must resize instead.
        @ray.remote
        def try_test():
            import time
            time.sleep(100)

        try_test.remote()
        trainer1.train(max_retries=1)
        assert len(trainer1.workers) == 1
def test_fail_twice(ray_start_2_cpus):  # noqa: F811
    """Two consecutive worker deaths must be recoverable with max_retries=2."""
    if not dist.is_available():
        return

    def single_loader(batch_size, config):
        # Large dataset so a training step is long enough to be interrupted.
        train_dataset = LinearDataset(2, 5, size=1000000)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=batch_size)
        return train_loader

    def step_with_fail(self):
        worker_stats = [w.step.remote() for w in self.workers]
        if self._num_failures < 2:
            # Let the step start before killing the worker.
            time.sleep(1)
            self.workers[0].__ray_kill__()
        success = check_for_failure(worker_stats)
        return success, worker_stats

    with patch.object(PyTorchTrainer, "_train_step", step_with_fail):
        trainer1 = PyTorchTrainer(
            model_creator,
            single_loader,
            optimizer_creator,
            batch_size=100000,
            loss_creator=lambda config: nn.MSELoss(),
            num_replicas=2)

        # 2 injected failures == 2 allowed retries -> must succeed.
        trainer1.train(max_retries=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/tests/test_pytorch_runner.py
|
Python
|
import numpy as np
import torch
import torch.nn as nn
import unittest
from unittest.mock import MagicMock
from ray.experimental.sgd.pytorch.pytorch_runner import PyTorchRunner
class LinearDataset(torch.utils.data.Dataset):
    """Synthetic dataset for y = a * x + b with x evenly spaced on [0, 10).

    Args:
        a (float): slope.
        b (float): intercept.
        size (int): number of samples.
    """

    def __init__(self, a, b, size=1000):
        # Fix: removed a dead `np.random.random(size)` sample that was
        # immediately overwritten by np.arange (wasted work, misleading).
        x = np.arange(0, 10, 10 / size, dtype=np.float32)
        self.x = torch.from_numpy(x)
        self.y = torch.from_numpy(a * x + b)

    def __getitem__(self, index):
        # Return (feature, target) as shape-(1,) float32 tensors.
        return self.x[index, None], self.y[index, None]

    def __len__(self):
        return len(self.x)
def model_creator(config):
    """Returns a fresh 1-in/1-out linear model; `config` is unused."""
    return nn.Linear(1, 1)
def optimizer_creator(models, config):
    """Returns an SGD optimizer (lr=0.1) over the single model's parameters."""
    return torch.optim.SGD(models.parameters(), lr=0.1)
def loss_creator(config):
    """Returns a mean-squared-error criterion; `config` is unused."""
    return nn.MSELoss()
def single_loader(batch_size, config):
    """Builds a single training DataLoader over a small synthetic dataset.

    NOTE(review): `batch_size` is not forwarded to the DataLoader, which
    therefore uses the default batch size of 1 — presumably intentional
    for these unit tests; confirm before relying on it.
    """
    dataset = LinearDataset(2, 5)
    return torch.utils.data.DataLoader(dataset)
def create_dataloaders(batch_size, config):
    """Builds (train, validation) DataLoaders over synthetic linear data.

    NOTE(review): `batch_size` is not forwarded to the DataLoaders
    (default batch size 1) — presumably intentional for these unit tests.
    """
    train_dataset = LinearDataset(2, 5)
    validation_dataset = LinearDataset(2, 5, size=400)
    return (torch.utils.data.DataLoader(train_dataset),
            torch.utils.data.DataLoader(validation_dataset))
class TestPyTorchRunner(unittest.TestCase):
    """Unit tests for PyTorchRunner setup/step/validate bookkeeping."""

    def testValidate(self):
        """validate() must invoke the custom validation function once."""
        # Fix: use return_value= (as testStep does); the previous
        # MagicMock(returns=...) silently set an attribute named "returns"
        # on the mock instead of configuring its return value.
        mock_function = MagicMock(return_value=dict(mean_accuracy=10))
        runner = PyTorchRunner(
            model_creator,
            create_dataloaders,
            optimizer_creator,
            loss_creator,
            validation_function=mock_function)
        runner.setup()
        runner.step()
        runner.step()
        runner.step()
        # Training steps alone must not trigger validation.
        self.assertEqual(mock_function.call_count, 0)
        runner.validate()
        self.assertTrue(mock_function.called)
        self.assertEqual(runner.stats()["epoch"], 3)

    def testStep(self):
        """step() must invoke the custom train function and count epochs."""
        mock_function = MagicMock(return_value=dict(mean_accuracy=10))
        runner = PyTorchRunner(
            model_creator,
            create_dataloaders,
            optimizer_creator,
            loss_creator,
            train_function=mock_function)
        runner.setup()
        runner.step()
        runner.step()
        result = runner.step()
        self.assertEqual(mock_function.call_count, 3)
        self.assertEqual(result["epoch"], 3)
        self.assertEqual(runner.stats()["epoch"], 3)

    def testGivens(self):
        """Runner must preserve multi-model/optimizer tuples as given."""

        def three_model_creator(config):
            return nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1)

        def three_optimizer_creator(models, config):
            opts = [
                torch.optim.SGD(model.parameters(), lr=0.1) for model in models
            ]
            return opts[0], opts[1], opts[2]

        runner = PyTorchRunner(three_model_creator, single_loader,
                               three_optimizer_creator, loss_creator)
        runner.setup()
        self.assertEqual(len(runner.given_models), 3)
        self.assertEqual(len(runner.given_optimizers), 3)

        # With a single model/optimizer the runner wraps them, so the
        # "given" handles differ from the internal collections.
        runner2 = PyTorchRunner(model_creator, single_loader,
                                optimizer_creator, loss_creator)
        runner2.setup()
        self.assertNotEqual(runner2.given_models, runner2.models)
        self.assertNotEqual(runner2.given_optimizers, runner2.optimizers)

    def testMultiLoaders(self):
        """More than two dataloaders from data_creator must be rejected."""

        def three_data_loader(batch_size, config):
            train_dataset = LinearDataset(2, 5)
            validation_dataset = LinearDataset(2, 5, size=400)
            train_loader = torch.utils.data.DataLoader(train_dataset)
            validation_loader = torch.utils.data.DataLoader(validation_dataset)
            return train_loader, validation_loader, validation_loader

        runner = PyTorchRunner(model_creator, three_data_loader,
                               optimizer_creator, loss_creator)
        with self.assertRaises(ValueError):
            runner.setup()

        runner2 = PyTorchRunner(model_creator, three_data_loader,
                                optimizer_creator, loss_creator)
        with self.assertRaises(ValueError):
            runner2.setup()

    def testSingleLoader(self):
        """validate() without a validation loader must raise."""
        runner = PyTorchRunner(model_creator, single_loader, optimizer_creator,
                               loss_creator)
        runner.setup()
        runner.step()
        with self.assertRaises(ValueError):
            runner.validate()

    def testMultiModel(self):
        """Default step() must reject multi-model setups (needs custom train)."""

        def multi_model_creator(config):
            return nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1)

        def multi_optimizer_creator(models, config):
            opts = [
                torch.optim.SGD(model.parameters(), lr=0.1) for model in models
            ]
            return opts[0], opts[1], opts[2]

        runner = PyTorchRunner(multi_model_creator, single_loader,
                               multi_optimizer_creator, loss_creator)
        runner.setup()
        with self.assertRaises(ValueError):
            runner.step()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/tests/test_tensorflow.py
|
Python
|
import os
import pytest
import tempfile
import numpy as np
import shutil
from ray import tune
from ray.tests.conftest import ray_start_2_cpus # noqa: F401
from ray.experimental.sgd.tf import TFTrainer, TFTrainable
from ray.experimental.sgd.examples.tensorflow_train_example import (
simple_model, simple_dataset)
# Minimal trainer config shared by all tests: tiny step counts keep the
# TF fit/evaluate calls fast.
SIMPLE_CONFIG = {
    "batch_size": 128,
    "fit_config": {
        "steps_per_epoch": 3,
    },
    "evaluate_config": {
        "steps": 3,
    }
}
@pytest.mark.parametrize(  # noqa: F811
    "num_replicas", [1, 2])
def test_train(ray_start_2_cpus, num_replicas):  # noqa: F811
    """Smoke test: two train/validate rounds must complete without raising."""
    trainer = TFTrainer(
        model_creator=simple_model,
        data_creator=simple_dataset,
        num_replicas=num_replicas,
        config=SIMPLE_CONFIG)

    first_round = trainer.train()
    first_round.update(trainer.validate())

    second_round = trainer.train()
    second_round.update(trainer.validate())
@pytest.mark.parametrize(  # noqa: F811
    "num_replicas", [1, 2])
def test_tune_train(ray_start_2_cpus, num_replicas):  # noqa: F811
    """Smoke test: TFTrainable must run under tune.run without errors."""
    trial_config = {
        "model_creator": tune.function(simple_model),
        "data_creator": tune.function(simple_dataset),
        "num_replicas": num_replicas,
        "use_gpu": False,
        "trainer_config": SIMPLE_CONFIG
    }

    tune.run(
        TFTrainable,
        num_samples=2,
        config=trial_config,
        stop={"training_iteration": 2},
        verbose=1)
@pytest.mark.parametrize(  # noqa: F811
    "num_replicas", [1, 2])
def test_save_and_restore(ray_start_2_cpus, num_replicas):  # noqa: F811
    """A restored trainer must reproduce config, weights and optimizer state."""
    trainer1 = TFTrainer(
        model_creator=simple_model,
        data_creator=simple_dataset,
        num_replicas=num_replicas,
        config=SIMPLE_CONFIG)
    trainer1.train()

    tmpdir = tempfile.mkdtemp()
    checkpoint_path = os.path.join(tmpdir, "checkpoint")
    trainer1.save(checkpoint_path)

    saved_model = trainer1.get_model()
    trainer1.shutdown()

    trainer2 = TFTrainer(
        model_creator=simple_model,
        data_creator=simple_dataset,
        num_replicas=num_replicas,
        config=SIMPLE_CONFIG)
    trainer2.restore(checkpoint_path)

    restored_model = trainer2.get_model()
    trainer2.shutdown()
    shutil.rmtree(tmpdir)

    # Architecture must match apart from auto-generated layer names.
    assert _compare(
        saved_model.get_config(),
        restored_model.get_config(),
        skip_keys=["name"])

    # Model weights and optimizer slot variables must round-trip exactly.
    assert _compare(saved_model.get_weights(), restored_model.get_weights())
    assert _compare(saved_model.optimizer.get_weights(),
                    restored_model.optimizer.get_weights())
def _compare(d1, d2, skip_keys=None):
"""Compare two lists or dictionaries or array"""
if type(d1) != type(d2):
return False
if isinstance(d1, dict):
if set(d1) != set(d2):
return False
for key in d1:
if skip_keys is not None and key in skip_keys:
continue
if not _compare(d1[key], d2[key], skip_keys=skip_keys):
return False
elif isinstance(d1, list):
for i, _ in enumerate(d1):
if not _compare(d1[i], d2[i], skip_keys=skip_keys):
return False
elif isinstance(d1, np.ndarray):
if not np.array_equal(d1, d2):
return False
else:
if d1 != d2:
return False
return True
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/tf/__init__.py
|
Python
|
# Public entry points for the TensorFlow SGD backend.
from ray.experimental.sgd.tf.tf_trainer import (TFTrainer, TFTrainable)

__all__ = ["TFTrainer", "TFTrainable"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/tf/tf_runner.py
|
Python
|
import logging
import json
import os
import numpy as np
import ray
import ray.services
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
def _try_import_strategy():
    """Late import for TensorFlow.

    Importing lazily keeps this module importable (e.g. for pickling the
    runner class) on processes where TensorFlow is not yet loaded.
    """
    import tensorflow as tf
    return tf.distribute.experimental.MultiWorkerMirroredStrategy
class TFRunner:
    """Manages a TensorFlow model for training.

    Instances are wrapped as Ray actors by TFTrainer; one runner holds one
    model replica plus its train/validation datasets.
    """

    def __init__(self, model_creator, data_creator, config=None,
                 verbose=False):
        """Initializes the runner.

        Args:
            model_creator (dict -> Model): see tf_trainer.py.
            data_creator (dict -> tf.Dataset, tf.Dataset): see tf_trainer.py.
            config (dict): see tf_trainer.py.
            verbose (bool): Outputs training data if true.
        """
        self.model_creator = model_creator
        self.data_creator = data_creator
        self.config = {} if config is None else config
        # Number of completed training epochs.
        self.epoch = 0
        self.verbose = verbose

    def setup(self):
        """Initializes the model (single-replica, non-distributed path)."""
        logger.debug("Creating dataset")
        self.train_dataset, self.test_dataset = self.data_creator(self.config)
        logger.debug("Creating model")
        self.model = self.model_creator(self.config)

    def setup_distributed(self, urls, world_rank, world_size):
        """Sets up TensorFlow distributed environment and initializes the model.

        Args:
            urls (str): the URLs that each node uses to connect.
            world_rank (int): the index of the runner.
            world_size (int): the total number of runners.
        """
        assert len(urls) == world_size
        # TF multi-worker setup is driven by the TF_CONFIG env variable.
        tf_config = {
            "cluster": {
                "worker": urls
            },
            "task": {
                "index": world_rank,
                "type": "worker"
            }
        }
        os.environ["TF_CONFIG"] = json.dumps(tf_config)

        MultiWorkerMirroredStrategy = _try_import_strategy()
        self.strategy = MultiWorkerMirroredStrategy()

        self.train_dataset, self.test_dataset = self.data_creator(self.config)

        logger.debug("Creating model with MultiWorkerMirroredStrategy")
        # Model variables must be created inside the strategy scope so they
        # are mirrored across workers.
        with self.strategy.scope():
            self.model = self.model_creator(self.config)

        # For use in model.evaluate()
        self.local_model = None

    def step(self):
        """Runs a training epoch and updates the model parameters."""
        fit_default_config = {"verbose": self.verbose}
        # User-supplied fit_config overrides the defaults.
        fit_default_config.update(self.config.get("fit_config", {}))

        history = self.model.fit(self.train_dataset, **fit_default_config)
        if history is None:
            stats = {}
        else:
            # Report the last value of each tracked metric for this epoch.
            stats = {"train_" + k: v[-1] for k, v in history.history.items()}

        self.epoch += 1
        return stats

    def validate(self):
        """Evaluates the model on the validation data set."""
        stats = {}
        evaluate_config = {"verbose": self.verbose}
        evaluate_config.update(self.config.get("evaluate_config", {}))

        results = self.model.evaluate(self.test_dataset, **evaluate_config)
        if results is None:
            # Using local Model since model.evaluate() returns None
            # for MultiWorkerMirroredStrategy
            logger.warning("Running a local model to get validation score.")
            self.local_model = self.model_creator(self.config)
            self.local_model.set_weights(self.model.get_weights())
            results = self.local_model.evaluate(self.test_dataset,
                                                **evaluate_config)

        if isinstance(results, list):
            stats = {
                "validation_" + k: v
                for k, v in zip(self.model.metrics_names, results)
            }
        else:
            stats = {"loss": results}

        return stats

    def get_state(self):
        """Returns the state of the runner (epoch, model and optimizer weights)."""
        return {
            "epoch": self.epoch,
            "weights": self.model.get_weights(),
            "optimizer_weights": self.model.optimizer.get_weights()
        }

    def set_state(self, state):
        """Sets the state of the model from a get_state() snapshot."""
        self.model = self.model_creator(self.config)
        self.epoch = state["epoch"]
        self.model.set_weights(state["weights"])
        # This part is due to ray.get() changing scalar np.int64 object to int
        state["optimizer_weights"][0] = np.array(
            state["optimizer_weights"][0], dtype=np.int64)

        # NOTE(review): _make_train_function is a private Keras API,
        # presumably called here to materialize optimizer slot variables
        # before loading their weights — confirm against the TF version used.
        if self.model.optimizer.weights == []:
            self.model._make_train_function()
        self.model.optimizer.set_weights(state["optimizer_weights"])

    def shutdown(self):
        """Attempts to shut down the worker."""
        # Drop references so TF resources can be reclaimed.
        del self.model
        del self.train_dataset
        del self.test_dataset

    def get_node_ip(self):
        """Returns the IP address of the current node."""
        return ray.services.get_node_ip_address()

    def find_free_port(self):
        """Finds a free port on the current node."""
        return utils.find_free_port()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/tf/tf_trainer.py
|
Python
|
import numpy as np
import os
import logging
import pickle
import ray
from ray.tune import Trainable
from ray.tune.resources import Resources
from ray.experimental.sgd.tf.tf_runner import TFRunner
logger = logging.getLogger(__name__)
class TFTrainer:
    """Coordinates (optionally distributed) TensorFlow training over Ray actors."""

    def __init__(self,
                 model_creator,
                 data_creator,
                 config=None,
                 num_replicas=1,
                 use_gpu=False,
                 verbose=False):
        """Sets up the TensorFlow trainer.

        Args:
            model_creator (dict -> Model): This function takes in the `config`
                dict and returns a compiled TF model.
            data_creator (dict -> tf.Dataset, tf.Dataset): Creates
                the training and validation data sets using the config.
                `config` dict is passed into the function.
            config (dict): configuration passed to 'model_creator',
                'data_creator'. Also contains `fit_config`, which is passed
                into `model.fit(data, **fit_config)` and
                `evaluate_config` which is passed into `model.evaluate`.
            num_replicas (int): Sets number of workers used in distributed
                training. Workers will be placed arbitrarily across the
                cluster.
            use_gpu (bool): Enables all workers to use GPU.
            verbose (bool): Prints output of one model if true.
        """
        self.model_creator = model_creator
        self.data_creator = data_creator
        self.config = {} if config is None else config
        self.use_gpu = use_gpu
        self.num_replicas = num_replicas
        self.verbose = verbose

        # Generate actor class
        Runner = ray.remote(num_cpus=1, num_gpus=int(use_gpu))(TFRunner)

        if num_replicas == 1:
            # Single-replica path: plain (non-distributed) setup.
            self.workers = [
                Runner.remote(
                    model_creator,
                    data_creator,
                    config=self.config,
                    verbose=self.verbose)
            ]
            # Get setup tasks in order to throw errors on failure
            ray.get(self.workers[0].setup.remote())
        else:
            # Start workers; only worker 0 is verbose to avoid log spam.
            self.workers = [
                Runner.remote(
                    model_creator,
                    data_creator,
                    config=self.config,
                    verbose=self.verbose and i == 0)
                for i in range(num_replicas)
            ]

            # Compute URL for initializing distributed setup
            ips = ray.get(
                [worker.get_node_ip.remote() for worker in self.workers])
            ports = ray.get(
                [worker.find_free_port.remote() for worker in self.workers])

            urls = [
                "{ip}:{port}".format(ip=ips[i], port=ports[i])
                for i in range(len(self.workers))
            ]

            # Get setup tasks in order to throw errors on failure
            ray.get([
                worker.setup_distributed.remote(urls, i, len(self.workers))
                for i, worker in enumerate(self.workers)
            ])

    def train(self):
        """Runs a training epoch on all replicas.

        Returns:
            Stats from worker 0 (replicas are presumed in sync).
        """
        worker_stats = ray.get([w.step.remote() for w in self.workers])
        stats = worker_stats[0].copy()
        return stats

    def validate(self):
        """Evaluates the model on the validation data set.

        Returns:
            Stats from worker 0 (replicas are presumed in sync).
        """
        logger.info("Starting validation step.")
        stats = ray.get([w.validate.remote() for w in self.workers])
        stats = stats[0].copy()
        return stats

    def get_model(self):
        """Returns the learned model (rebuilt locally from worker 0's state)."""
        state = ray.get(self.workers[0].get_state.remote())
        return self._get_model_from_state(state)

    def save(self, checkpoint):
        """Saves the model at the provided checkpoint.

        Args:
            checkpoint (str): Path to target checkpoint file.
        """
        state = ray.get(self.workers[0].get_state.remote())
        with open(checkpoint, "wb") as f:
            pickle.dump(state, f)
        return checkpoint

    def restore(self, checkpoint):
        """Restores the model from the provided checkpoint.

        Args:
            checkpoint (str): Path to target checkpoint file.
        """
        with open(checkpoint, "rb") as f:
            state = pickle.load(f)

        # Put the state once; all workers read the same object.
        state_id = ray.put(state)
        ray.get([worker.set_state.remote(state_id) for worker in self.workers])

    def shutdown(self):
        """Shuts down workers and releases resources."""
        for worker in self.workers:
            # Fire-and-forget: let the actor clean up, then terminate it.
            worker.shutdown.remote()
            worker.__ray_terminate__.remote()

    def _get_model_from_state(self, state):
        """Creates model and load weights from state"""
        model = self.model_creator(self.config)
        model.set_weights(state["weights"])

        # This part is due to ray.get() changing scalar np.int64 object to int
        state["optimizer_weights"][0] = np.array(
            state["optimizer_weights"][0], dtype=np.int64)

        # NOTE(review): _make_train_function is a private Keras API,
        # presumably called to materialize optimizer slot variables before
        # loading their weights — confirm against the TF version used.
        if model.optimizer.weights == []:
            model._make_train_function()
        model.optimizer.set_weights(state["optimizer_weights"])

        return model
class TFTrainable(Trainable):
    """Tune-compatible wrapper around TFTrainer."""

    @classmethod
    def default_resource_request(cls, config):
        # Reserve one CPU (and optionally one GPU) per replica via the
        # "extra" resource slots; the Trainable itself needs none.
        return Resources(
            cpu=0,
            gpu=0,
            extra_cpu=config["num_replicas"],
            extra_gpu=int(config["use_gpu"]) * config["num_replicas"])

    def _setup(self, config):
        # Build the underlying trainer from the Tune trial config.
        self._trainer = TFTrainer(
            model_creator=config["model_creator"],
            data_creator=config["data_creator"],
            config=config.get("trainer_config", {}),
            num_replicas=config["num_replicas"],
            use_gpu=config["use_gpu"])

    def _train(self):
        # One Tune iteration = one training epoch plus a validation pass.
        train_stats = self._trainer.train()
        validation_stats = self._trainer.validate()

        train_stats.update(validation_stats)
        return train_stats

    def _save(self, checkpoint_dir):
        return self._trainer.save(os.path.join(checkpoint_dir, "model"))

    def _restore(self, checkpoint_path):
        return self._trainer.restore(checkpoint_path)

    def _stop(self):
        self._trainer.shutdown()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/sgd/utils.py
|
Python
|
from contextlib import closing
import logging
import numpy as np
import socket
import time
import ray
from ray.exceptions import RayActorError
logger = logging.getLogger(__name__)
class TimerStat:
    """Running duration statistics for a code block.

    Keeps a rolling window of the most recent samples (plus unbounded
    totals) and exposes summary statistics as properties. Not thread-safe.

    Usage:

        timer = TimerStat()
        with timer:
            do_work()
        print(timer.mean)
    """

    def __init__(self, window_size=10):
        self._window_size = window_size
        self._samples = []
        self._units_processed = []
        self._start_time = None
        self._total_time = 0.0
        self.count = 0

    def __enter__(self):
        # Nested/overlapping timing with the same instance is not allowed.
        assert self._start_time is None, "concurrent updates not supported"
        self._start_time = time.time()

    def __exit__(self, type, value, tb):
        assert self._start_time is not None
        self.push(time.time() - self._start_time)
        self._start_time = None

    def push(self, time_delta):
        """Records one duration sample (evicting the oldest when full)."""
        self._samples.append(time_delta)
        if len(self._samples) > self._window_size:
            self._samples.pop(0)
        self.count += 1
        self._total_time += time_delta

    def push_units_processed(self, n):
        """Records how many units the corresponding timed block processed."""
        self._units_processed.append(n)
        if len(self._units_processed) > self._window_size:
            self._units_processed.pop(0)

    @property
    def mean(self):
        return np.mean(self._samples)

    @property
    def median(self):
        return np.median(self._samples)

    @property
    def sum(self):
        return np.sum(self._samples)

    @property
    def max(self):
        return np.max(self._samples)

    @property
    def first(self):
        return self._samples[0] if self._samples else None

    @property
    def last(self):
        return self._samples[-1] if self._samples else None

    @property
    def size(self):
        return len(self._samples)

    @property
    def mean_units_processed(self):
        return float(np.mean(self._units_processed))

    @property
    def mean_throughput(self):
        """Units per second over the windowed samples (0.0 when unused)."""
        elapsed = sum(self._samples)
        if not elapsed:
            return 0.0
        return sum(self._units_processed) / elapsed

    def reset(self):
        """Clears all samples and totals."""
        self._samples = []
        self._units_processed = []
        self._start_time = None
        self._total_time = 0.0
        self.count = 0
def find_free_port():
    """Returns a currently-free TCP port on the local host.

    Note: the socket is closed before returning, so another process may
    grab the port in the meantime; callers should bind it promptly.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        # Fix: SO_REUSEADDR must be set *before* bind() to affect it;
        # previously it was set afterwards, where it had no effect.
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("", 0))
        return s.getsockname()[1]
class AverageMeter:
    """Computes and stores the average and current value.

    NOTE(review): an identical class exists in sgd/pytorch/utils.py;
    consider deduplicating.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zeroes the current value and all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Incorporates `val`, weighted by `n` occurrences."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def check_for_failure(remote_values):
    """Checks remote values for any that returned and failed.

    Args:
        remote_values (list): List of object IDs representing functions
            that may fail in the middle of execution. For example, running
            a SGD training loop in multiple parallel actor calls.

    Returns:
        Bool for success in executing given remote tasks.
    """
    pending = remote_values
    try:
        while pending:
            ready, pending = ray.wait(pending)
            # ray.get raises RayActorError here if the producing actor died.
            ray.get(ready)
        return True
    except RayActorError as exc:
        logger.exception(str(exc))
    return False
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/signal.py
|
Python
|
import logging
from collections import defaultdict
import ray
import ray.cloudpickle as cloudpickle
# This string should be identical to the name of the signal sent upon
# detecting that an actor died.
# This constant is also used in NodeManager::PublishActorStateTransition()
# in node_manager.cc
ACTOR_DIED_STR = "ACTOR_DIED_SIGNAL"
logger = logging.getLogger(__name__)
class Signal:
    """Common base type for all Ray signals."""
class ErrorSignal(Signal):
    """Signal raised if an exception happens in a task or actor method."""

    def __init__(self, error):
        # Keep the original error so receivers can inspect or re-raise it.
        self.error = error
class ActorDiedSignal(Signal):
    """Signal sent when an actor dies (cf. ACTOR_DIED_STR above).

    The previous docstring was a copy-paste of ErrorSignal's.
    """

    def __init__(self):
        pass
def _get_task_id(source):
    """Return the task id associated to the generic source of the signal.

    Args:
        source: source of the signal; either an object id returned by a
            task, a task id, or an actor handle.

    Returns:
        - If source is an object id, the id of the task which created it.
        - If source is an actor handle, the id of the actor's task creator.
        - If source is a task id, that same task id.
    """
    # Exact-type checks (not isinstance) are intentional here.
    if type(source) is ray.actor.ActorHandle:
        return source._actor_id
    if type(source) is ray.TaskID:
        return source
    return ray._raylet.compute_task_id(source)
def send(signal):
    """Send signal.

    The signal has a unique identifier that is computed from (1) the id
    of the actor or task sending this signal (i.e., the actor or task
    calling this function), and (2) an index that is incremented every
    time this source sends a signal. This index starts from 1.

    Args:
        signal: Signal to be sent.
    """
    worker = ray.worker.global_worker
    # Tasks stream under their task id; actors under their actor id.
    if worker.actor_id.is_nil():
        source_key = worker.current_task_id.hex()
    else:
        source_key = worker.actor_id.hex()
    encoded_signal = ray.utils.binary_to_hex(cloudpickle.dumps(signal))
    # "*" asks redis to auto-assign the stream entry id.
    worker.redis_client.execute_command(
        "XADD " + source_key + " * signal " + encoded_signal)
def receive(sources, timeout=None):
    """Get all outstanding signals from sources.

    A source can be either (1) an object ID returned by the task (we want
    to receive signals from), or (2) an actor handle.

    When invoked by the same entity E (where E can be an actor, task or
    driver), for each source S in sources, this function returns all signals
    generated by S since the last receive() was invoked by E on S. If this is
    the first call on S, this function returns all past signals generated by S
    so far. Note that different actors, tasks or drivers that call receive()
    on the same source S will get independent copies of the signals generated
    by S.

    Args:
        sources: List of sources from which the caller waits for signals.
            A source is either an object ID returned by a task (in this case
            the object ID is used to identify that task), or an actor handle.
            If the user passes the IDs of multiple objects returned by the
            same task, this function returns a copy of the signals generated
            by that task for each object ID.
        timeout: Maximum time (in seconds) this function waits to get a
            signal from a source in sources. If None, the timeout is
            infinite.

    Returns:
        A list of pairs (S, sig), where S is a source in the sources
        argument, and sig is a signal generated by S since the last time
        receive() was called on S. Thus, for each S in sources, the return
        list can contain zero or multiple entries.

    Raises:
        ValueError: if timeout is negative.
    """
    # If None, initialize the timeout to a huge value (i.e., over 30,000
    # years in this case) to "approximate" infinity.
    if timeout is None:
        timeout = 10**12
    if timeout < 0:
        raise ValueError("The 'timeout' argument cannot be less than 0.")
    if not hasattr(ray.worker.global_worker, "signal_counters"):
        ray.worker.global_worker.signal_counters = defaultdict(lambda: b"0")
    signal_counters = ray.worker.global_worker.signal_counters
    # Map the ID of each source task to the source itself.
    task_id_to_sources = defaultdict(lambda: [])
    for s in sources:
        task_id_to_sources[_get_task_id(s).hex()].append(s)
    # Redis's XREAD BLOCK takes an integer millisecond timeout; clamp to
    # its 1ms resolution.
    if timeout < 1e-3:
        logger.warning("Timeout too small. Using 1ms minimum")
        timeout = 1e-3
    timeout_ms = int(1000 * timeout)
    # Construct the redis query:
    #   XREAD BLOCK <ms> STREAMS <key>... <last-seen-entry-id>...
    query = "XREAD BLOCK "
    # redis expects ms.
    query += str(timeout_ms)
    query += " STREAMS "
    query += " ".join(task_id_to_sources)
    query += " "
    query += " ".join([
        ray.utils.decode(signal_counters[ray.utils.hex_to_binary(task_id)])
        for task_id in task_id_to_sources
    ])
    answers = ray.worker.global_worker.redis_client.execute_command(query)
    if not answers:
        return []
    results = []
    # Each answer has the shape
    #   (stream key, [(entry id, [field, value, ...]), ...]).
    for answer in answers:
        # Make sure the answer corresponds to a source, s, in sources.
        task_id = ray.utils.decode(answer[0])
        task_source_list = task_id_to_sources[task_id]
        # The list of results for source s is stored in answer[1].
        for r in answer[1]:
            for s in task_source_list:
                if r[1][1].decode("ascii") == ACTOR_DIED_STR:
                    results.append((s, ActorDiedSignal()))
                else:
                    # r[0] is the redis internal sequence id; remember it
                    # so the next receive() resumes after this entry.
                    signal_counters[ray.utils.hex_to_binary(task_id)] = r[0]
                    # r[1] is a flat (key, value) list; our only key is
                    # "signal", so the payload sits at index 1.
                    signal = cloudpickle.loads(
                        ray.utils.hex_to_binary(r[1][1]))
                    results.append((s, signal))
    return results
def forget(sources):
    """Ignore all previous signals associated with each source S in sources.

    The index of the next expected signal from S is set to the index of
    the last signal that S sent plus 1. This means that the next receive()
    on S will only get the signals generated after this function was
    invoked.

    Args:
        sources: list of sources whose past signals are forgotten.
    """
    # Reading everything currently outstanding advances the per-source
    # cursors, which is exactly what "forgetting" means here.
    receive(sources, timeout=0)
def reset():
    """Drop this worker's record of signals received so far.

    If the worker calls receive() on a source next, it will get all the
    signals generated by that source starting with index = 1.
    """
    worker = ray.worker.global_worker
    if hasattr(worker, "signal_counters"):
        # b"0" is the XREAD cursor meaning "from the very beginning".
        worker.signal_counters = defaultdict(lambda: b"0")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/test/async_test.py
|
Python
|
import asyncio
import time
import pytest
import ray
from ray.experimental import async_api
@pytest.fixture
def init():
    # Shared fixture: a 4-CPU local Ray cluster with the async API enabled.
    ray.init(num_cpus=4)
    async_api.init()
    # Event-loop debug mode is noisy and slows tests; keep it off.
    asyncio.get_event_loop().set_debug(False)
    yield
    # Teardown runs after each test that uses this fixture.
    async_api.shutdown()
    ray.shutdown()
def gen_tasks(time_scale=0.1):
    """Launch five remote tasks; task i sleeps i*time_scale s and returns i."""

    @ray.remote
    def sleep_and_echo(n):
        time.sleep(n * time_scale)
        return n

    return [sleep_and_echo.remote(i) for i in range(5)]
def test_simple(init):
    @ray.remote
    def produce():
        time.sleep(1)
        return {"key1": ["value"]}

    # Wrap the object id in an asyncio future and resolve it on the loop.
    loop = asyncio.get_event_loop()
    outcome = loop.run_until_complete(async_api.as_future(produce.remote()))
    assert outcome["key1"] == ["value"]
def test_gather(init):
    loop = asyncio.get_event_loop()
    object_ids = gen_tasks()
    # asyncio.gather preserves order, so results line up with ray.get.
    gathered = loop.run_until_complete(
        asyncio.gather(*[async_api.as_future(oid) for oid in object_ids]))
    assert all(a == b for a, b in zip(gathered, ray.get(object_ids)))
def test_gather_benchmark(init):
    """async_api gather must be within 20% of plain ray.get on many tasks."""
    @ray.remote
    def f(n):
        time.sleep(0.001 * n)
        return 42
    async def test_async():
        # Total wall time for 50 rounds of gathering 20 futures.
        sum_time = 0.
        for _ in range(50):
            tasks = [f.remote(n) for n in range(20)]
            start = time.time()
            futures = [async_api.as_future(obj_id) for obj_id in tasks]
            await asyncio.gather(*futures)
            sum_time += time.time() - start
        return sum_time
    def baseline():
        # Same workload, fetched with blocking ray.get.
        sum_time = 0.
        for _ in range(50):
            tasks = [f.remote(n) for n in range(20)]
            start = time.time()
            ray.get(tasks)
            sum_time += time.time() - start
        return sum_time
    # warm up
    baseline()
    # async get
    sum_time_1 = asyncio.get_event_loop().run_until_complete(test_async())
    # get
    sum_time_2 = baseline()
    # Ensure the new implementation is not too slow.
    assert sum_time_2 * 1.2 > sum_time_1
def test_wait(init):
    loop = asyncio.get_event_loop()
    futures = [async_api.as_future(oid) for oid in gen_tasks()]
    # With no timeout every future should complete.
    done, _ = loop.run_until_complete(asyncio.wait(futures))
    assert set(done) == set(futures)
def test_wait_timeout(init):
    loop = asyncio.get_event_loop()
    # time_scale=10 means task i sleeps 10*i seconds; only task 0 can
    # finish inside the 5 second timeout.
    futures = [async_api.as_future(oid) for oid in gen_tasks(10)]
    done, _ = loop.run_until_complete(asyncio.wait(futures, timeout=5))
    assert list(done)[0] == futures[0]
def test_gather_mixup(init):
    loop = asyncio.get_event_loop()

    @ray.remote
    def remote_sleep(n):
        time.sleep(n * 0.1)
        return n

    async def local_sleep(n):
        await asyncio.sleep(n * 0.1)
        return n

    # Interleave Ray-backed futures with plain coroutines.
    awaitables = [
        async_api.as_future(remote_sleep.remote(1)),
        local_sleep(2),
        async_api.as_future(remote_sleep.remote(3)),
        local_sleep(4),
    ]
    assert loop.run_until_complete(
        asyncio.gather(*awaitables)) == [1, 2, 3, 4]
def test_wait_mixup(init):
    loop = asyncio.get_event_loop()

    @ray.remote
    def remote_sleep(n):
        time.sleep(n)
        return n

    def scheduled_local(n):
        async def _sleep(m):
            await asyncio.sleep(m)
            return m

        return asyncio.ensure_future(_sleep(n))

    # Durations 0.1 and 2 finish inside the 4s timeout; 5 and 7 do not.
    waitables = [
        async_api.as_future(remote_sleep.remote(0.1)),
        scheduled_local(7),
        async_api.as_future(remote_sleep.remote(5)),
        scheduled_local(2),
    ]
    done, _ = loop.run_until_complete(asyncio.wait(waitables, timeout=4))
    assert set(done) == {waitables[0], waitables[-1]}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/experimental/tf_utils.py
|
Python
|
from collections import deque, OrderedDict
import numpy as np
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def unflatten(vector, shapes):
    """Split a flat 1-D array into consecutive arrays of the given shapes.

    Args:
        vector: 1-D array whose length equals the sum of the shape sizes.
        shapes: Iterable of shapes (tuples/lists) to carve out of vector,
            in order.

    Returns:
        List of arrays, one per shape, reshaped views into vector.
    """
    i = 0
    arrays = []
    for shape in shapes:
        # np.int was removed in NumPy 1.24; the builtin int is the
        # equivalent (and portable) accumulator dtype here.
        size = np.prod(shape, dtype=int)
        array = vector[i:(i + size)].reshape(shape)
        arrays.append(array)
        i += size
    assert len(vector) == i, "Passed weight does not have the correct shape."
    return arrays
class TensorFlowVariables:
    """A class used to set and get weights for Tensorflow networks.

    Attributes:
        sess (tf.Session): The tensorflow session used to run assignment.
        variables (Dict[str, tf.Variable]): Extracted variables from the loss
            or additional variables that are passed in.
        placeholders (Dict[str, tf.placeholders]): Placeholders for weights.
        assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.
    """

    def __init__(self, output, sess=None, input_variables=None):
        """Creates TensorFlowVariables containing extracted variables.

        The variables are extracted by performing a BFS search on the
        dependency graph with loss as the root node. After the tree is
        traversed and those variables are collected, we append
        input_variables to the collected variables. For each variable in
        the list, the variable has a placeholder and assignment operation
        created for it.

        Args:
            output (tf.Operation, List[tf.Operation]): The tensorflow
                operation to extract all variables from.
            sess (tf.Session): Session used for running the get and set
                methods.
            input_variables (List[tf.Variables]): Variables to include in
                the list.
        """
        self.sess = sess
        if not isinstance(output, (list, tuple)):
            output = [output]
        queue = deque(output)
        variable_names = []
        explored_inputs = set(output)
        # We do a BFS on the dependency graph of the input function to find
        # the variables.
        while queue:
            tf_obj = queue.popleft()
            if tf_obj is None:
                continue
            # The object put into the queue is not necessarily an operation,
            # so we want the op attribute to get the operation underlying the
            # object. Only operations contain the inputs that we can explore.
            if hasattr(tf_obj, "op"):
                tf_obj = tf_obj.op
            for input_op in tf_obj.inputs:
                if input_op not in explored_inputs:
                    queue.append(input_op)
                    explored_inputs.add(input_op)
            # Tensorflow control inputs can be circular, so we keep track of
            # explored operations.
            for control in tf_obj.control_inputs:
                if control not in explored_inputs:
                    queue.append(control)
                    explored_inputs.add(control)
            # Both ref-style ("Variable*") and resource-style ("VarHandle*")
            # variable ops are collected.
            if ("Variable" in tf_obj.node_def.op
                    or "VarHandle" in tf_obj.node_def.op):
                variable_names.append(tf_obj.node_def.name)
        self.variables = OrderedDict()
        variable_list = [
            v for v in tf.global_variables()
            if v.op.node_def.name in variable_names
        ]
        if input_variables is not None:
            variable_list += input_variables
        for v in variable_list:
            self.variables[v.op.node_def.name] = v
        self.placeholders = {}
        self.assignment_nodes = {}
        # Create new placeholders to put in custom weights.
        for k, var in self.variables.items():
            self.placeholders[k] = tf.placeholder(
                var.value().dtype,
                var.get_shape().as_list(),
                name="Placeholder_" + k)
            self.assignment_nodes[k] = var.assign(self.placeholders[k])

    def set_session(self, sess):
        """Sets the current session used by the class.

        Args:
            sess (tf.Session): Session to set the attribute with.
        """
        self.sess = sess

    def get_flat_size(self):
        """Returns the total length of all of the flattened variables.

        Returns:
            The length of all flattened variables concatenated.
        """
        return sum(
            np.prod(v.get_shape().as_list()) for v in self.variables.values())

    def _check_sess(self):
        """Checks if the session is set, and if not throw an error message."""
        assert self.sess is not None, ("The session is not set. Set the "
                                       "session either by passing it into the "
                                       "TensorFlowVariables constructor or by "
                                       "calling set_session(sess).")

    def get_flat(self):
        """Gets the weights and returns them as a flat array.

        Returns:
            1D Array containing the flattened weights.
        """
        self._check_sess()
        return np.concatenate([
            v.eval(session=self.sess).flatten()
            for v in self.variables.values()
        ])

    def set_flat(self, new_weights):
        """Sets the weights to new_weights, converting from a flat array.

        Note:
            You can only set all weights in the network using this function,
            i.e., the length of the array must match get_flat_size.

        Args:
            new_weights (np.ndarray): Flat array containing weights.
        """
        self._check_sess()
        shapes = [v.get_shape().as_list() for v in self.variables.values()]
        arrays = unflatten(new_weights, shapes)
        # Only the keys are needed; iterating .items() for unused values
        # was wasteful.
        placeholders = [self.placeholders[k] for k in self.variables]
        self.sess.run(
            list(self.assignment_nodes.values()),
            feed_dict=dict(zip(placeholders, arrays)))

    def get_weights(self):
        """Returns a dictionary containing the weights of the network.

        Returns:
            Dictionary mapping variable names to their weights.
        """
        self._check_sess()
        return {
            k: v.eval(session=self.sess)
            for k, v in self.variables.items()
        }

    def set_weights(self, new_weights):
        """Sets the weights to new_weights.

        Note:
            Can set subsets of variables as well, by only passing in the
            variables you want to be set.

        Args:
            new_weights (Dict): Dictionary mapping variable names to their
                weights.
        """
        self._check_sess()
        assign_list = [
            self.assignment_nodes[name] for name in new_weights.keys()
            if name in self.assignment_nodes
        ]
        assert assign_list, ("No variables in the input matched those in the "
                             "network. Possible cause: Two networks were "
                             "defined in the same TensorFlow graph. To fix "
                             "this, place each network definition in its own "
                             "tf.Graph.")
        self.sess.run(
            assign_list,
            feed_dict={
                self.placeholders[name]: value
                for (name, value) in new_weights.items()
                if name in self.placeholders
            })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/function_manager.py
|
Python
|
import dis
import hashlib
import importlib
import inspect
import json
import logging
import sys
import time
import threading
import traceback
from collections import (
namedtuple,
defaultdict,
)
import ray
from ray import profiling
from ray import ray_constants
from ray import cloudpickle as pickle
from ray.utils import (
binary_to_hex,
is_function_or_method,
is_class_method,
is_static_method,
check_oversized_pickle,
decode,
ensure_str,
format_error_message,
push_error_to_driver,
)
# Lightweight record for a loaded remote function: the callable itself, its
# human-readable name, and how many times a worker may execute it.
FunctionExecutionInfo = namedtuple("FunctionExecutionInfo",
                                   ["function", "function_name", "max_calls"])
"""FunctionExecutionInfo: A named tuple storing remote function information."""
logger = logging.getLogger(__name__)
class FunctionDescriptor:
    """A class used to describe a python function.

    Attributes:
        module_name: the module name that the function belongs to.
        class_name: the class name that the function belongs to if exists.
            It could be empty if the function is not a class method.
        function_name: the function name of the function.
        function_hash: the hash code of the function source code if the
            function code is available.
        function_id: the function id calculated from this descriptor.
        is_for_driver_task: whether this descriptor is for driver task.
    """

    def __init__(self,
                 module_name,
                 function_name,
                 class_name="",
                 function_source_hash=b""):
        self._module_name = module_name
        self._class_name = class_name
        self._function_name = function_name
        self._function_source_hash = function_source_hash
        # Derived from the fields above, so compute once up front.
        self._function_id = self._get_function_id()

    def __repr__(self):
        return ("FunctionDescriptor:" + self._module_name + "." +
                self._class_name + "." + self._function_name + "." +
                binary_to_hex(self._function_source_hash))

    @classmethod
    def from_bytes_list(cls, function_descriptor_list):
        """Create a FunctionDescriptor instance from list of bytes.

        This function is used to create the function descriptor from
        backend data.

        Args:
            cls: Current class which is required argument for classmethod.
            function_descriptor_list: list of bytes to represent the
                function descriptor.

        Returns:
            The FunctionDescriptor instance created from the bytes list.
        """
        assert isinstance(function_descriptor_list, list)
        if len(function_descriptor_list) == 0:
            # This is a function descriptor of driver task.
            return FunctionDescriptor.for_driver_task()
        elif len(function_descriptor_list) in (3, 4):
            module_name = ensure_str(function_descriptor_list[0])
            class_name = ensure_str(function_descriptor_list[1])
            function_name = ensure_str(function_descriptor_list[2])
            if len(function_descriptor_list) == 4:
                # The optional fourth element is the source hash.
                return cls(module_name, function_name, class_name,
                           function_descriptor_list[3])
            else:
                return cls(module_name, function_name, class_name)
        else:
            raise Exception(
                "Invalid input for FunctionDescriptor.from_bytes_list")

    @classmethod
    def from_function(cls, function, pickled_function):
        """Create a FunctionDescriptor from a function instance.

        This function is used to create the function descriptor from
        a python function. If a function is a class function, it should
        not be used by this function.

        Args:
            cls: Current class which is required argument for classmethod.
            function: the python function used to create the function
                descriptor.
            pickled_function: This is factored in to ensure that any
                modifications to the function result in a different function
                descriptor.

        Returns:
            The FunctionDescriptor instance created according to the
            function.
        """
        module_name = function.__module__
        function_name = function.__name__
        class_name = ""
        pickled_function_hash = hashlib.sha1(pickled_function).digest()
        return cls(module_name, function_name, class_name,
                   pickled_function_hash)

    @classmethod
    def from_class(cls, target_class):
        """Create a FunctionDescriptor from a class.

        Args:
            cls: Current class which is required argument for classmethod.
            target_class: the python class used to create the function
                descriptor.

        Returns:
            The FunctionDescriptor instance created according to the class.
        """
        module_name = target_class.__module__
        class_name = target_class.__name__
        # An actor class is identified by its constructor.
        return cls(module_name, "__init__", class_name)

    @classmethod
    def for_driver_task(cls):
        """Create a FunctionDescriptor instance for a driver task."""
        return cls("", "", "", b"")

    @property
    def is_for_driver_task(self):
        """See whether this function descriptor is for a driver or not.

        Returns:
            True if this function descriptor is for driver tasks.
        """
        return all(
            len(x) == 0
            for x in [self.module_name, self.class_name, self.function_name])

    @property
    def module_name(self):
        """Get the module name of current function descriptor.

        Returns:
            The module name of the function descriptor.
        """
        return self._module_name

    @property
    def class_name(self):
        """Get the class name of current function descriptor.

        Returns:
            The class name of the function descriptor. It could be
            empty if the function is not a class method.
        """
        return self._class_name

    @property
    def function_name(self):
        """Get the function name of current function descriptor.

        Returns:
            The function name of the function descriptor.
        """
        return self._function_name

    @property
    def function_hash(self):
        """Get the hash code of the function source code.

        Returns:
            The bytes with length of ray_constants.ID_SIZE if the source
            code is available. Otherwise, the bytes length will be 0.
        """
        return self._function_source_hash

    @property
    def function_id(self):
        """Get the function id calculated from this descriptor.

        Returns:
            The value of ray.ObjectID that represents the function id.
        """
        return self._function_id

    def _get_function_id(self):
        """Calculate the function id of current function descriptor.

        This function id is calculated from all the fields of function
        descriptor.

        Returns:
            ray.ObjectID to represent the function descriptor.
        """
        if self.is_for_driver_task:
            return ray.FunctionID.nil()
        function_id_hash = hashlib.sha1()
        # Include the function module and name in the hash.
        function_id_hash.update(self.module_name.encode("ascii"))
        function_id_hash.update(self.function_name.encode("ascii"))
        function_id_hash.update(self.class_name.encode("ascii"))
        function_id_hash.update(self._function_source_hash)
        # Compute the function ID.
        function_id = function_id_hash.digest()
        return ray.FunctionID(function_id)

    def get_function_descriptor_list(self):
        """Return a list of bytes representing the function descriptor.

        This function is used to pass this function descriptor to backend.

        Returns:
            A list of bytes.
        """
        descriptor_list = []
        if self.is_for_driver_task:
            # Driver task returns an empty list.
            return descriptor_list
        else:
            descriptor_list.append(self.module_name.encode("ascii"))
            descriptor_list.append(self.class_name.encode("ascii"))
            descriptor_list.append(self.function_name.encode("ascii"))
            if len(self._function_source_hash) != 0:
                descriptor_list.append(self._function_source_hash)
            return descriptor_list

    def is_actor_method(self):
        """Whether this function descriptor is an actor method.

        Returns:
            True if it's an actor method, False if it's a normal function.
        """
        return len(self._class_name) > 0
class FunctionActorManager:
"""A class used to export/load remote functions and actors.
Attributes:
_worker: The associated worker that this manager related.
_functions_to_export: The remote functions to export when
the worker gets connected.
_actors_to_export: The actors to export when the worker gets
connected.
_function_execution_info: The map from job_id to function_id
and execution_info.
_num_task_executions: The map from job_id to function
execution times.
imported_actor_classes: The set of actor classes keys (format:
ActorClass:function_id) that are already in GCS.
"""
    def __init__(self, worker):
        self._worker = worker
        # Exports queued before the worker connects; flushed once it does.
        self._functions_to_export = []
        self._actors_to_export = []
        # This field is a dictionary that maps a driver ID to a dictionary of
        # functions (and information about those functions) that have been
        # registered for that driver (this inner dictionary maps function IDs
        # to a FunctionExecutionInfo object. This should only be used on
        # workers that execute remote functions.
        self._function_execution_info = defaultdict(lambda: {})
        # job_id -> {function_id -> execution count}.
        self._num_task_executions = defaultdict(lambda: {})
        # A set of all of the actor class keys that have been imported by the
        # import thread. It is safe to convert this worker into an actor of
        # these types.
        self.imported_actor_classes = set()
        # Cache: function_id -> actor class already loaded on this worker.
        self._loaded_actor_classes = {}
        # Guards _function_execution_info against races with the import
        # thread (see fetch_and_register_remote_function).
        self.lock = threading.Lock()
        self.execution_infos = {}
def increase_task_counter(self, job_id, function_descriptor):
function_id = function_descriptor.function_id
if self._worker.load_code_from_local:
job_id = ray.JobID.nil()
self._num_task_executions[job_id][function_id] += 1
def get_task_counter(self, job_id, function_descriptor):
function_id = function_descriptor.function_id
if self._worker.load_code_from_local:
job_id = ray.JobID.nil()
return self._num_task_executions[job_id][function_id]
def compute_collision_identifier(self, function_or_class):
"""The identifier is used to detect excessive duplicate exports.
The identifier is used to determine when the same function or class is
exported many times. This can yield false positives.
Args:
function_or_class: The function or class to compute an identifier
for.
Returns:
The identifier. Note that different functions or classes can give
rise to same identifier. However, the same function should
hopefully always give rise to the same identifier. TODO(rkn):
verify if this is actually the case. Note that if the
identifier is incorrect in any way, then we may give warnings
unnecessarily or fail to give warnings, but the application's
behavior won't change.
"""
import io
string_file = io.StringIO()
if sys.version_info[1] >= 7:
dis.dis(function_or_class, file=string_file, depth=2)
else:
dis.dis(function_or_class, file=string_file)
collision_identifier = (
function_or_class.__name__ + ":" + string_file.getvalue())
# Return a hash of the identifier in case it is too large.
return hashlib.sha1(collision_identifier.encode("ascii")).digest()
    def export(self, remote_function):
        """Pickle a remote function and export it to redis.

        Args:
            remote_function: the RemoteFunction object.
        """
        if self._worker.mode == ray.worker.LOCAL_MODE:
            # In local mode nothing is distributed, so no export is needed.
            return
        if self._worker.load_code_from_local:
            # Functions are loaded from local files; GCS export is skipped.
            return
        function = remote_function._function
        pickled_function = pickle.dumps(function)
        check_oversized_pickle(pickled_function,
                               remote_function._function_name,
                               "remote function", self._worker)
        # Key layout: RemoteFunction:<job id>:<function id>.
        key = (b"RemoteFunction:" + self._worker.current_job_id.binary() + b":"
               + remote_function._function_descriptor.function_id.binary())
        self._worker.redis_client.hmset(
            key, {
                "job_id": self._worker.current_job_id.binary(),
                "function_id": remote_function._function_descriptor.
                function_id.binary(),
                "function_name": remote_function._function_name,
                "module": function.__module__,
                "function": pickled_function,
                "collision_identifier": self.compute_collision_identifier(
                    function),
                "max_calls": remote_function._max_calls
            })
        # Notify import threads on other workers that a new export exists.
        self._worker.redis_client.rpush("Exports", key)
    def fetch_and_register_remote_function(self, key):
        """Import a remote function.

        Fetches the pickled function stored under ``key`` in redis,
        unpickles it, and registers it in _function_execution_info. On
        unpickling failure a placeholder that raises is registered instead
        and the error is pushed to the driver.
        """
        (job_id_str, function_id_str, function_name, serialized_function,
         num_return_vals, module, resources,
         max_calls) = self._worker.redis_client.hmget(key, [
             "job_id", "function_id", "function_name", "function",
             "num_return_vals", "module", "resources", "max_calls"
         ])
        function_id = ray.FunctionID(function_id_str)
        job_id = ray.JobID(job_id_str)
        function_name = decode(function_name)
        max_calls = int(max_calls)
        module = decode(module)
        # This is a placeholder in case the function can't be unpickled. This
        # will be overwritten if the function is successfully registered.
        def f(*args, **kwargs):
            raise Exception("This function was not imported properly.")
        # This function is called by ImportThread. This operation needs to be
        # atomic. Otherwise, there is race condition. Another thread may use
        # the temporary function above before the real function is ready.
        with self.lock:
            # Register the placeholder first so concurrent lookups never
            # see a missing entry.
            self._function_execution_info[job_id][function_id] = (
                FunctionExecutionInfo(
                    function=f,
                    function_name=function_name,
                    max_calls=max_calls))
            self._num_task_executions[job_id][function_id] = 0
            try:
                function = pickle.loads(serialized_function)
            except Exception:
                # If an exception was thrown when the remote function was
                # imported, we record the traceback and notify the scheduler
                # of the failure.
                traceback_str = format_error_message(traceback.format_exc())
                # Log the error message.
                push_error_to_driver(
                    self._worker,
                    ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR,
                    "Failed to unpickle the remote function '{}' with "
                    "function ID {}. Traceback:\n{}".format(
                        function_name, function_id.hex(), traceback_str),
                    job_id=job_id)
            else:
                # The below line is necessary. Because in the driver process,
                # if the function is defined in the file where the python
                # script was started from, its module is `__main__`.
                # However in the worker process, the `__main__` module is a
                # different module, which is `default_worker.py`
                function.__module__ = module
                self._function_execution_info[job_id][function_id] = (
                    FunctionExecutionInfo(
                        function=function,
                        function_name=function_name,
                        max_calls=max_calls))
                # Add the function to the function table.
                self._worker.redis_client.rpush(
                    b"FunctionTable:" + function_id.binary(),
                    self._worker.worker_id)
    def get_execution_info(self, job_id, function_descriptor):
        """Get the FunctionExecutionInfo of a remote function.

        Args:
            job_id: ID of the job that the function belongs to.
            function_descriptor: The FunctionDescriptor of the function to
                get.

        Returns:
            A FunctionExecutionInfo object.

        Raises:
            KeyError: if the function is not registered for this job.
        """
        if self._worker.load_code_from_local:
            # Load function from local code.
            # Currently, we don't support isolating code by jobs,
            # thus always set job ID to NIL here.
            job_id = ray.JobID.nil()
            if not function_descriptor.is_actor_method():
                # Actor methods are registered by load_actor_class instead.
                self._load_function_from_local(job_id, function_descriptor)
        else:
            # Load function from GCS.
            # Wait until the function to be executed has actually been
            # registered on this worker. We will push warnings to the user if
            # we spend too long in this loop.
            # The driver function may not be found in sys.path. Try to load
            # the function from GCS.
            with profiling.profile("wait_for_function"):
                self._wait_for_function(function_descriptor, job_id)
        try:
            function_id = function_descriptor.function_id
            info = self._function_execution_info[job_id][function_id]
        except KeyError as e:
            # Re-raise with full context so the failing lookup is traceable.
            message = ("Error occurs in get_execution_info: "
                       "job_id: %s, function_descriptor: %s. Message: %s" %
                       (job_id, function_descriptor, e))
            raise KeyError(message)
        return info
def _load_function_from_local(self, job_id, function_descriptor):
assert not function_descriptor.is_actor_method()
function_id = function_descriptor.function_id
if (job_id in self._function_execution_info
and function_id in self._function_execution_info[function_id]):
return
module_name, function_name = (
function_descriptor.module_name,
function_descriptor.function_name,
)
try:
module = importlib.import_module(module_name)
function = getattr(module, function_name)._function
self._function_execution_info[job_id][function_id] = (
FunctionExecutionInfo(
function=function,
function_name=function_name,
max_calls=0,
))
self._num_task_executions[job_id][function_id] = 0
except Exception:
logger.exception(
"Failed to load function %s.".format(function_name))
raise Exception(
"Function {} failed to be loaded from local code.".format(
function_descriptor))
    def _wait_for_function(self, function_descriptor, job_id, timeout=10):
        """Wait until the function to be executed is present on this worker.

        This method will simply loop until the import thread has imported
        the relevant function. If we spend too long in this loop, that may
        indicate a problem somewhere and we will push an error message to
        the user.

        If this worker is an actor, then this will wait until the actor has
        been defined.

        Args:
            function_descriptor : The FunctionDescriptor of the function
                that we want to execute.
            job_id (str): The ID of the job to push the error message to
                if this times out.
            timeout: Seconds to wait before pushing a warning to the driver
                (the loop itself continues indefinitely).
        """
        start_time = time.time()
        # Only send the warning once.
        warning_sent = False
        while True:
            # Hold the lock while reading _function_execution_info, which
            # the import thread mutates concurrently.
            with self.lock:
                if (self._worker.actor_id.is_nil()
                        and (function_descriptor.function_id in
                             self._function_execution_info[job_id])):
                    break
                elif not self._worker.actor_id.is_nil() and (
                        self._worker.actor_id in self._worker.actors):
                    break
            if time.time() - start_time > timeout:
                warning_message = ("This worker was asked to execute a "
                                   "function that it does not have "
                                   "registered. You may have to restart "
                                   "Ray.")
                if not warning_sent:
                    ray.utils.push_error_to_driver(
                        self._worker,
                        ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR,
                        warning_message,
                        job_id=job_id)
                warning_sent = True
            time.sleep(0.001)
    def _publish_actor_class_to_key(self, key, actor_class_info):
        """Push an actor class definition to Redis.

        This is factored out as a separate function because it is also
        called on cached actor class definitions when a worker connects for
        the first time.

        Args:
            key: The key to store the actor class info at.
            actor_class_info: Information about the actor class.
        """
        # We set the driver ID here because it may not have been available when
        # the actor class was defined.
        self._worker.redis_client.hmset(key, actor_class_info)
        # Notify import threads on other workers of the new export.
        self._worker.redis_client.rpush("Exports", key)
    def export_actor_class(self, Class, actor_method_names):
        """Pickle an actor class and publish it to redis under an
        ActorClass:<job id>:<function id> key."""
        if self._worker.load_code_from_local:
            # Classes are loaded from local files; no GCS export needed.
            return
        function_descriptor = FunctionDescriptor.from_class(Class)
        # `current_job_id` shouldn't be NIL, unless:
        # 1) This worker isn't an actor;
        # 2) And a previous task started a background thread, which didn't
        #    finish before the task finished, and still uses Ray API
        #    after that.
        assert not self._worker.current_job_id.is_nil(), (
            "You might have started a background thread in a non-actor task, "
            "please make sure the thread finishes before the task finishes.")
        job_id = self._worker.current_job_id
        key = (b"ActorClass:" + job_id.binary() + b":" +
               function_descriptor.function_id.binary())
        actor_class_info = {
            "class_name": Class.__name__,
            "module": Class.__module__,
            "class": pickle.dumps(Class),
            "job_id": job_id.binary(),
            "collision_identifier": self.compute_collision_identifier(Class),
            "actor_method_names": json.dumps(list(actor_method_names))
        }
        check_oversized_pickle(actor_class_info["class"],
                               actor_class_info["class_name"], "actor",
                               self._worker)
        self._publish_actor_class_to_key(key, actor_class_info)
        # TODO(rkn): Currently we allow actor classes to be defined
        # within tasks. I tried to disable this, but it may be necessary
        # because of https://github.com/ray-project/ray/issues/1146.
    def load_actor_class(self, job_id, function_descriptor):
        """Load the actor class, caching it and registering its methods.

        Args:
            job_id: job ID of the actor.
            function_descriptor: Function descriptor of the actor constructor.

        Returns:
            The actor class.
        """
        function_id = function_descriptor.function_id
        # Check if the actor class already exists in the cache.
        actor_class = self._loaded_actor_classes.get(function_id, None)
        if actor_class is None:
            # Load actor class.
            if self._worker.load_code_from_local:
                # Local-code mode keys execution info under the nil job ID.
                job_id = ray.JobID.nil()
                # Load actor class from local code.
                actor_class = self._load_actor_from_local(
                    job_id, function_descriptor)
            else:
                # Load actor class from GCS.
                actor_class = self._load_actor_class_from_gcs(
                    job_id, function_descriptor)
            # Save the loaded actor class in cache.
            self._loaded_actor_classes[function_id] = actor_class
            # Generate execution info for the methods of this actor class.
            module_name = function_descriptor.module_name
            actor_class_name = function_descriptor.class_name
            actor_methods = inspect.getmembers(
                actor_class, predicate=is_function_or_method)
            for actor_method_name, actor_method in actor_methods:
                # Each method gets its own descriptor/ID derived from the
                # (module, method, class) triple.
                method_descriptor = FunctionDescriptor(
                    module_name, actor_method_name, actor_class_name)
                method_id = method_descriptor.function_id
                executor = self._make_actor_method_executor(
                    actor_method_name,
                    actor_method,
                    actor_imported=True,
                )
                self._function_execution_info[job_id][method_id] = (
                    FunctionExecutionInfo(
                        function=executor,
                        function_name=actor_method_name,
                        max_calls=0,
                    ))
                self._num_task_executions[job_id][method_id] = 0
                # NOTE(review): this assignment is loop-invariant (it uses the
                # constructor's function_id, not method_id) and could be
                # hoisted out of the loop; harmless as written.
                self._num_task_executions[job_id][function_id] = 0
        return actor_class
def _load_actor_from_local(self, job_id, function_descriptor):
"""Load actor class from local code."""
assert isinstance(job_id, ray.JobID)
module_name, class_name = (function_descriptor.module_name,
function_descriptor.class_name)
try:
module = importlib.import_module(module_name)
actor_class = getattr(module, class_name)
if isinstance(actor_class, ray.actor.ActorClass):
return actor_class.__ray_metadata__.modified_class
else:
return actor_class
except Exception:
logger.exception(
"Failed to load actor_class %s.".format(class_name))
raise Exception(
"Actor {} failed to be imported from local code.".format(
class_name))
def _create_fake_actor_class(self, actor_class_name, actor_method_names):
class TemporaryActor:
pass
def temporary_actor_method(*args, **kwargs):
raise Exception(
"The actor with name {} failed to be imported, "
"and so cannot execute this method.".format(actor_class_name))
for method in actor_method_names:
setattr(TemporaryActor, method, temporary_actor_method)
return TemporaryActor
def _load_actor_class_from_gcs(self, job_id, function_descriptor):
"""Load actor class from GCS."""
key = (b"ActorClass:" + job_id.binary() + b":" +
function_descriptor.function_id.binary())
# Wait for the actor class key to have been imported by the
# import thread. TODO(rkn): It shouldn't be possible to end
# up in an infinite loop here, but we should push an error to
# the driver if too much time is spent here.
while key not in self.imported_actor_classes:
time.sleep(0.001)
# Fetch raw data from GCS.
(job_id_str, class_name, module, pickled_class,
actor_method_names) = self._worker.redis_client.hmget(
key,
["job_id", "class_name", "module", "class", "actor_method_names"])
class_name = ensure_str(class_name)
module_name = ensure_str(module)
job_id = ray.JobID(job_id_str)
actor_method_names = json.loads(ensure_str(actor_method_names))
actor_class = None
try:
with self.lock:
actor_class = pickle.loads(pickled_class)
except Exception:
logger.exception(
"Failed to load actor class %s.".format(class_name))
# The actor class failed to be unpickled, create a fake actor
# class instead (just to produce error messages and to prevent
# the driver from hanging).
actor_class = self._create_fake_actor_class(
class_name, actor_method_names)
# If an exception was thrown when the actor was imported, we record
# the traceback and notify the scheduler of the failure.
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
# Log the error message.
push_error_to_driver(
self._worker,
ray_constants.REGISTER_ACTOR_PUSH_ERROR,
"Failed to unpickle actor class '{}' for actor ID {}. "
"Traceback:\n{}".format(
class_name, self._worker.actor_id.hex(), traceback_str),
job_id=job_id)
# TODO(rkn): In the future, it might make sense to have the worker
# exit here. However, currently that would lead to hanging if
# someone calls ray.get on a method invoked on the actor.
# The below line is necessary. Because in the driver process,
# if the function is defined in the file where the python script
# was started from, its module is `__main__`.
# However in the worker process, the `__main__` module is a
# different module, which is `default_worker.py`
actor_class.__module__ = module_name
return actor_class
    def _make_actor_method_executor(self, method_name, method, actor_imported):
        """Make an executor that wraps a user-defined actor method.

        The wrapped method updates the worker's internal state and performs any
        necessary checkpointing operations.

        Args:
            method_name (str): The name of the actor method.
            method (instancemethod): The actor method to wrap. This should be a
                method defined on the actor class and should therefore take an
                instance of the actor as the first argument.
            actor_imported (bool): Whether the actor has been imported.
                Checkpointing operations will not be run if this is set to
                False.

        Returns:
            A function that executes the given actor method on the worker's
            stored instance of the actor. The function also updates the
            worker's internal state to record the executed method.
        """

        def actor_method_executor(actor, *args, **kwargs):
            # Update the actor's task counter to reflect the task we're about
            # to execute.
            self._worker.actor_task_counter += 1
            # Execute the assigned method and save a checkpoint if necessary.
            try:
                # Class/static methods do not take the actor instance as the
                # first argument.
                is_bound = (is_class_method(method)
                            or is_static_method(type(actor), method_name))
                if is_bound:
                    method_returns = method(*args, **kwargs)
                else:
                    method_returns = method(actor, *args, **kwargs)
            except Exception as e:
                # Save the checkpoint before allowing the method exception
                # to be thrown, but don't save the checkpoint for actor
                # creation task (counter == 1 means creation task).
                if (isinstance(actor, ray.actor.Checkpointable)
                        and self._worker.actor_task_counter != 1):
                    self._save_and_log_checkpoint(actor)
                raise e
            else:
                # Handle any checkpointing operations before storing the
                # method's return values.
                # NOTE(swang): If method_returns is a pointer to the actor's
                # state and the checkpointing operations can modify the return
                # values if they mutate the actor's state. Is this okay?
                if isinstance(actor, ray.actor.Checkpointable):
                    # If this is the first task to execute on the actor, try to
                    # resume from a checkpoint.
                    if self._worker.actor_task_counter == 1:
                        if actor_imported:
                            self._restore_and_log_checkpoint(actor)
                    else:
                        # Save the checkpoint before returning the method's
                        # return values.
                        self._save_and_log_checkpoint(actor)
                return method_returns

        # Set method_name and method as attributes on the executor closure
        # so we can make decisions based on these attributes in the task
        # executor. Precisely, asyncio support requires knowing whether:
        # - the method is a ray internal method: starts with __ray
        # - the method is a coroutine function: defined by async def
        actor_method_executor.name = method_name
        actor_method_executor.method = method
        return actor_method_executor
    def _save_and_log_checkpoint(self, actor):
        """Save an actor checkpoint if necessary and log any errors.

        Consults the actor's user-defined `should_checkpoint` hook; if it
        returns True, asks the raylet to prepare a checkpoint ID, invokes the
        user's `save_checkpoint`, and expires the oldest checkpoint when over
        the configured limit. Failures are reported to the driver rather
        than raised.

        Args:
            actor: The actor to checkpoint.

        Returns:
            None. (Any value from the user's `save_checkpoint` is discarded.)
        """
        actor_id = self._worker.actor_id
        checkpoint_info = self._worker.actor_checkpoint_info[actor_id]
        checkpoint_info.num_tasks_since_last_checkpoint += 1
        # Timestamps are tracked in integer milliseconds.
        now = int(1000 * time.time())
        checkpoint_context = ray.actor.CheckpointContext(
            actor_id, checkpoint_info.num_tasks_since_last_checkpoint,
            now - checkpoint_info.last_checkpoint_timestamp)
        # If we should take a checkpoint, notify raylet to prepare a checkpoint
        # and then call `save_checkpoint`.
        if actor.should_checkpoint(checkpoint_context):
            try:
                now = int(1000 * time.time())
                checkpoint_id = (self._worker.raylet_client.
                                 prepare_actor_checkpoint(actor_id))
                checkpoint_info.checkpoint_ids.append(checkpoint_id)
                actor.save_checkpoint(actor_id, checkpoint_id)
                # Expire the oldest checkpoint once over the retention limit.
                if (len(checkpoint_info.checkpoint_ids) >
                        ray._config.num_actor_checkpoints_to_keep()):
                    actor.checkpoint_expired(
                        actor_id,
                        checkpoint_info.checkpoint_ids.pop(0),
                    )
                checkpoint_info.num_tasks_since_last_checkpoint = 0
                checkpoint_info.last_checkpoint_timestamp = now
            except Exception:
                # Checkpoint save or reload failed. Notify the driver.
                traceback_str = ray.utils.format_error_message(
                    traceback.format_exc())
                ray.utils.push_error_to_driver(
                    self._worker,
                    ray_constants.CHECKPOINT_PUSH_ERROR,
                    traceback_str,
                    job_id=self._worker.current_job_id)
    def _restore_and_log_checkpoint(self, actor):
        """Restore an actor from a checkpoint if available and log any errors.

        This should only be called on workers that have just executed an actor
        creation task. Failures are reported to the driver rather than raised.

        Args:
            actor: The actor to restore from a checkpoint.
        """
        actor_id = self._worker.actor_id
        try:
            checkpoints = ray.actor.get_checkpoints_for_actor(actor_id)
            if len(checkpoints) > 0:
                # If we found previously saved checkpoints for this actor,
                # call the `load_checkpoint` callback.
                checkpoint_id = actor.load_checkpoint(actor_id, checkpoints)
                # A None return means the user chose not to restore.
                if checkpoint_id is not None:
                    # Check that the returned checkpoint id is in the
                    # `available_checkpoints` list.
                    msg = (
                        "`load_checkpoint` must return a checkpoint id that " +
                        "exists in the `available_checkpoints` list, or None.")
                    assert any(checkpoint_id == checkpoint.checkpoint_id
                               for checkpoint in checkpoints), msg
                    # Notify raylet that this actor has been resumed from
                    # a checkpoint.
                    (self._worker.raylet_client.
                     notify_actor_resumed_from_checkpoint(
                         actor_id, checkpoint_id))
        except Exception:
            # Checkpoint save or reload failed. Notify the driver.
            traceback_str = ray.utils.format_error_message(
                traceback.format_exc())
            ray.utils.push_error_to_driver(
                self._worker,
                ray_constants.CHECKPOINT_PUSH_ERROR,
                traceback_str,
                job_id=self._worker.current_job_id)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/gcs_utils.py
|
Python
|
from ray.core.generated.gcs_pb2 import (
ActorCheckpointIdData,
ActorTableData,
GcsNodeInfo,
JobTableData,
ErrorTableData,
ErrorType,
GcsEntry,
HeartbeatBatchTableData,
HeartbeatTableData,
ObjectTableData,
ProfileTableData,
TablePrefix,
TablePubsub,
TaskTableData,
ResourceTableData,
)
# Public API of this module: the protobuf message types re-exported above
# plus the helper for building serialized error entries.
__all__ = [
    "ActorCheckpointIdData",
    "ActorTableData",
    "GcsNodeInfo",
    "JobTableData",
    "ErrorTableData",
    "ErrorType",
    "GcsEntry",
    "HeartbeatBatchTableData",
    "HeartbeatTableData",
    "ObjectTableData",
    "ProfileTableData",
    "TablePrefix",
    "TablePubsub",
    "TaskTableData",
    "ResourceTableData",
    "construct_error_message",
]

# Redis key prefix under which remote function definitions are exported.
FUNCTION_PREFIX = "RemoteFunction:"
# Redis pub/sub channel names for log forwarding and the reporter.
LOG_FILE_CHANNEL = "RAY_LOG_CHANNEL"
REPORTER_CHANNEL = "RAY_REPORTER"
# xray heartbeats
XRAY_HEARTBEAT_CHANNEL = str(
    TablePubsub.Value("HEARTBEAT_PUBSUB")).encode("ascii")
XRAY_HEARTBEAT_BATCH_CHANNEL = str(
    TablePubsub.Value("HEARTBEAT_BATCH_PUBSUB")).encode("ascii")
# xray job updates
XRAY_JOB_CHANNEL = str(TablePubsub.Value("JOB_PUBSUB")).encode("ascii")
# These prefixes must be kept up-to-date with the TablePrefix enum in
# gcs.proto.
# TODO(rkn): We should use scoped enums, in which case we should be able to
# just access the flatbuffer generated values.
TablePrefix_RAYLET_TASK_string = "RAYLET_TASK"
TablePrefix_OBJECT_string = "OBJECT"
TablePrefix_ERROR_INFO_string = "ERROR_INFO"
TablePrefix_PROFILE_string = "PROFILE"
TablePrefix_JOB_string = "JOB"
TablePrefix_ACTOR_string = "ACTOR"
def construct_error_message(job_id, error_type, message, timestamp):
    """Build and serialize an ErrorTableData protobuf.

    Args:
        job_id: The ID of the job that the error should go to. If this is
            nil, then the error will go to all drivers.
        error_type: The type of the error.
        message: The error message.
        timestamp: The time of the error.

    Returns:
        The serialized protobuf, as bytes.
    """
    error_data = ErrorTableData()
    error_data.job_id = job_id.binary()
    error_data.type = error_type
    error_data.error_message = message
    error_data.timestamp = timestamp
    return error_data.SerializeToString()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/import_thread.py
|
Python
|
from collections import defaultdict
import threading
import traceback
import redis
import ray
from ray import ray_constants
from ray import cloudpickle as pickle
from ray import profiling
from ray import utils
import logging
logger = logging.getLogger(__name__)
class ImportThread:
    """A thread used to import exports from the driver or other workers.

    Note: The driver also has an import thread, which is used only to import
    custom class definitions from calls to _register_custom_serializer that
    happen under the hood on workers.

    Attributes:
        worker: the worker object in this process.
        mode: worker mode
        redis_client: the redis client used to query exports.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
        imported_collision_identifiers: This is a dictionary mapping collision
            identifiers for the exported remote functions and actor classes to
            the number of times that collision identifier has appeared. This is
            used to provide good error messages when the same function or class
            is exported many times.
    """

    def __init__(self, worker, mode, threads_stopped):
        self.worker = worker
        self.mode = mode
        self.redis_client = worker.redis_client
        self.threads_stopped = threads_stopped
        self.imported_collision_identifiers = defaultdict(int)

    def start(self):
        """Start the import thread."""
        self.t = threading.Thread(target=self._run, name="ray_import_thread")
        # Making the thread a daemon causes it to exit
        # when the main thread exits.
        self.t.daemon = True
        self.t.start()

    def join_import_thread(self):
        """Wait for the thread to exit."""
        self.t.join()

    def _run(self):
        """Main loop: process existing exports, then follow new ones.

        Subscribes to Redis keyspace notifications on the "Exports" list so
        each RPUSH by an exporting process wakes this thread.
        """
        import_pubsub_client = self.redis_client.pubsub()
        # Exports that are published after the call to
        # import_pubsub_client.subscribe and before the call to
        # import_pubsub_client.listen will still be processed in the loop.
        import_pubsub_client.subscribe("__keyspace@0__:Exports")
        # Keep track of the number of imports that we've imported.
        num_imported = 0
        try:
            # Get the exports that occurred before the call to subscribe.
            export_keys = self.redis_client.lrange("Exports", 0, -1)
            for key in export_keys:
                num_imported += 1
                self._process_key(key)
            while True:
                # Exit if we received a signal that we should stop.
                if self.threads_stopped.is_set():
                    return
                msg = import_pubsub_client.get_message()
                if msg is None:
                    # Nothing pending; poll again shortly (also re-checks the
                    # stop event on each iteration).
                    self.threads_stopped.wait(timeout=0.01)
                    continue
                if msg["type"] == "subscribe":
                    continue
                # Keyspace notification for the list: only RPUSH is expected.
                assert msg["data"] == b"rpush"
                num_imports = self.redis_client.llen("Exports")
                assert num_imports >= num_imported
                # Process every entry appended since the last wakeup.
                for i in range(num_imported, num_imports):
                    num_imported += 1
                    key = self.redis_client.lindex("Exports", i)
                    self._process_key(key)
        except (OSError, redis.exceptions.ConnectionError) as e:
            logger.error("ImportThread: {}".format(e))
        finally:
            # Close the pubsub client to avoid leaking file descriptors.
            import_pubsub_client.close()

    def _get_import_info_for_collision_detection(self, key):
        """Retrieve the collision identifier, type, and name of the import.

        Returns None implicitly for key types other than RemoteFunction
        and ActorClass.
        """
        if key.startswith(b"RemoteFunction"):
            collision_identifier, function_name = (self.redis_client.hmget(
                key, ["collision_identifier", "function_name"]))
            return (collision_identifier, ray.utils.decode(function_name),
                    "remote function")
        elif key.startswith(b"ActorClass"):
            collision_identifier, class_name = self.redis_client.hmget(
                key, ["collision_identifier", "class_name"])
            return collision_identifier, ray.utils.decode(class_name), "actor"

    def _process_key(self, key):
        """Process the given export key from redis."""
        # Handle the driver case first.
        if self.mode != ray.WORKER_MODE:
            if key.startswith(b"FunctionsToRun"):
                with profiling.profile("fetch_and_run_function"):
                    self.fetch_and_execute_function_to_run(key)
            # If the same remote function or actor definition appears to be
            # exported many times, then print a warning. We only issue this
            # warning from the driver so that it is only triggered once instead
            # of many times. TODO(rkn): We may want to push this to the driver
            # through Redis so that it can be displayed in the dashboard more
            # easily.
            elif (key.startswith(b"RemoteFunction")
                  or key.startswith(b"ActorClass")):
                collision_identifier, name, import_type = (
                    self._get_import_info_for_collision_detection(key))
                self.imported_collision_identifiers[collision_identifier] += 1
                # Warn exactly once, when the count reaches the threshold.
                if (self.imported_collision_identifiers[collision_identifier]
                        == ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD):
                    logger.warning(
                        "The %s '%s' has been exported %s times. It's "
                        "possible that this warning is accidental, but this "
                        "may indicate that the same remote function is being "
                        "defined repeatedly from within many tasks and "
                        "exported to all of the workers. This can be a "
                        "performance issue and can be resolved by defining "
                        "the remote function on the driver instead. See "
                        "https://github.com/ray-project/ray/issues/6240 for "
                        "more discussion.", import_type, name,
                        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD)
            # Return because FunctionsToRun are the only things that
            # the driver should import.
            return
        if key.startswith(b"RemoteFunction"):
            with profiling.profile("register_remote_function"):
                (self.worker.function_actor_manager.
                 fetch_and_register_remote_function(key))
        elif key.startswith(b"FunctionsToRun"):
            with profiling.profile("fetch_and_run_function"):
                self.fetch_and_execute_function_to_run(key)
        elif key.startswith(b"ActorClass"):
            # Keep track of the fact that this actor class has been
            # exported so that we know it is safe to turn this worker
            # into an actor of that class.
            self.worker.function_actor_manager.imported_actor_classes.add(key)
        # TODO(rkn): We may need to bring back the case of
        # fetching actor classes here.
        else:
            raise Exception("This code should be unreachable.")

    def fetch_and_execute_function_to_run(self, key):
        """Run an arbitrary exported function on this worker."""
        (job_id, serialized_function,
         run_on_other_drivers) = self.redis_client.hmget(
             key, ["job_id", "function", "run_on_other_drivers"])
        # Skip functions exported by other jobs unless they opted in to
        # running on all drivers.
        if (utils.decode(run_on_other_drivers) == "False"
                and self.worker.mode == ray.SCRIPT_MODE
                and job_id != self.worker.current_job_id.binary()):
            return
        try:
            # FunctionActorManager may call pickle.loads at the same time.
            # Importing the same module in different threads causes deadlock.
            with self.worker.function_actor_manager.lock:
                # Deserialize the function.
                function = pickle.loads(serialized_function)
            # Run the function.
            function({"worker": self.worker})
        except Exception:
            # If an exception was thrown when the function was run, we record
            # the traceback and notify the scheduler of the failure.
            traceback_str = traceback.format_exc()
            # Log the error message.
            utils.push_error_to_driver(
                self.worker,
                ray_constants.FUNCTION_TO_RUN_PUSH_ERROR,
                traceback_str,
                job_id=ray.JobID(job_id))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/includes/common.pxd
|
Cython
|
from libcpp cimport bool as c_bool
from libcpp.memory cimport shared_ptr, unique_ptr
from libcpp.string cimport string as c_string
from libc.stdint cimport uint8_t, int32_t, uint64_t, int64_t
from libcpp.unordered_map cimport unordered_map
from libcpp.vector cimport vector as c_vector
from ray.includes.unique_ids cimport (
CActorID,
CJobID,
CWorkerID,
CObjectID,
CTaskID,
)
# Workaround: Cython cannot spell std::move's overload set directly, so we
# inject a small verbatim C++ "polyfill" namespace exposing move() for both
# lvalue and rvalue references, then declare it as a fused-template function.
cdef extern from * namespace "polyfill":
    """
    namespace polyfill {
    template <typename T>
    inline typename std::remove_reference<T>::type&& move(T& t) {
        return std::move(t);
    }
    template <typename T>
    inline typename std::remove_reference<T>::type&& move(T&& t) {
        return std::move(t);
    }
    }  // namespace polyfill
    """
    cdef T move[T](T)
# Declarations mirroring ray/common/status.h: ray::Status and its factory
# helpers / predicates. Keep in sync with the C++ header.
cdef extern from "ray/common/status.h" namespace "ray" nogil:
    cdef cppclass StatusCode:
        pass

    cdef cppclass CRayStatus "ray::Status":
        RayStatus()
        RayStatus(StatusCode code, const c_string &msg)
        RayStatus(const CRayStatus &s)

        @staticmethod
        CRayStatus OK()

        @staticmethod
        CRayStatus OutOfMemory(const c_string &msg)

        @staticmethod
        CRayStatus KeyError(const c_string &msg)

        @staticmethod
        CRayStatus Invalid(const c_string &msg)

        @staticmethod
        CRayStatus IOError(const c_string &msg)

        @staticmethod
        CRayStatus TypeError(const c_string &msg)

        @staticmethod
        CRayStatus UnknownError(const c_string &msg)

        @staticmethod
        CRayStatus NotImplemented(const c_string &msg)

        @staticmethod
        CRayStatus ObjectStoreFull(const c_string &msg)

        @staticmethod
        CRayStatus RedisError(const c_string &msg)

        @staticmethod
        CRayStatus TimedOut(const c_string &msg)

        @staticmethod
        CRayStatus Interrupted(const c_string &msg)

        @staticmethod
        CRayStatus IntentionalSystemExit()

        @staticmethod
        CRayStatus UnexpectedSystemExit()

        c_bool ok()
        c_bool IsOutOfMemory()
        c_bool IsKeyError()
        c_bool IsInvalid()
        c_bool IsIOError()
        c_bool IsTypeError()
        c_bool IsUnknownError()
        c_bool IsNotImplemented()
        c_bool IsObjectStoreFull()
        c_bool IsRedisError()
        c_bool IsTimedOut()
        c_bool IsInterrupted()
        c_bool IsSystemExit()

        c_string ToString()
        c_string CodeAsString()
        StatusCode code()
        c_string message()

    # We can later add more of the common status factory methods as needed
    cdef CRayStatus RayStatus_OK "Status::OK"()
    cdef CRayStatus RayStatus_Invalid "Status::Invalid"()
    cdef CRayStatus RayStatus_NotImplemented "Status::NotImplemented"()
# Individual StatusCode enumerators, declared one by one because Cython has
# no direct representation for a C++ enum class.
cdef extern from "ray/common/status.h" namespace "ray::StatusCode" nogil:
    cdef StatusCode StatusCode_OK "OK"
    cdef StatusCode StatusCode_OutOfMemory "OutOfMemory"
    cdef StatusCode StatusCode_KeyError "KeyError"
    cdef StatusCode StatusCode_TypeError "TypeError"
    cdef StatusCode StatusCode_Invalid "Invalid"
    cdef StatusCode StatusCode_IOError "IOError"
    cdef StatusCode StatusCode_UnknownError "UnknownError"
    cdef StatusCode StatusCode_NotImplemented "NotImplemented"
    cdef StatusCode StatusCode_RedisError "RedisError"


# Deterministic task-ID derivation helper from ray/common/id.h.
cdef extern from "ray/common/id.h" namespace "ray" nogil:
    const CTaskID GenerateTaskId(const CJobID &job_id,
                                 const CTaskID &parent_task_id,
                                 int parent_task_counter)
# Protobuf-generated types from common.pb.h used across the bindings.
cdef extern from "ray/protobuf/common.pb.h" nogil:
    cdef cppclass CLanguage "Language":
        pass
    cdef cppclass CWorkerType "ray::WorkerType":
        pass
    cdef cppclass CTaskType "ray::TaskType":
        pass
    cdef cppclass CAddress "ray::rpc::Address":
        CAddress()
        const c_string &SerializeAsString()
        void ParseFromString(const c_string &serialized)


# This is a workaround for C++ enum class since Cython has no corresponding
# representation.
cdef extern from "ray/protobuf/common.pb.h" nogil:
    cdef CLanguage LANGUAGE_PYTHON "Language::PYTHON"
    cdef CLanguage LANGUAGE_CPP "Language::CPP"
    cdef CLanguage LANGUAGE_JAVA "Language::JAVA"

cdef extern from "ray/protobuf/common.pb.h" nogil:
    cdef CWorkerType WORKER_TYPE_WORKER "ray::WorkerType::WORKER"
    cdef CWorkerType WORKER_TYPE_DRIVER "ray::WorkerType::DRIVER"

cdef extern from "ray/protobuf/common.pb.h" nogil:
    cdef CTaskType TASK_TYPE_NORMAL_TASK "ray::TaskType::NORMAL_TASK"
    cdef CTaskType TASK_TYPE_ACTOR_CREATION_TASK "ray::TaskType::ACTOR_CREATION_TASK" # noqa: E501
    cdef CTaskType TASK_TYPE_ACTOR_TASK "ray::TaskType::ACTOR_TASK"
# Resource accounting type from the raylet scheduler.
cdef extern from "ray/common/task/scheduling_resources.h" nogil:
    cdef cppclass ResourceSet "ray::ResourceSet":
        ResourceSet()
        ResourceSet(const unordered_map[c_string, double] &resource_map)
        ResourceSet(const c_vector[c_string] &resource_labels,
                    const c_vector[double] resource_capacity)
        c_bool operator==(const ResourceSet &rhs) const
        c_bool IsEqual(const ResourceSet &other) const
        c_bool IsSubset(const ResourceSet &other) const
        c_bool IsSuperset(const ResourceSet &other) const
        c_bool AddOrUpdateResource(const c_string &resource_name,
                                   double capacity)
        c_bool RemoveResource(const c_string &resource_name)
        void AddResources(const ResourceSet &other)
        c_bool SubtractResourcesStrict(const ResourceSet &other)
        c_bool GetResource(const c_string &resource_name, double *value) const
        double GetNumCpus() const
        c_bool IsEmpty() const
        const unordered_map[c_string, double] &GetResourceMap() const
        const c_string ToString() const
# Buffer abstraction (raw pointer + length) used for object data/metadata.
cdef extern from "ray/common/buffer.h" namespace "ray" nogil:
    cdef cppclass CBuffer "ray::Buffer":
        uint8_t *Data() const
        size_t Size() const

    cdef cppclass LocalMemoryBuffer(CBuffer):
        LocalMemoryBuffer(uint8_t *data, size_t size, c_bool copy_data)
        LocalMemoryBuffer(size_t size)

# A Ray object: optional data buffer plus optional metadata buffer.
cdef extern from "ray/common/ray_object.h" nogil:
    cdef cppclass CRayObject "ray::RayObject":
        c_bool HasData() const
        c_bool HasMetadata() const
        const size_t DataSize() const
        const shared_ptr[CBuffer] &GetData()
        const shared_ptr[CBuffer] &GetMetadata() const
        c_bool IsInPlasmaError() const
# Task/actor submission option structs from the core worker layer.
cdef extern from "ray/core_worker/common.h" nogil:
    cdef cppclass CRayFunction "ray::RayFunction":
        CRayFunction()
        CRayFunction(CLanguage language,
                     const c_vector[c_string] function_descriptor)
        CLanguage GetLanguage()
        const c_vector[c_string]& GetFunctionDescriptor()

    cdef cppclass CTaskArg "ray::TaskArg":
        @staticmethod
        CTaskArg PassByReference(const CObjectID &object_id)

        @staticmethod
        CTaskArg PassByValue(const shared_ptr[CRayObject] &data)

    cdef cppclass CTaskOptions "ray::TaskOptions":
        CTaskOptions()
        CTaskOptions(int num_returns, c_bool is_direct_call,
                     unordered_map[c_string, double] &resources)

    cdef cppclass CActorCreationOptions "ray::ActorCreationOptions":
        CActorCreationOptions()
        CActorCreationOptions(
            uint64_t max_reconstructions, c_bool is_direct_call,
            int32_t max_concurrency,
            const unordered_map[c_string, double] &resources,
            const unordered_map[c_string, double] &placement_resources,
            const c_vector[c_string] &dynamic_worker_options,
            c_bool is_detached, c_bool is_asyncio)

# Connection options for the GCS client.
cdef extern from "ray/gcs/gcs_client.h" nogil:
    cdef cppclass CGcsClientOptions "ray::gcs::GcsClientOptions":
        CGcsClientOptions(const c_string &ip, int port,
                          const c_string &password,
                          c_bool is_test_client)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/includes/libcoreworker.pxd
|
Cython
|
# cython: profile = False
# distutils: language = c++
# cython: embedsignature = True
from libc.stdint cimport int64_t
from libcpp cimport bool as c_bool
from libcpp.memory cimport shared_ptr, unique_ptr
from libcpp.string cimport string as c_string
from libcpp.unordered_map cimport unordered_map
from libcpp.utility cimport pair
from libcpp.vector cimport vector as c_vector
from ray.includes.unique_ids cimport (
CActorID,
CJobID,
CTaskID,
CObjectID,
)
from ray.includes.common cimport (
CAddress,
CActorCreationOptions,
CBuffer,
CRayFunction,
CRayObject,
CRayStatus,
CTaskArg,
CTaskOptions,
CTaskType,
CWorkerType,
CLanguage,
CGcsClientOptions,
)
from ray.includes.task cimport CTaskSpec
from ray.includes.libraylet cimport CRayletClient
# Maps a resource name to (resource slot index, capacity) pairs.
ctypedef unordered_map[c_string, c_vector[pair[int64_t, double]]] \
    ResourceMappingType

# C callback signature used for async object retrieval notifications.
ctypedef void (*ray_callback_function) \
    (shared_ptr[CRayObject] result_object,
     CObjectID object_id, void* user_data)

cdef extern from "ray/core_worker/profiling.h" nogil:
    cdef cppclass CProfiler "ray::worker::Profiler":
        void Start()

    cdef cppclass CProfileEvent "ray::worker::ProfileEvent":
        CProfileEvent(const shared_ptr[CProfiler] profiler,
                      const c_string &event_type)
        void SetExtraData(const c_string &extra_data)

# NOTE(review): CProfileEvent is declared a second time below with only
# SetExtraData; this looks redundant with the block above — confirm before
# removing.
cdef extern from "ray/core_worker/profiling.h" nogil:
    cdef cppclass CProfileEvent "ray::worker::ProfileEvent":
        void SetExtraData(const c_string &extra_data)

# Event used to park/resume fibers for async actor support.
cdef extern from "ray/core_worker/transport/direct_actor_transport.h" nogil:
    cdef cppclass CFiberEvent "ray::FiberEvent":
        CFiberEvent()
        void Wait()
        void Notify()

cdef extern from "ray/core_worker/context.h" nogil:
    cdef cppclass CWorkerContext "ray::WorkerContext":
        c_bool CurrentActorIsAsync()
# Declarations mirroring the C++ ray::CoreWorker public API. Keep the
# signatures in sync with ray/core_worker/core_worker.h.
cdef extern from "ray/core_worker/core_worker.h" nogil:
    cdef cppclass CCoreWorker "ray::CoreWorker":
        CCoreWorker(const CWorkerType worker_type, const CLanguage language,
                    const c_string &store_socket,
                    const c_string &raylet_socket, const CJobID &job_id,
                    const CGcsClientOptions &gcs_options,
                    const c_string &log_dir, const c_string &node_ip_address,
                    int node_manager_port,
                    CRayStatus (
                        CTaskType task_type,
                        const CRayFunction &ray_function,
                        const unordered_map[c_string, double] &resources,
                        const c_vector[shared_ptr[CRayObject]] &args,
                        const c_vector[CObjectID] &arg_reference_ids,
                        const c_vector[CObjectID] &return_ids,
                        c_vector[shared_ptr[CRayObject]] *returns) nogil,
                    CRayStatus() nogil,
                    c_bool ref_counting_enabled)
        CWorkerType &GetWorkerType()
        CLanguage &GetLanguage()
        void StartExecutingTasks()
        # Task and actor submission.
        CRayStatus SubmitTask(
            const CRayFunction &function, const c_vector[CTaskArg] &args,
            const CTaskOptions &options, c_vector[CObjectID] *return_ids,
            int max_retries)
        CRayStatus CreateActor(
            const CRayFunction &function, const c_vector[CTaskArg] &args,
            const CActorCreationOptions &options, CActorID *actor_id)
        CRayStatus SubmitActorTask(
            const CActorID &actor_id, const CRayFunction &function,
            const c_vector[CTaskArg] &args, const CTaskOptions &options,
            c_vector[CObjectID] *return_ids)
        CRayStatus KillActor(const CActorID &actor_id)
        unique_ptr[CProfileEvent] CreateProfileEvent(
            const c_string &event_type)
        CRayStatus AllocateReturnObjects(
            const c_vector[CObjectID] &object_ids,
            const c_vector[size_t] &data_sizes,
            const c_vector[shared_ptr[CBuffer]] &metadatas,
            c_vector[shared_ptr[CRayObject]] *return_objects)

        # TODO(edoakes): remove this once the raylet client is no longer used
        # directly.
        CRayletClient &GetRayletClient()

        CJobID GetCurrentJobId()
        CTaskID GetCurrentTaskId()
        const CActorID &GetActorId()
        void SetWebuiDisplay(const c_string &message)
        CTaskID GetCallerId()
        const ResourceMappingType &GetResourceIDs() const
        # Actor handle (de)serialization.
        CActorID DeserializeAndRegisterActorHandle(const c_string &bytes)
        CRayStatus SerializeActorHandle(const CActorID &actor_id, c_string
                                        *bytes)
        # Reference counting / ownership plumbing.
        void AddLocalReference(const CObjectID &object_id)
        void RemoveLocalReference(const CObjectID &object_id)
        void PromoteObjectToPlasma(const CObjectID &object_id)
        void PromoteToPlasmaAndGetOwnershipInfo(const CObjectID &object_id,
                                                CTaskID *owner_id,
                                                CAddress *owner_address)
        void RegisterOwnershipInfoAndResolveFuture(
            const CObjectID &object_id, const CTaskID &owner_id, const
            CAddress &owner_address)

        # Object store operations.
        CRayStatus SetClientOptions(c_string client_name, int64_t limit)
        CRayStatus Put(const CRayObject &object, CObjectID *object_id)
        CRayStatus Put(const CRayObject &object, const CObjectID &object_id)
        CRayStatus Create(const shared_ptr[CBuffer] &metadata,
                          const size_t data_size,
                          CObjectID *object_id, shared_ptr[CBuffer] *data)
        CRayStatus Create(const shared_ptr[CBuffer] &metadata,
                          const size_t data_size, const CObjectID &object_id,
                          shared_ptr[CBuffer] *data)
        CRayStatus Seal(const CObjectID &object_id, c_bool owns_object,
                        c_bool pin_object)
        CRayStatus Get(const c_vector[CObjectID] &ids, int64_t timeout_ms,
                       c_vector[shared_ptr[CRayObject]] *results)
        CRayStatus Contains(const CObjectID &object_id, c_bool *has_object)
        CRayStatus Wait(const c_vector[CObjectID] &object_ids, int num_objects,
                        int64_t timeout_ms, c_vector[c_bool] *results)
        CRayStatus Delete(const c_vector[CObjectID] &object_ids,
                          c_bool local_only, c_bool delete_creating_tasks)
        c_string MemoryUsageString()

        CWorkerContext &GetWorkerContext()
        void YieldCurrentFiber(CFiberEvent &coroutine_done)
        unordered_map[CObjectID, pair[size_t, size_t]] GetAllReferenceCounts()
        void GetAsync(const CObjectID &object_id,
                      ray_callback_function successs_callback,
                      ray_callback_function fallback_callback,
                      void* python_future)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/includes/libraylet.pxd
|
Cython
|
from libc.stdint cimport int64_t
from libcpp cimport bool as c_bool
from libcpp.memory cimport unique_ptr
from libcpp.string cimport string as c_string
from libcpp.utility cimport pair
from libcpp.vector cimport vector as c_vector
from ray.includes.common cimport (
CLanguage,
CRayStatus,
)
from ray.includes.unique_ids cimport (
CActorCheckpointID,
CActorID,
CClientID,
CJobID,
CWorkerID,
CObjectID,
CTaskID,
)
from ray.includes.task cimport CTaskSpec
cdef extern from "ray/protobuf/gcs.pb.h" nogil:
cdef cppclass GCSProfileEvent "ProfileTableData::ProfileEvent":
void set_event_type(const c_string &value)
void set_start_time(double value)
void set_end_time(double value)
c_string set_extra_data(const c_string &value)
GCSProfileEvent()
cdef cppclass GCSProfileTableData "ProfileTableData":
void set_component_type(const c_string &value)
void set_component_id(const c_string &value)
void set_node_ip_address(const c_string &value)
GCSProfileEvent *add_profile_events()
GCSProfileTableData()
ctypedef pair[c_vector[CObjectID], c_vector[CObjectID]] WaitResultPair
cdef extern from "ray/raylet/raylet_client.h" nogil:
cdef cppclass CRayletClient "ray::raylet::RayletClient":
CRayletClient(const c_string &raylet_socket,
const CWorkerID &worker_id,
c_bool is_worker, const CJobID &job_id,
const CLanguage &language)
CRayStatus Disconnect()
CRayStatus SubmitTask(const CTaskSpec &task_spec)
CRayStatus FetchOrReconstruct(c_vector[CObjectID] &object_ids,
c_bool fetch_only,
c_bool is_direct_call_task,
const CTaskID ¤t_task_id)
CRayStatus NotifyUnblocked(const CTaskID ¤t_task_id)
CRayStatus Wait(const c_vector[CObjectID] &object_ids,
int num_returns, int64_t timeout_milliseconds,
c_bool wait_local, c_bool is_direct_call_task,
const CTaskID ¤t_task_id,
WaitResultPair *result)
CRayStatus PushError(const CJobID &job_id, const c_string &type,
const c_string &error_message, double timestamp)
CRayStatus PushProfileEvents(
const GCSProfileTableData &profile_events)
CRayStatus FreeObjects(const c_vector[CObjectID] &object_ids,
c_bool local_only, c_bool delete_creating_tasks)
CRayStatus PrepareActorCheckpoint(const CActorID &actor_id,
CActorCheckpointID &checkpoint_id)
CRayStatus NotifyActorResumedFromCheckpoint(
const CActorID &actor_id, const CActorCheckpointID &checkpoint_id)
CRayStatus SetResource(const c_string &resource_name,
const double capacity,
const CClientID &client_Id)
CLanguage GetLanguage() const
CWorkerID GetWorkerID() const
CJobID GetJobID() const
c_bool IsWorker() const
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/includes/ray_config.pxd
|
Cython
|
from libc.stdint cimport int64_t, uint64_t, uint32_t
from libcpp.string cimport string as c_string
from libcpp.unordered_map cimport unordered_map
# Cython declarations mirroring the C++ `RayConfig` singleton declared in
# ray/common/ray_config.h. Each method exposes one internal tuning knob
# (timeouts, batch sizes, retry counts, ...) as a read-only accessor;
# initialize() loads string->string overrides parsed on the C++ side.
cdef extern from "ray/common/ray_config.h" nogil:
    cdef cppclass RayConfig "RayConfig":
        # Process-wide singleton accessor; all reads go through instance().
        @staticmethod
        RayConfig &instance()
        int64_t ray_cookie() const
        int64_t handler_warning_timeout_ms() const
        int64_t raylet_heartbeat_timeout_milliseconds() const
        int64_t debug_dump_period_milliseconds() const
        int64_t num_heartbeats_timeout() const
        uint64_t num_heartbeats_warning() const
        int64_t initial_reconstruction_timeout_milliseconds() const
        int64_t get_timeout_milliseconds() const
        uint64_t max_lineage_size() const
        int64_t worker_get_request_size() const
        int64_t worker_fetch_request_size() const
        int64_t actor_max_dummy_objects() const
        int64_t num_connect_attempts() const
        int64_t connect_timeout_milliseconds() const
        int64_t raylet_fetch_timeout_milliseconds() const
        int64_t raylet_reconstruction_timeout_milliseconds() const
        int64_t max_num_to_reconstruct() const
        int64_t raylet_fetch_request_size() const
        int64_t kill_worker_timeout_milliseconds() const
        int64_t max_time_for_handler_milliseconds() const
        int64_t size_limit() const
        int64_t num_elements_limit() const
        int64_t max_time_for_loop() const
        # NOTE(review): unlike its siblings this one is declared non-const in
        # the C++ header; kept as-is to match the actual declaration.
        int64_t redis_db_connect_retries()
        int64_t redis_db_connect_wait_milliseconds() const
        int64_t plasma_default_release_delay() const
        int64_t L3_cache_size_bytes() const
        int64_t max_tasks_to_spillback() const
        int64_t actor_creation_num_spillbacks_warning() const
        int node_manager_forward_task_retry_timeout_milliseconds() const
        int object_manager_pull_timeout_ms() const
        int object_manager_push_timeout_ms() const
        int object_manager_repeated_push_delay_ms() const
        uint64_t object_manager_default_chunk_size() const
        int num_workers_per_process_python() const
        int num_workers_per_process_java() const
        int64_t max_task_lease_timeout_ms() const
        uint32_t num_actor_checkpoints_to_keep() const
        uint32_t maximum_gcs_deletion_batch_size() const
        # Apply configuration overrides from a raw string map.
        void initialize(const unordered_map[c_string, c_string] &config_map)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/includes/task.pxd
|
Cython
|
from libc.stdint cimport uint8_t, uint64_t
from libcpp cimport bool as c_bool
from libcpp.memory cimport unique_ptr, shared_ptr
from libcpp.string cimport string as c_string
from libcpp.unordered_map cimport unordered_map
from libcpp.vector cimport vector as c_vector
from ray.includes.common cimport (
CLanguage,
ResourceSet,
)
from ray.includes.unique_ids cimport (
CActorID,
CJobID,
CObjectID,
CTaskID,
)
cdef extern from "ray/protobuf/common.pb.h" nogil:
cdef cppclass RpcTaskSpec "ray::rpc::TaskSpec":
void CopyFrom(const RpcTaskSpec &value)
cdef cppclass RpcTaskExecutionSpec "ray::rpc::TaskExecutionSpec":
void CopyFrom(const RpcTaskExecutionSpec &value)
void add_dependencies(const c_string &value)
cdef cppclass RpcTask "ray::rpc::Task":
RpcTaskSpec *mutable_task_spec()
cdef extern from "ray/protobuf/gcs.pb.h" nogil:
cdef cppclass TaskTableData "ray::rpc::TaskTableData":
RpcTask *mutable_task()
const c_string &SerializeAsString()
cdef extern from "ray/common/task/task_spec.h" nogil:
cdef cppclass CTaskSpec "ray::TaskSpecification":
CTaskSpec(const RpcTaskSpec message)
CTaskSpec(const c_string &serialized_binary)
const RpcTaskSpec &GetMessage()
c_string Serialize() const
CTaskID TaskId() const
CJobID JobId() const
CTaskID ParentTaskId() const
uint64_t ParentCounter() const
c_vector[c_string] FunctionDescriptor() const
c_string FunctionDescriptorString() const
uint64_t NumArgs() const
uint64_t NumReturns() const
c_bool ArgByRef(uint64_t arg_index) const
int ArgIdCount(uint64_t arg_index) const
CObjectID ArgId(uint64_t arg_index, uint64_t id_index) const
CObjectID ReturnIdForPlasma(uint64_t return_index) const
const uint8_t *ArgData(uint64_t arg_index) const
size_t ArgDataSize(uint64_t arg_index) const
const uint8_t *ArgMetadata(uint64_t arg_index) const
size_t ArgMetadataSize(uint64_t arg_index) const
double GetRequiredResource(const c_string &resource_name) const
const ResourceSet GetRequiredResources() const
const ResourceSet GetRequiredPlacementResources() const
c_bool IsDriverTask() const
CLanguage GetLanguage() const
c_bool IsNormalTask() const
c_bool IsActorCreationTask() const
c_bool IsActorTask() const
CActorID ActorCreationId() const
CObjectID ActorCreationDummyObjectId() const
CObjectID PreviousActorTaskDummyObjectId() const
uint64_t MaxActorReconstructions() const
CActorID ActorId() const
uint64_t ActorCounter() const
CObjectID ActorDummyObject() const
cdef extern from "ray/common/task/task_execution_spec.h" nogil:
cdef cppclass CTaskExecutionSpec "ray::TaskExecutionSpecification":
CTaskExecutionSpec(RpcTaskExecutionSpec message)
CTaskExecutionSpec(const c_string &serialized_binary)
const RpcTaskExecutionSpec &GetMessage()
c_vector[CObjectID] ExecutionDependencies()
uint64_t NumForwards()
cdef extern from "ray/common/task/task.h" nogil:
cdef cppclass CTask "ray::Task":
CTask(CTaskSpec task_spec, CTaskExecutionSpec task_execution_spec)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/includes/unique_ids.pxd
|
Cython
|
from libcpp cimport bool as c_bool
from libcpp.string cimport string as c_string
from libc.stdint cimport uint8_t, uint32_t, int64_t
cdef extern from "ray/common/id.h" namespace "ray" nogil:
cdef cppclass CBaseID[T]:
@staticmethod
T FromBinary(const c_string &binary)
@staticmethod
const T Nil()
@staticmethod
size_t Size()
size_t Hash() const
c_bool IsNil() const
c_bool operator==(const CBaseID &rhs) const
c_bool operator!=(const CBaseID &rhs) const
const uint8_t *data() const
c_string Binary() const
c_string Hex() const
cdef cppclass CUniqueID "ray::UniqueID"(CBaseID):
CUniqueID()
@staticmethod
size_t Size()
@staticmethod
CUniqueID FromRandom()
@staticmethod
CUniqueID FromBinary(const c_string &binary)
@staticmethod
const CUniqueID Nil()
@staticmethod
size_t Size()
cdef cppclass CActorCheckpointID "ray::ActorCheckpointID"(CUniqueID):
@staticmethod
CActorCheckpointID FromBinary(const c_string &binary)
cdef cppclass CActorClassID "ray::ActorClassID"(CUniqueID):
@staticmethod
CActorClassID FromBinary(const c_string &binary)
cdef cppclass CActorID "ray::ActorID"(CBaseID[CActorID]):
@staticmethod
CActorID FromBinary(const c_string &binary)
@staticmethod
const CActorID Nil()
@staticmethod
size_t Size()
@staticmethod
CActorID Of(CJobID job_id, CTaskID parent_task_id,
int64_t parent_task_counter)
cdef cppclass CClientID "ray::ClientID"(CUniqueID):
@staticmethod
CClientID FromBinary(const c_string &binary)
cdef cppclass CConfigID "ray::ConfigID"(CUniqueID):
@staticmethod
CConfigID FromBinary(const c_string &binary)
cdef cppclass CFunctionID "ray::FunctionID"(CUniqueID):
@staticmethod
CFunctionID FromBinary(const c_string &binary)
cdef cppclass CJobID "ray::JobID"(CBaseID[CJobID]):
@staticmethod
CJobID FromBinary(const c_string &binary)
@staticmethod
const CJobID Nil()
@staticmethod
size_t Size()
@staticmethod
CJobID FromInt(uint32_t value)
cdef cppclass CTaskID "ray::TaskID"(CBaseID[CTaskID]):
@staticmethod
CTaskID FromBinary(const c_string &binary)
@staticmethod
const CTaskID Nil()
@staticmethod
size_t Size()
@staticmethod
CTaskID ForDriverTask(const CJobID &job_id)
@staticmethod
CTaskID ForFakeTask()
@staticmethod
CTaskID ForActorCreationTask(CActorID actor_id)
@staticmethod
CTaskID ForActorTask(CJobID job_id, CTaskID parent_task_id,
int64_t parent_task_counter, CActorID actor_id)
@staticmethod
CTaskID ForNormalTask(CJobID job_id, CTaskID parent_task_id,
int64_t parent_task_counter)
cdef cppclass CObjectID" ray::ObjectID"(CBaseID[CObjectID]):
@staticmethod
int64_t MaxObjectIndex()
@staticmethod
CObjectID FromBinary(const c_string &binary)
@staticmethod
CObjectID FromRandom()
@staticmethod
const CObjectID Nil()
@staticmethod
CObjectID ForPut(const CTaskID &task_id, int64_t index,
int64_t transport_type)
@staticmethod
CObjectID ForTaskReturn(const CTaskID &task_id, int64_t index)
@staticmethod
size_t Size()
c_bool is_put()
c_bool IsDirectCallType()
CObjectID WithPlasmaTransportType()
int64_t ObjectIndex() const
CTaskID TaskId() const
cdef cppclass CWorkerID "ray::WorkerID"(CUniqueID):
@staticmethod
CWorkerID FromBinary(const c_string &binary)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/internal/__init__.py
|
Python
|
from ray.internal.internal_api import free
__all__ = ["free"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/internal/internal_api.py
|
Python
|
import ray.worker
from ray import profiling
__all__ = ["free"]
def free(object_ids, local_only=False, delete_creating_tasks=False):
    """Free a list of IDs from object stores.

    This function is a low-level API which should be used in restricted
    scenarios.

    If local_only is false, the request will be sent to all object stores.

    This method will not return any value to indicate whether the deletion is
    successful or not. This function is an instruction to the object store.
    If some of the objects are in use, object stores will delete them later
    when the ref count is down to 0.

    Examples:
        >>> x_id = f.remote()
        >>> ray.get(x_id)  # wait for x to be created first
        >>> free([x_id])  # unpin & delete x globally

    Args:
        object_ids (List[ObjectID]): List of object IDs to delete.
        local_only (bool): Whether only deleting the list of objects in local
            object store or all object stores.
        delete_creating_tasks (bool): Whether also delete the object creating
            tasks.

    Raises:
        TypeError: If object_ids (or any element of it) is not an ObjectID.
    """
    worker = ray.worker.get_global_worker()

    # Accept a single ObjectID for caller convenience.
    if isinstance(object_ids, ray.ObjectID):
        object_ids = [object_ids]

    if not isinstance(object_ids, list):
        raise TypeError("free() expects a list of ObjectID, got {}".format(
            type(object_ids)))

    # Make sure that the values are object IDs.
    for object_id in object_ids:
        if not isinstance(object_id, ray.ObjectID):
            raise TypeError("Attempting to call `free` on the value {}, "
                            "which is not a ray.ObjectID.".format(object_id))

    # In local mode values live directly on the IDs, so the local mode
    # manager handles deletion instead of the core worker.
    if ray.worker._mode() == ray.worker.LOCAL_MODE:
        worker.local_mode_manager.free(object_ids)
        return

    worker.check_connected()
    with profiling.profile("ray.free"):
        # Nothing to do for an empty list.
        if len(object_ids) == 0:
            return

        worker.core_worker.free_objects(object_ids, local_only,
                                        delete_creating_tasks)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/local_mode_manager.py
|
Python
|
import copy
import traceback
import ray
from ray import ObjectID
from ray.utils import format_error_message
from ray.exceptions import RayTaskError
class LocalModeObjectID(ObjectID):
    """Wrapper class around ray.ObjectID used for local mode.

    Object values are stored directly as a field of the LocalModeObjectID.

    Attributes:
        value: Field that stores object values. If this field does not exist,
            it equates to the object not existing in the object store. This
            is necessary because None is a valid object value.
    """

    def _clone(self):
        # Shallow and deep copies behave identically for local mode IDs:
        # duplicate the binary ID and carry the (optional) stored value
        # across by reference.
        duplicate = LocalModeObjectID(self.binary())
        if hasattr(self, "value"):
            duplicate.value = self.value
        return duplicate

    def __copy__(self):
        return self._clone()

    def __deepcopy__(self, memo=None):
        return self._clone()
class LocalModeManager:
    """Used to emulate remote operations when running in local mode."""

    def __init__(self):
        """Initialize a LocalModeManager."""

    @staticmethod
    def _check_local_id(object_id):
        # Only LocalModeObjectIDs carry their value with them, so any other
        # kind of ID cannot be resolved by this manager.
        if not isinstance(object_id, LocalModeObjectID):
            raise TypeError("Only LocalModeObjectIDs are supported "
                            "when running in LOCAL_MODE. Using "
                            "user-generated ObjectIDs will fail.")

    def execute(self, function, function_name, args, kwargs, num_return_vals):
        """Synchronously executes a "remote" function or actor method.

        Stores results directly in the generated and returned
        LocalModeObjectIDs. Any exceptions raised during function execution
        will be stored under all returned object IDs and later raised by the
        worker.

        Args:
            function: The function to execute.
            function_name: Name of the function to execute.
            args: Arguments to the function. These will not be modified by
                the function execution.
            kwargs: Keyword arguments to the function.
            num_return_vals: Number of expected return values specified in
                the function's decorator.

        Returns:
            LocalModeObjectIDs corresponding to the function return values.
        """

        def materialize(value):
            # ObjectID arguments are resolved through the emulated store;
            # plain values are deep-copied so the callee cannot mutate the
            # caller's data.
            if isinstance(value, ObjectID):
                return ray.get(value)
            return copy.deepcopy(value)

        return_ids = [
            LocalModeObjectID.from_random() for _ in range(num_return_vals)
        ]
        new_args = [materialize(arg) for arg in args]
        new_kwargs = {key: materialize(val) for key, val in kwargs.items()}

        try:
            outcome = function(*new_args, **new_kwargs)
            if num_return_vals == 1:
                return_ids[0].value = outcome
            else:
                for object_id, result in zip(return_ids, outcome):
                    object_id.value = result
        except Exception as e:
            # Store the wrapped error under every return ID; the worker
            # re-raises it when the IDs are fetched.
            backtrace = format_error_message(traceback.format_exc())
            task_error = RayTaskError(function_name, backtrace, e.__class__)
            for object_id in return_ids:
                object_id.value = task_error
        return return_ids

    def put_object(self, value):
        """Store an object in the emulated object store.

        Implemented by generating a LocalModeObjectID and storing the value
        directly within it.

        Args:
            value: The value to store.

        Returns:
            LocalModeObjectID corresponding to the value.
        """
        holder = LocalModeObjectID.from_random()
        holder.value = value
        return holder

    def get_objects(self, object_ids):
        """Fetch objects from the emulated object store.

        Accepts only LocalModeObjectIDs and reads values directly from them.

        Args:
            object_ids: A list of object IDs to fetch values for.

        Returns:
            The values stored under the given IDs, in order.

        Raises:
            TypeError if any of the object IDs are not LocalModeObjectIDs.
            KeyError if any of the object IDs do not contain values.
        """
        fetched = []
        for object_id in object_ids:
            self._check_local_id(object_id)
            if not hasattr(object_id, "value"):
                raise KeyError("Value for {} not found".format(object_id))
            fetched.append(object_id.value)
        return fetched

    def free(self, object_ids):
        """Delete objects from the emulated object store.

        Accepts only LocalModeObjectIDs and deletes their values directly.

        Args:
            object_ids: A list of ObjectIDs to delete.

        Raises:
            TypeError if any of the object IDs are not LocalModeObjectIDs.
        """
        for object_id in object_ids:
            self._check_local_id(object_id)
            try:
                del object_id.value
            except AttributeError:
                # Value already absent: deletion is idempotent.
                pass
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/log_monitor.py
|
Python
|
import argparse
import errno
import glob
import json
import logging
import os
import shutil
import time
import traceback
import ray.ray_constants as ray_constants
import ray.services as services
import ray.utils
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
class LogFileInfo:
    """Bookkeeping for one log file tracked by the log monitor.

    Records how far the file has been read, the size it had when last
    opened, and (while open) the live file handle.
    """

    def __init__(self,
                 filename=None,
                 size_when_last_opened=None,
                 file_position=None,
                 file_handle=None):
        # The first three fields are required; the handle may legitimately
        # be None while the file is closed.
        assert (filename is not None and size_when_last_opened is not None
                and file_position is not None)
        self.file_handle = file_handle
        self.filename = filename
        self.file_position = file_position
        self.size_when_last_opened = size_when_last_opened
        # PID of the process writing this file ("raylet" for raylet logs);
        # learned later from the file's first line.
        self.worker_pid = None
class LogMonitor:
    """A monitor process for monitoring Ray log files.

    This class maintains a list of open files and a list of closed log files.
    We can't simply leave all files open because we'll run out of file
    descriptors.

    The "run" method of this class will cycle between doing several things:
    1. First, it will check if any new files have appeared in the log
       directory. If so, they will be added to the list of closed files.
    2. Then, if we are unable to open any new files, we will close all of the
       files.
    3. Then, we will open as many closed files as we can that may have new
       lines (judged by an increase in file size since the last time the file
       was opened).
    4. Then we will loop through the open files and see if there are any new
       lines in the file. If so, we will publish them to Redis.

    Attributes:
        ip (str): The IP address of this machine. Used to tag the log
            messages published to Redis.
        logs_dir (str): The directory that the log files are in.
        redis_client: A client used to communicate with the Redis server.
        log_filenames (set): This is the set of filenames of all files in
            open_file_infos and closed_file_infos.
        open_file_infos (list[LogFileInfo]): Info for all of the open files.
        closed_file_infos (list[LogFileInfo]): Info for all of the closed
            files.
        can_open_more_files (bool): True if we can still open more files and
            false otherwise.
    """

    def __init__(self, logs_dir, redis_address, redis_password=None):
        """Initialize the log monitor object."""
        self.ip = services.get_node_ip_address()
        self.logs_dir = logs_dir
        self.redis_client = ray.services.create_redis_client(
            redis_address, password=redis_password)
        self.log_filenames = set()
        self.open_file_infos = []
        self.closed_file_infos = []
        self.can_open_more_files = True

    def close_all_files(self):
        """Close all open files (so that we can open more)."""
        while len(self.open_file_infos) > 0:
            file_info = self.open_file_infos.pop(0)
            file_info.file_handle.close()
            file_info.file_handle = None
            try:
                # Test if the worker process that generated the log file
                # is still alive. Only applies to worker processes whose pid
                # we have already learned; a None pid (no lines read yet)
                # would make os.kill raise TypeError, so treat such files as
                # still alive.
                if (file_info.worker_pid is not None
                        and file_info.worker_pid != "raylet"):
                    os.kill(file_info.worker_pid, 0)
            except OSError:
                # The process is not alive any more, so move the log file
                # out of the log directory so glob.glob will not be slowed
                # by it.
                target = os.path.join(self.logs_dir, "old",
                                      os.path.basename(file_info.filename))
                try:
                    shutil.move(file_info.filename, target)
                except (IOError, OSError) as e:
                    if e.errno == errno.ENOENT:
                        logger.warning("Warning: The file {} was not "
                                       "found.".format(file_info.filename))
                    else:
                        raise e
            else:
                self.closed_file_infos.append(file_info)
        self.can_open_more_files = True

    def update_log_filenames(self):
        """Update the list of log files to monitor."""
        # Output of user code is written here. NOTE(review): "[.out|.err]"
        # is a glob character class (one trailing character from the set),
        # not an alternation; it happens to match worker*.out / worker*.err
        # but also other suffixes ending in one of those characters.
        log_file_paths = glob.glob("{}/worker*[.out|.err]".format(
            self.logs_dir))
        # segfaults and other serious errors are logged here
        raylet_err_paths = glob.glob("{}/raylet*.err".format(self.logs_dir))
        for file_path in log_file_paths + raylet_err_paths:
            if os.path.isfile(
                    file_path) and file_path not in self.log_filenames:
                self.log_filenames.add(file_path)
                self.closed_file_infos.append(
                    LogFileInfo(
                        filename=file_path,
                        size_when_last_opened=0,
                        file_position=0,
                        file_handle=None))
                log_filename = os.path.basename(file_path)
                logger.info("Beginning to track file {}".format(log_filename))

    def open_closed_files(self):
        """Open some closed files if they may have new lines.

        Opening more files may require us to close some of the already open
        files.
        """
        if not self.can_open_more_files:
            # If we can't open any more files. Close all of the files.
            self.close_all_files()

        files_with_no_updates = []
        while len(self.closed_file_infos) > 0:
            if (len(self.open_file_infos) >=
                    ray_constants.LOG_MONITOR_MAX_OPEN_FILES):
                self.can_open_more_files = False
                break

            file_info = self.closed_file_infos.pop(0)
            assert file_info.file_handle is None
            # Get the file size to see if it has gotten bigger since we last
            # opened it.
            try:
                file_size = os.path.getsize(file_info.filename)
            except (IOError, OSError) as e:
                # Catch "file not found" errors.
                if e.errno == errno.ENOENT:
                    logger.warning("Warning: The file {} was not "
                                   "found.".format(file_info.filename))
                    self.log_filenames.remove(file_info.filename)
                    continue
                raise e

            # If some new lines have been added to this file, try to reopen
            # the file.
            if file_size > file_info.size_when_last_opened:
                try:
                    f = open(file_info.filename, "rb")
                except (IOError, OSError) as e:
                    if e.errno == errno.ENOENT:
                        logger.warning("Warning: The file {} was not "
                                       "found.".format(file_info.filename))
                        self.log_filenames.remove(file_info.filename)
                        continue
                    else:
                        raise e

                f.seek(file_info.file_position)
                # Bug fix: this previously assigned to a misspelled
                # "filesize_when_last_opened" attribute, so the recorded size
                # never changed and unchanged files were reopened every
                # cycle.
                file_info.size_when_last_opened = file_size
                file_info.file_handle = f
                self.open_file_infos.append(file_info)
            else:
                files_with_no_updates.append(file_info)

        # Add the files with no changes back to the list of closed files.
        self.closed_file_infos += files_with_no_updates

    def check_log_files_and_publish_updates(self):
        """Get any changes to the log files and push updates to Redis.

        Returns:
            True if anything was published and false otherwise.
        """
        anything_published = False
        for file_info in self.open_file_infos:
            assert not file_info.file_handle.closed

            lines_to_publish = []
            max_num_lines_to_read = 100
            for _ in range(max_num_lines_to_read):
                try:
                    next_line = file_info.file_handle.readline()
                    # Replace any characters not in UTF-8 with
                    # a replacement character, see
                    # https://stackoverflow.com/a/38565489/10891801
                    next_line = next_line.decode("utf-8", "replace")
                    if next_line == "":
                        break
                    if next_line[-1] == "\n":
                        next_line = next_line[:-1]
                    lines_to_publish.append(next_line)
                except Exception:
                    # Bug fix: previously referenced the nonexistent
                    # attributes file_info.full_path and
                    # file_info.file_info.file_handle, so this handler raised
                    # AttributeError and masked the original error.
                    logger.error("Error: Reading file: {}, position: {} "
                                 "failed.".format(
                                     file_info.filename,
                                     file_info.file_handle.tell()))
                    raise

            # On first read, learn who is writing this file from its header.
            if file_info.file_position == 0:
                if (len(lines_to_publish) > 0 and
                        lines_to_publish[0].startswith("Ray worker pid: ")):
                    file_info.worker_pid = int(
                        lines_to_publish[0].split(" ")[-1])
                    lines_to_publish = lines_to_publish[1:]
                elif "/raylet" in file_info.filename:
                    file_info.worker_pid = "raylet"

            # Record the current position in the file.
            file_info.file_position = file_info.file_handle.tell()

            if len(lines_to_publish) > 0:
                self.redis_client.publish(
                    ray.gcs_utils.LOG_FILE_CHANNEL,
                    json.dumps({
                        "ip": self.ip,
                        "pid": file_info.worker_pid,
                        "lines": lines_to_publish
                    }))
                anything_published = True

        return anything_published

    def run(self):
        """Run the log monitor.

        This will query Redis once every second to check if there are new log
        files to monitor. It will also store those log files in Redis.
        """
        while True:
            self.update_log_filenames()
            self.open_closed_files()
            anything_published = self.check_log_files_and_publish_updates()
            # If nothing was published, then wait a little bit before
            # checking for logs to avoid using too much CPU.
            if not anything_published:
                time.sleep(0.05)
if __name__ == "__main__":
    # Command-line entry point: parse Redis connection and logging options,
    # then run the log monitor loop forever, reporting a fatal failure back
    # to all drivers through Redis.
    parser = argparse.ArgumentParser(
        description=("Parse Redis server for the "
                     "log monitor to connect "
                     "to."))
    parser.add_argument(
        "--redis-address",
        required=True,
        type=str,
        help="The address to use for Redis.")
    parser.add_argument(
        "--redis-password",
        required=False,
        type=str,
        default=None,
        help="the password to use for Redis")
    parser.add_argument(
        "--logging-level",
        required=False,
        type=str,
        default=ray_constants.LOGGER_LEVEL,
        choices=ray_constants.LOGGER_LEVEL_CHOICES,
        help=ray_constants.LOGGER_LEVEL_HELP)
    parser.add_argument(
        "--logging-format",
        required=False,
        type=str,
        default=ray_constants.LOGGER_FORMAT,
        help=ray_constants.LOGGER_FORMAT_HELP)
    parser.add_argument(
        "--logs-dir",
        required=True,
        type=str,
        help="Specify the path of the temporary directory used by Ray "
        "processes.")
    args = parser.parse_args()
    ray.utils.setup_logger(args.logging_level, args.logging_format)
    log_monitor = LogMonitor(
        args.logs_dir, args.redis_address, redis_password=args.redis_password)
    try:
        # run() only returns by raising; any exception falls through to the
        # error-reporting path below.
        log_monitor.run()
    except Exception as e:
        # Something went wrong, so push an error to all drivers.
        redis_client = ray.services.create_redis_client(
            args.redis_address, password=args.redis_password)
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        message = ("The log monitor on node {} failed with the following "
                   "error:\n{}".format(os.uname()[1], traceback_str))
        ray.utils.push_error_to_driver_through_redis(
            redis_client, ray_constants.LOG_MONITOR_DIED_ERROR, message)
        # Re-raise so the process exits non-zero and the failure is visible
        # in this process's own logs as well.
        raise e
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/memory_monitor.py
|
Python
|
import logging
import os
import sys
import time
try:
import psutil
except ImportError:
psutil = None
logger = logging.getLogger(__name__)
def get_rss(memory_info):
    """Get the estimated non-shared memory usage from psutil memory_info."""
    # Subtract shared pages when the platform reports them; OSX's
    # memory_info has no "shared" attribute, so fall back to plain RSS.
    return memory_info.rss - getattr(memory_info, "shared", 0)
def get_shared(virtual_memory):
    """Get the estimated shared memory usage from psutil virtual mem info."""
    # OSX's virtual_memory() has no "shared" attribute; report zero there.
    return getattr(virtual_memory, "shared", 0)
class RayOutOfMemoryError(Exception):
    """Raised when memory usage on a node exceeds the configured threshold."""

    def __init__(self, msg):
        Exception.__init__(self, msg)

    @staticmethod
    def get_message(used_gb, total_gb, threshold):
        """Build a human-readable OOM report listing the top memory users.

        Args:
            used_gb (float): Memory currently in use on this node, in GB.
            total_gb (float): Total memory on this node, in GB.
            threshold (float): The fraction of total memory that was
                exceeded.

        Returns:
            A multi-line message naming the node, its usage, and the ten
            largest processes by estimated non-shared RSS.
        """
        pids = psutil.pids()
        proc_stats = []
        for pid in pids:
            # Processes can exit (or deny access) between listing the pids
            # and querying them; skip those rather than crashing while
            # building an error message. psutil.Error is the base class of
            # NoSuchProcess and AccessDenied.
            try:
                proc = psutil.Process(pid)
                proc_stats.append((get_rss(proc.memory_info()), pid,
                                   proc.cmdline()))
            except psutil.Error:
                continue
        proc_str = "PID\tMEM\tCOMMAND"
        # Show the top 10 consumers, largest RSS first.
        for rss, pid, cmdline in sorted(proc_stats, reverse=True)[:10]:
            proc_str += "\n{}\t{}GiB\t{}".format(
                pid, round(rss / (1024**3), 2),
                " ".join(cmdline)[:100].strip())
        return ("More than {}% of the memory on ".format(int(
            100 * threshold)) + "node {} is used ({} / {} GB). ".format(
                os.uname()[1], round(used_gb, 2), round(total_gb, 2)) +
                "The top 10 memory consumers are:\n\n{}".format(proc_str) +
                "\n\nIn addition, up to {} GiB of shared memory is ".format(
                    round(get_shared(psutil.virtual_memory()) / (1024**3), 2))
                + "currently being used by the Ray object store. You can set "
                "the object store size with the `object_store_memory` "
                "parameter when starting Ray, and the max Redis size with "
                "`redis_max_memory`. Note that Ray assumes all system "
                "memory is available for use by workers. If your system "
                "has other applications running, you should manually set "
                "these memory limits to a lower value.")
class MemoryMonitor:
    """Helper class for raising errors on low memory.

    This presents a much cleaner error message to users than what would happen
    if we actually ran out of memory.

    The monitor tries to use the cgroup memory limit and usage if it is set
    and available so that it is more reasonable inside containers. Otherwise,
    it uses `psutil` to check the memory usage.

    The environment variable `RAY_MEMORY_MONITOR_ERROR_THRESHOLD` can be used
    to overwrite the default error_threshold setting.
    """

    def __init__(self, error_threshold=0.95, check_interval=1):
        """Initialize the monitor.

        Args:
            error_threshold (float): Fraction of total memory above which
                raise_if_low_memory() raises. Overridden by the
                RAY_MEMORY_MONITOR_ERROR_THRESHOLD env var when set.
            check_interval (int): Minimum seconds between actual checks.
        """
        # Note: it takes ~50us to check the memory usage through psutil, so
        # throttle this check at most once a second or so.
        self.check_interval = check_interval
        self.last_checked = 0
        # Optional per-worker heap (RSS) limit, set via set_heap_limit().
        self.heap_limit = None
        self.worker_name = None
        try:
            self.error_threshold = float(
                os.getenv("RAY_MEMORY_MONITOR_ERROR_THRESHOLD"))
        except (ValueError, TypeError):
            # Env var unset (float(None) -> TypeError) or malformed
            # (ValueError): fall back to the constructor argument.
            self.error_threshold = error_threshold
        # Try to read the cgroup memory limit if it is available.
        try:
            with open("/sys/fs/cgroup/memory/memory.limit_in_bytes",
                      "rb") as f:
                self.cgroup_memory_limit_gb = int(f.read()) / (1024**3)
        except IOError:
            # No cgroup v1 memory controller mounted: effectively unlimited.
            self.cgroup_memory_limit_gb = sys.maxsize / (1024**3)
        if not psutil:
            print("WARNING: Not monitoring node memory since `psutil` is not "
                  "installed. Install this with `pip install psutil` "
                  "(or ray[debug]) to enable debugging of memory-related "
                  "crashes.")

    def set_heap_limit(self, worker_name, limit_bytes):
        # Configure a per-worker heap limit, checked in addition to the
        # node-wide threshold; worker_name is used only in error messages.
        self.heap_limit = limit_bytes
        self.worker_name = worker_name

    def raise_if_low_memory(self):
        """Raise RayOutOfMemoryError if node or heap usage is too high.

        No-op when psutil is unavailable, when called again within
        check_interval seconds, or when the
        RAY_DEBUG_DISABLE_MEMORY_MONITOR env var is set.
        """
        if psutil is None:
            return  # nothing we can do
        if time.time() - self.last_checked > self.check_interval:
            if "RAY_DEBUG_DISABLE_MEMORY_MONITOR" in os.environ:
                return  # escape hatch, not intended for user use
            self.last_checked = time.time()
            total_gb = psutil.virtual_memory().total / (1024**3)
            used_gb = total_gb - psutil.virtual_memory().available / (1024**3)
            # Prefer the (tighter) cgroup numbers when inside a container.
            if self.cgroup_memory_limit_gb < total_gb:
                total_gb = self.cgroup_memory_limit_gb
                with open("/sys/fs/cgroup/memory/memory.usage_in_bytes",
                          "rb") as f:
                    used_gb = int(f.read()) / (1024**3)
            if used_gb > total_gb * self.error_threshold:
                raise RayOutOfMemoryError(
                    RayOutOfMemoryError.get_message(used_gb, total_gb,
                                                    self.error_threshold))
            else:
                logger.debug("Memory usage is {} / {}".format(
                    used_gb, total_gb))
            if self.heap_limit:
                mem_info = psutil.Process(os.getpid()).memory_info()
                heap_size = get_rss(mem_info)
                if heap_size > self.heap_limit:
                    raise RayOutOfMemoryError(
                        "Heap memory usage for {} is {} / {} GiB limit".format(
                            self.worker_name, round(heap_size / (1024**3), 4),
                            round(self.heap_limit / (1024**3), 4)))
                elif heap_size > 0.8 * self.heap_limit:
                    # Warn early once usage passes 80% of the heap limit.
                    logger.warning(
                        "Heap memory usage for {} is {} / {} GiB limit".format(
                            self.worker_name, round(heap_size / (1024**3), 4),
                            round(self.heap_limit / (1024**3), 4)))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/monitor.py
|
Python
|
import argparse
import logging
import os
import time
import traceback
import json
import redis
import ray
from ray.autoscaler.autoscaler import LoadMetrics, StandardAutoscaler
import ray.cloudpickle as pickle
import ray.gcs_utils
import ray.utils
import ray.ray_constants as ray_constants
from ray.utils import (binary_to_hex, binary_to_object_id, binary_to_task_id,
hex_to_binary, setup_logger)
logger = logging.getLogger(__name__)
class Monitor:
    """A monitor for Ray processes.

    The monitor is in charge of cleaning up the tables in the global state
    after processes have died. The monitor is currently not responsible for
    detecting component failures.

    Attributes:
        redis: A connection to the Redis server.
        primary_subscribe_client: A pubsub client for the Redis server.
            This is used to receive notifications about failed components.
    """

    def __init__(self, redis_address, autoscaling_config, redis_password=None):
        # Initialize the Redis clients.
        ray.state.state._initialize_global_state(
            redis_address, redis_password=redis_password)
        self.redis = ray.services.create_redis_client(
            redis_address, password=redis_password)
        # Setup subscriptions to the primary Redis server and the Redis shards.
        self.primary_subscribe_client = self.redis.pubsub(
            ignore_subscribe_messages=True)
        # Keep a mapping from raylet client ID to IP address to use
        # for updating the load metrics.
        self.raylet_id_to_ip_map = {}
        self.load_metrics = LoadMetrics()
        if autoscaling_config:
            self.autoscaler = StandardAutoscaler(autoscaling_config,
                                                 self.load_metrics)
        else:
            self.autoscaler = None

        # Experimental feature: GCS flushing.
        self.issue_gcs_flushes = "RAY_USE_NEW_GCS" in os.environ
        self.gcs_flush_policy = None
        if self.issue_gcs_flushes:
            # Data is stored under the first data shard, so we issue flushes to
            # that redis server.
            addr_port = self.redis.lrange("RedisShards", 0, -1)
            if len(addr_port) > 1:
                logger.warning(
                    "Monitor: "
                    "TODO: if launching > 1 redis shard, flushing needs to "
                    "touch shards in parallel.")
                self.issue_gcs_flushes = False
            else:
                addr_port = addr_port[0].split(b":")
                self.redis_shard = redis.StrictRedis(
                    host=addr_port[0],
                    port=addr_port[1],
                    password=redis_password)
                try:
                    # Probe once so flushing can be disabled up front if the
                    # shard does not support the HEAD.FLUSH command.
                    self.redis_shard.execute_command("HEAD.FLUSH 0")
                except redis.exceptions.ResponseError as e:
                    logger.info(
                        "Monitor: "
                        "Turning off flushing due to exception: {}".format(
                            str(e)))
                    self.issue_gcs_flushes = False

    def __del__(self):
        """Destruct the monitor object."""
        # We close the pubsub client to avoid leaking file descriptors.
        self.primary_subscribe_client.close()

    def subscribe(self, channel):
        """Subscribe to the given channel on the primary Redis shard.

        Args:
            channel (str): The channel to subscribe to.

        Raises:
            Exception: An exception is raised if the subscription fails.
        """
        self.primary_subscribe_client.subscribe(channel)

    def xray_heartbeat_batch_handler(self, unused_channel, data):
        """Handle an xray heartbeat batch message from Redis."""
        gcs_entries = ray.gcs_utils.GcsEntry.FromString(data)
        heartbeat_data = gcs_entries.entries[0]
        message = ray.gcs_utils.HeartbeatBatchTableData.FromString(
            heartbeat_data)
        for heartbeat_message in message.batch:
            resource_load = dict(
                zip(heartbeat_message.resource_load_label,
                    heartbeat_message.resource_load_capacity))
            total_resources = dict(
                zip(heartbeat_message.resources_total_label,
                    heartbeat_message.resources_total_capacity))
            available_resources = dict(
                zip(heartbeat_message.resources_available_label,
                    heartbeat_message.resources_available_capacity))
            # A resource missing from the "available" map is treated as
            # fully consumed (0.0 available).
            for resource in total_resources:
                available_resources.setdefault(resource, 0.0)
            # Update the load metrics for this raylet.
            client_id = ray.utils.binary_to_hex(heartbeat_message.client_id)
            ip = self.raylet_id_to_ip_map.get(client_id)
            if ip:
                self.load_metrics.update(ip, total_resources,
                                         available_resources, resource_load)
            else:
                logger.warning(
                    "Monitor: "
                    "could not find ip for client {}".format(client_id))

    def _xray_clean_up_entries_for_job(self, job_id):
        """Remove this job's object/task entries from redis.

        Removes control-state entries of all tasks and task return
        objects belonging to the driver.

        Args:
            job_id: The job id.
        """
        xray_task_table_prefix = (
            ray.gcs_utils.TablePrefix_RAYLET_TASK_string.encode("ascii"))
        xray_object_table_prefix = (
            ray.gcs_utils.TablePrefix_OBJECT_string.encode("ascii"))

        task_table_objects = ray.tasks()
        job_id_hex = binary_to_hex(job_id)
        job_task_id_bins = set()
        for task_id_hex, task_info in task_table_objects.items():
            task_table_object = task_info["TaskSpec"]
            task_job_id_hex = task_table_object["JobID"]
            if job_id_hex != task_job_id_hex:
                # Ignore tasks that aren't from this driver.
                continue
            job_task_id_bins.add(hex_to_binary(task_id_hex))

        # Get objects associated with the driver.
        object_table_objects = ray.objects()
        job_object_id_bins = set()
        for object_id, _ in object_table_objects.items():
            task_id_bin = ray._raylet.compute_task_id(object_id).binary()
            if task_id_bin in job_task_id_bins:
                job_object_id_bins.add(object_id.binary())

        def to_shard_index(id_bin):
            if len(id_bin) == ray.TaskID.size():
                return binary_to_task_id(id_bin).redis_shard_hash() % len(
                    ray.state.state.redis_clients)
            else:
                return binary_to_object_id(id_bin).redis_shard_hash() % len(
                    ray.state.state.redis_clients)

        # Form the redis keys to delete.
        sharded_keys = [[] for _ in range(len(ray.state.state.redis_clients))]
        for task_id_bin in job_task_id_bins:
            sharded_keys[to_shard_index(task_id_bin)].append(
                xray_task_table_prefix + task_id_bin)
        for object_id_bin in job_object_id_bins:
            sharded_keys[to_shard_index(object_id_bin)].append(
                xray_object_table_prefix + object_id_bin)

        # Remove with best effort.
        for shard_index in range(len(sharded_keys)):
            keys = sharded_keys[shard_index]
            if len(keys) == 0:
                continue
            # BUGFIX: previously this local was named `redis`, shadowing the
            # module-level `redis` import for the rest of this scope.
            shard_client = ray.state.state.redis_clients[shard_index]
            num_deleted = shard_client.delete(*keys)
            logger.info("Monitor: "
                        "Removed {} dead redis entries of the "
                        "driver from redis shard {}.".format(
                            num_deleted, shard_index))
            if num_deleted != len(keys):
                logger.warning("Monitor: "
                               "Failed to remove {} relevant redis "
                               "entries from redis shard {}.".format(
                                   len(keys) - num_deleted, shard_index))

    def xray_job_notification_handler(self, unused_channel, data):
        """Handle a notification that a job has been added or removed.

        Args:
            unused_channel: The message channel.
            data: The message data.
        """
        gcs_entries = ray.gcs_utils.GcsEntry.FromString(data)
        job_data = gcs_entries.entries[0]
        message = ray.gcs_utils.JobTableData.FromString(job_data)
        job_id = message.job_id
        if message.is_dead:
            logger.info("Monitor: "
                        "XRay Driver {} has been removed.".format(
                            binary_to_hex(job_id)))
            self._xray_clean_up_entries_for_job(job_id)

    def autoscaler_resource_request_handler(self, _, data):
        """Handle a notification of a resource request for the autoscaler.

        Args:
            _: unused channel argument.
            data: a resource request as JSON, e.g. {"CPU": 1}
        """
        if not self.autoscaler:
            return
        try:
            self.autoscaler.request_resources(json.loads(data))
        except Exception:
            # We don't want this to kill the monitor.
            traceback.print_exc()

    def process_messages(self, max_messages=10000):
        """Process all messages ready in the subscription channels.

        This reads messages from the subscription channels and calls the
        appropriate handlers until there are no messages left.

        Args:
            max_messages: The maximum number of messages to process before
                returning.
        """
        subscribe_clients = [self.primary_subscribe_client]
        for subscribe_client in subscribe_clients:
            for _ in range(max_messages):
                message = subscribe_client.get_message()
                if message is None:
                    # Continue on to the next subscribe client.
                    break

                # Parse the message.
                channel = message["channel"]
                data = message["data"]

                # Determine the appropriate message handler.
                if channel == ray.gcs_utils.XRAY_HEARTBEAT_BATCH_CHANNEL:
                    # Similar functionality as raylet info channel
                    message_handler = self.xray_heartbeat_batch_handler
                elif channel == ray.gcs_utils.XRAY_JOB_CHANNEL:
                    # Handles driver death.
                    message_handler = self.xray_job_notification_handler
                elif (channel ==
                      ray.ray_constants.AUTOSCALER_RESOURCE_REQUEST_CHANNEL):
                    message_handler = self.autoscaler_resource_request_handler
                else:
                    raise Exception("This code should be unreachable.")

                # Call the handler.
                message_handler(channel, data)

    def update_raylet_map(self, _append_port=False):
        """Updates internal raylet map.

        Args:
            _append_port (bool): Defaults to False. Appending the port is
                useful in testing, as mock clusters have many nodes with
                the same IP and cannot be uniquely identified.
        """
        all_raylet_nodes = ray.nodes()
        self.raylet_id_to_ip_map = {}
        for raylet_info in all_raylet_nodes:
            node_id = (raylet_info.get("DBClientID") or raylet_info["NodeID"])
            ip_address = (raylet_info.get("AuxAddress")
                          or raylet_info["NodeManagerAddress"]).split(":")[0]
            if _append_port:
                ip_address += ":" + str(raylet_info["NodeManagerPort"])
            self.raylet_id_to_ip_map[node_id] = ip_address

    def _maybe_flush_gcs(self):
        """Experimental: issue a flush request to the GCS.

        The purpose of this feature is to control GCS memory usage.

        To activate this feature, Ray must be compiled with the flag
        RAY_USE_NEW_GCS set, and Ray must be started at run time with the flag
        as well.
        """
        if not self.issue_gcs_flushes:
            return
        if self.gcs_flush_policy is None:
            serialized = self.redis.get("gcs_flushing_policy")
            if serialized is None:
                # Client has not set any policy; by default flushing is off.
                return
            self.gcs_flush_policy = pickle.loads(serialized)

        if not self.gcs_flush_policy.should_flush(self.redis_shard):
            return

        max_entries_to_flush = self.gcs_flush_policy.num_entries_to_flush()
        num_flushed = self.redis_shard.execute_command(
            "HEAD.FLUSH {}".format(max_entries_to_flush))
        logger.info("Monitor: num_flushed {}".format(num_flushed))

        # This flushes event log and log files.
        ray.experimental.flush_redis_unsafe(self.redis)

        self.gcs_flush_policy.record_flush()

    def _run(self):
        """Run the monitor.

        This function loops forever, checking for messages about dead database
        clients and cleaning up state accordingly.
        """
        # Initialize the subscription channel.
        self.subscribe(ray.gcs_utils.XRAY_HEARTBEAT_BATCH_CHANNEL)
        self.subscribe(ray.gcs_utils.XRAY_JOB_CHANNEL)
        if self.autoscaler:
            self.subscribe(
                ray.ray_constants.AUTOSCALER_RESOURCE_REQUEST_CHANNEL)

        # TODO(rkn): If there were any dead clients at startup, we should clean
        # up the associated state in the state tables.

        # Handle messages from the subscription channels.
        while True:
            # Update the mapping from raylet client ID to IP address.
            # This is only used to update the load metrics for the autoscaler.
            self.update_raylet_map()

            # Process autoscaling actions
            if self.autoscaler:
                self.autoscaler.update()

            self._maybe_flush_gcs()

            # Process a round of messages.
            self.process_messages()

            # Wait for a heartbeat interval before processing the next round of
            # messages.
            time.sleep(
                ray._config.raylet_heartbeat_timeout_milliseconds() * 1e-3)

    def run(self):
        """Run the monitor loop; on failure, kill autoscaled workers."""
        try:
            self._run()
        except Exception:
            logger.exception("Error in monitor loop")
            if self.autoscaler:
                self.autoscaler.kill_workers()
            raise
if __name__ == "__main__":
    # Command-line entry point: parse the Redis connection arguments and run
    # the monitor until it fails or the process is killed.
    parser = argparse.ArgumentParser(
        description=("Parse Redis server for the "
                     "monitor to connect to."))
    parser.add_argument(
        "--redis-address",
        required=True,
        type=str,
        help="the address to use for Redis")
    parser.add_argument(
        "--autoscaling-config",
        required=False,
        type=str,
        help="the path to the autoscaling config file")
    parser.add_argument(
        "--redis-password",
        required=False,
        type=str,
        default=None,
        help="the password to use for Redis")
    parser.add_argument(
        "--logging-level",
        required=False,
        type=str,
        default=ray_constants.LOGGER_LEVEL,
        choices=ray_constants.LOGGER_LEVEL_CHOICES,
        help=ray_constants.LOGGER_LEVEL_HELP)
    parser.add_argument(
        "--logging-format",
        required=False,
        type=str,
        default=ray_constants.LOGGER_FORMAT,
        help=ray_constants.LOGGER_FORMAT_HELP)
    args = parser.parse_args()
    setup_logger(args.logging_level, args.logging_format)

    if args.autoscaling_config:
        autoscaling_config = os.path.expanduser(args.autoscaling_config)
    else:
        autoscaling_config = None

    monitor = Monitor(
        args.redis_address,
        autoscaling_config,
        redis_password=args.redis_password)

    try:
        monitor.run()
    except Exception:
        # Something went wrong, so push an error to all drivers.
        redis_client = ray.services.create_redis_client(
            args.redis_address, password=args.redis_password)
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        message = "The monitor failed with the following error:\n{}".format(
            traceback_str)
        ray.utils.push_error_to_driver_through_redis(
            redis_client, ray_constants.MONITOR_DIED_ERROR, message)
        # BUGFIX: use a bare `raise` (not `raise e`) so the original
        # traceback is preserved exactly.
        raise
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/node.py
|
Python
|
import atexit
import collections
import datetime
import errno
import json
import os
import logging
import signal
import socket
import sys
import tempfile
import threading
import time
import ray
import ray.ray_constants as ray_constants
import ray.services
from ray.resource_spec import ResourceSpec
from ray.utils import try_to_create_directory, try_to_symlink
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray configures it by default automatically
# using logging.basicConfig in its entry/init points.
logger = logging.getLogger(__name__)
SESSION_LATEST = "session_latest"
class Node:
"""An encapsulation of the Ray processes on a single node.
This class is responsible for starting Ray processes and killing them,
and it also controls the temp file policy.
Attributes:
all_processes (dict): A mapping from process type (str) to a list of
ProcessInfo objects. All lists have length one except for the Redis
server list, which has multiple.
"""
    def __init__(self,
                 ray_params,
                 head=False,
                 shutdown_at_exit=True,
                 spawn_reaper=True,
                 connect_only=False):
        """Start a node.

        Args:
            ray_params (ray.params.RayParams): The parameters to use to
                configure the node.
            head (bool): True if this is the head node, which means it will
                start additional processes like the Redis servers, monitor
                processes, and web UI.
            shutdown_at_exit (bool): If true, spawned processes will be cleaned
                up if this process exits normally.
            spawn_reaper (bool): If true, spawns a process that will clean up
                other spawned processes if this process dies unexpectedly.
            connect_only (bool): If true, connect to the node without starting
                new processes.
        """
        if shutdown_at_exit:
            # connect_only means we did not spawn the processes, so we must
            # not kill them on exit either.
            if connect_only:
                raise ValueError("'shutdown_at_exit' and 'connect_only' "
                                 "cannot both be true.")
            self._register_shutdown_hooks()
        self.head = head
        # Mapping from process type (str) to a list of ProcessInfo objects.
        self.all_processes = {}
        # Try to get node IP address with the parameters.
        if ray_params.node_ip_address:
            node_ip_address = ray_params.node_ip_address
        elif ray_params.redis_address:
            node_ip_address = ray.services.get_node_ip_address(
                ray_params.redis_address)
        else:
            node_ip_address = ray.services.get_node_ip_address()
        self._node_ip_address = node_ip_address
        # Fill in defaults for any parameters the caller did not provide.
        ray_params.update_if_absent(
            include_log_monitor=True,
            resources={},
            temp_dir="/tmp/ray",
            worker_path=os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "workers/default_worker.py"))
        # Resolved lazily by get_resource_spec().
        self._resource_spec = None
        self._ray_params = ray_params
        self._redis_address = ray_params.redis_address
        self._config = (json.loads(ray_params._internal_config)
                        if ray_params._internal_config else None)
        if head:
            redis_client = None
            # date including microsecond
            date_str = datetime.datetime.today().strftime(
                "%Y-%m-%d_%H-%M-%S_%f")
            self.session_name = "session_{date_str}_{pid}".format(
                pid=os.getpid(), date_str=date_str)
        else:
            # Non-head nodes read the session name chosen by the head node
            # back from Redis.
            redis_client = self.create_redis_client()
            self.session_name = ray.utils.decode(
                redis_client.get("session_name"))
        self._init_temp(redis_client)
        if connect_only:
            # Get socket names from the configuration.
            self._plasma_store_socket_name = (
                ray_params.plasma_store_socket_name)
            self._raylet_socket_name = ray_params.raylet_socket_name
            # If user does not provide the socket name, get it from Redis.
            if (self._plasma_store_socket_name is None
                    or self._raylet_socket_name is None
                    or self._ray_params.node_manager_port is None):
                # Get the address info of the processes to connect to
                # from Redis.
                address_info = ray.services.get_address_info_from_redis(
                    self.redis_address,
                    self._node_ip_address,
                    redis_password=self.redis_password)
                self._plasma_store_socket_name = address_info[
                    "object_store_address"]
                self._raylet_socket_name = address_info["raylet_socket_name"]
                self._ray_params.node_manager_port = address_info[
                    "node_manager_port"]
        else:
            # If the user specified a socket name, use it.
            self._plasma_store_socket_name = self._prepare_socket_file(
                self._ray_params.plasma_store_socket_name,
                default_prefix="plasma_store")
            self._raylet_socket_name = self._prepare_socket_file(
                self._ray_params.raylet_socket_name, default_prefix="raylet")
        if head:
            ray_params.update_if_absent(num_redis_shards=1)
            self._webui_url = None
        else:
            self._webui_url = (
                ray.services.get_webui_url_from_redis(redis_client))
            ray_params.include_java = (
                ray.services.include_java_from_redis(redis_client))
        if head or not connect_only:
            # We need to start a local raylet.
            if (self._ray_params.node_manager_port is None
                    or self._ray_params.node_manager_port == 0):
                # No port specified. Pick a random port for the raylet to use.
                # NOTE: There is a possible but unlikely race condition where
                # the port is bound by another process between now and when the
                # raylet starts.
                self._ray_params.node_manager_port = self._get_unused_port()
        if not connect_only and spawn_reaper:
            # The reaper must be spawned before any other child processes so
            # it can clean them up if this process dies unexpectedly.
            self.start_reaper_process()
        # Start processes.
        if head:
            self.start_head_processes()
            # Publish this session's identifiers so other nodes can join.
            redis_client = self.create_redis_client()
            redis_client.set("session_name", self.session_name)
            redis_client.set("session_dir", self._session_dir)
            redis_client.set("temp_dir", self._temp_dir)
        if not connect_only:
            self.start_ray_processes()
def _register_shutdown_hooks(self):
# Register the atexit handler. In this case, we shouldn't call sys.exit
# as we're already in the exit procedure.
def atexit_handler(*args):
self.kill_all_processes(check_alive=False, allow_graceful=True)
atexit.register(atexit_handler)
# Register the handler to be called if we get a SIGTERM.
# In this case, we want to exit with an error code (1) after
# cleaning up child processes.
def sigterm_handler(signum, frame):
self.kill_all_processes(check_alive=False, allow_graceful=True)
sys.exit(1)
signal.signal(signal.SIGTERM, sigterm_handler)
    def _init_temp(self, redis_client):
        """Set up the temp/session/sockets/logs directory layout.

        On the head node the temp and session directories come from
        ray_params / the generated session name; other nodes read the head
        node's choices back from Redis via ``redis_client``.
        """
        # Create an dictionary to store temp file index.
        self._incremental_dict = collections.defaultdict(lambda: 0)
        if self.head:
            self._temp_dir = self._ray_params.temp_dir
        else:
            self._temp_dir = ray.utils.decode(redis_client.get("temp_dir"))
        try_to_create_directory(self._temp_dir)
        if self.head:
            self._session_dir = os.path.join(self._temp_dir, self.session_name)
        else:
            self._session_dir = ray.utils.decode(
                redis_client.get("session_dir"))
        session_symlink = os.path.join(self._temp_dir, SESSION_LATEST)
        # Send a warning message if the session exists.
        try_to_create_directory(self._session_dir)
        # Point the "session_latest" symlink at this session's directory.
        try_to_symlink(session_symlink, self._session_dir)
        # Create a directory to be used for socket files.
        self._sockets_dir = os.path.join(self._session_dir, "sockets")
        try_to_create_directory(self._sockets_dir)
        # Create a directory to be used for process log files.
        self._logs_dir = os.path.join(self._session_dir, "logs")
        try_to_create_directory(self._logs_dir)
        # Pre-create the logs/old subdirectory as well.
        old_logs_dir = os.path.join(self._logs_dir, "old")
        try_to_create_directory(old_logs_dir)
def get_resource_spec(self):
"""Resolve and return the current resource spec for the node."""
if not self._resource_spec:
self._resource_spec = ResourceSpec(
self._ray_params.num_cpus, self._ray_params.num_gpus,
self._ray_params.memory, self._ray_params.object_store_memory,
self._ray_params.resources,
self._ray_params.redis_max_memory).resolve(is_head=self.head)
return self._resource_spec
    @property
    def node_ip_address(self):
        """Get the IP address of this node."""
        return self._node_ip_address
    @property
    def address(self):
        """Get the cluster address (currently the Redis address)."""
        return self._redis_address
    @property
    def redis_address(self):
        """Get the cluster Redis address."""
        return self._redis_address
    @property
    def redis_password(self):
        """Get the cluster Redis password."""
        return self._ray_params.redis_password
    @property
    def load_code_from_local(self):
        """Get the load_code_from_local flag from the Ray parameters."""
        return self._ray_params.load_code_from_local
    @property
    def use_pickle(self):
        """Get the use_pickle flag from the Ray parameters."""
        return self._ray_params.use_pickle
    @property
    def object_id_seed(self):
        """Get the seed for deterministic generation of object IDs."""
        return self._ray_params.object_id_seed
    @property
    def plasma_store_socket_name(self):
        """Get the node's plasma store socket name."""
        return self._plasma_store_socket_name
    @property
    def unique_id(self):
        """Get a unique identifier for this node (ip:plasma_socket)."""
        return "{}:{}".format(self.node_ip_address,
                              self._plasma_store_socket_name)
    @property
    def webui_url(self):
        """Get the cluster's web UI url."""
        return self._webui_url
    @property
    def raylet_socket_name(self):
        """Get the node's raylet socket name."""
        return self._raylet_socket_name
    @property
    def node_manager_port(self):
        """Get the node manager's port."""
        return self._ray_params.node_manager_port
    @property
    def address_info(self):
        """Get a dictionary of the addresses needed to connect to this node."""
        return {
            "node_ip_address": self._node_ip_address,
            "redis_address": self._redis_address,
            "object_store_address": self._plasma_store_socket_name,
            "raylet_socket_name": self._raylet_socket_name,
            "webui_url": self._webui_url,
            "session_dir": self._session_dir,
        }
def create_redis_client(self):
"""Create a redis client."""
return ray.services.create_redis_client(
self._redis_address, self._ray_params.redis_password)
    def get_temp_dir_path(self):
        """Get the path of the temporary directory."""
        return self._temp_dir
    def get_session_dir_path(self):
        """Get the path of this session's directory (under the temp dir)."""
        return self._session_dir
    def get_logs_dir_path(self):
        """Get the path of the directory holding process log files."""
        return self._logs_dir
    def get_sockets_dir_path(self):
        """Get the path of the directory holding socket files."""
        return self._sockets_dir
def _make_inc_temp(self, suffix="", prefix="", directory_name="/tmp/ray"):
"""Return a incremental temporary file name. The file is not created.
Args:
suffix (str): The suffix of the temp file.
prefix (str): The prefix of the temp file.
directory_name (str) : The base directory of the temp file.
Returns:
A string of file name. If there existing a file having
the same name, the returned name will look like
"{directory_name}/{prefix}.{unique_index}{suffix}"
"""
directory_name = os.path.expanduser(directory_name)
index = self._incremental_dict[suffix, prefix, directory_name]
# `tempfile.TMP_MAX` could be extremely large,
# so using `range` in Python2.x should be avoided.
while index < tempfile.TMP_MAX:
if index == 0:
filename = os.path.join(directory_name, prefix + suffix)
else:
filename = os.path.join(directory_name,
prefix + "." + str(index) + suffix)
index += 1
if not os.path.exists(filename):
# Save the index.
self._incremental_dict[suffix, prefix, directory_name] = index
return filename
raise FileExistsError(errno.EEXIST,
"No usable temporary filename found")
def new_log_files(self, name, redirect_output=True):
"""Generate partially randomized filenames for log files.
Args:
name (str): descriptive string for this log file.
redirect_output (bool): True if files should be generated for
logging stdout and stderr and false if stdout and stderr
should not be redirected.
If it is None, it will use the "redirect_output" Ray parameter.
Returns:
If redirect_output is true, this will return a tuple of two
file handles. The first is for redirecting stdout and the
second is for redirecting stderr.
If redirect_output is false, this will return a tuple
of two None objects.
"""
if redirect_output is None:
redirect_output = self._ray_params.redirect_output
if not redirect_output:
return None, None
log_stdout = self._make_inc_temp(
suffix=".out", prefix=name, directory_name=self._logs_dir)
log_stderr = self._make_inc_temp(
suffix=".err", prefix=name, directory_name=self._logs_dir)
# Line-buffer the output (mode 1).
log_stdout_file = open(log_stdout, "a", buffering=1)
log_stderr_file = open(log_stderr, "a", buffering=1)
return log_stdout_file, log_stderr_file
def _get_unused_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
return port
def _prepare_socket_file(self, socket_path, default_prefix):
"""Prepare the socket file for raylet and plasma.
This method helps to prepare a socket file.
1. Make the directory if the directory does not exist.
2. If the socket file exists, raise exception.
Args:
socket_path (string): the socket file to prepare.
"""
if socket_path is not None:
if os.path.exists(socket_path):
raise Exception("Socket file {} exists!".format(socket_path))
socket_dir = os.path.dirname(socket_path)
try_to_create_directory(socket_dir)
return socket_path
return self._make_inc_temp(
prefix=default_prefix, directory_name=self._sockets_dir)
def start_reaper_process(self):
"""
Start the reaper process.
This must be the first process spawned and should only be called when
ray processes should be cleaned up if this process dies.
"""
process_info = ray.services.start_reaper()
assert ray_constants.PROCESS_TYPE_REAPER not in self.all_processes
if process_info is not None:
self.all_processes[ray_constants.PROCESS_TYPE_REAPER] = [
process_info
]
def start_redis(self):
"""Start the Redis servers."""
assert self._redis_address is None
redis_log_files = [self.new_log_files("redis")]
for i in range(self._ray_params.num_redis_shards):
redis_log_files.append(self.new_log_files("redis-shard_" + str(i)))
(self._redis_address, redis_shards,
process_infos) = ray.services.start_redis(
self._node_ip_address,
redis_log_files,
self.get_resource_spec(),
port=self._ray_params.redis_port,
redis_shard_ports=self._ray_params.redis_shard_ports,
num_redis_shards=self._ray_params.num_redis_shards,
redis_max_clients=self._ray_params.redis_max_clients,
redirect_worker_output=True,
password=self._ray_params.redis_password,
include_java=self._ray_params.include_java)
assert (
ray_constants.PROCESS_TYPE_REDIS_SERVER not in self.all_processes)
self.all_processes[ray_constants.PROCESS_TYPE_REDIS_SERVER] = (
process_infos)
def start_log_monitor(self):
"""Start the log monitor."""
stdout_file, stderr_file = self.new_log_files("log_monitor")
process_info = ray.services.start_log_monitor(
self.redis_address,
self._logs_dir,
stdout_file=stdout_file,
stderr_file=stderr_file,
redis_password=self._ray_params.redis_password)
assert ray_constants.PROCESS_TYPE_LOG_MONITOR not in self.all_processes
self.all_processes[ray_constants.PROCESS_TYPE_LOG_MONITOR] = [
process_info
]
def start_reporter(self):
"""Start the reporter."""
stdout_file, stderr_file = self.new_log_files("reporter", True)
process_info = ray.services.start_reporter(
self.redis_address,
stdout_file=stdout_file,
stderr_file=stderr_file,
redis_password=self._ray_params.redis_password)
assert ray_constants.PROCESS_TYPE_REPORTER not in self.all_processes
if process_info is not None:
self.all_processes[ray_constants.PROCESS_TYPE_REPORTER] = [
process_info
]
def start_dashboard(self, require_webui):
"""Start the dashboard.
Args:
require_webui (bool): If true, this will raise an exception if we
fail to start the webui. Otherwise it will print a warning if
we fail to start the webui.
"""
stdout_file, stderr_file = self.new_log_files("dashboard", True)
self._webui_url, process_info = ray.services.start_dashboard(
require_webui,
self._ray_params.webui_host,
self.redis_address,
self._temp_dir,
stdout_file=stdout_file,
stderr_file=stderr_file,
redis_password=self._ray_params.redis_password)
assert ray_constants.PROCESS_TYPE_DASHBOARD not in self.all_processes
if process_info is not None:
self.all_processes[ray_constants.PROCESS_TYPE_DASHBOARD] = [
process_info
]
redis_client = self.create_redis_client()
redis_client.hmset("webui", {"url": self._webui_url})
def start_plasma_store(self):
"""Start the plasma store."""
stdout_file, stderr_file = self.new_log_files("plasma_store")
process_info = ray.services.start_plasma_store(
self.get_resource_spec(),
stdout_file=stdout_file,
stderr_file=stderr_file,
plasma_directory=self._ray_params.plasma_directory,
huge_pages=self._ray_params.huge_pages,
plasma_store_socket_name=self._plasma_store_socket_name)
assert (
ray_constants.PROCESS_TYPE_PLASMA_STORE not in self.all_processes)
self.all_processes[ray_constants.PROCESS_TYPE_PLASMA_STORE] = [
process_info
]
    def start_raylet(self, use_valgrind=False, use_profiler=False):
        """Start the raylet.

        Args:
            use_valgrind (bool): True if we should start the process in
                valgrind.
            use_profiler (bool): True if we should start the process in the
                valgrind profiler.
        """
        stdout_file, stderr_file = self.new_log_files("raylet")
        # The raylet receives the plasma socket, worker entry-point script,
        # and the resolved resource spec for this node.
        process_info = ray.services.start_raylet(
            self._redis_address,
            self._node_ip_address,
            self._ray_params.node_manager_port,
            self._raylet_socket_name,
            self._plasma_store_socket_name,
            self._ray_params.worker_path,
            self._temp_dir,
            self._session_dir,
            self.get_resource_spec(),
            self._ray_params.object_manager_port,
            self._ray_params.redis_password,
            use_valgrind=use_valgrind,
            use_profiler=use_profiler,
            stdout_file=stdout_file,
            stderr_file=stderr_file,
            config=self._config,
            include_java=self._ray_params.include_java,
            java_worker_options=self._ray_params.java_worker_options,
            load_code_from_local=self._ray_params.load_code_from_local,
            use_pickle=self._ray_params.use_pickle)
        # Exactly one raylet is started per node.
        assert ray_constants.PROCESS_TYPE_RAYLET not in self.all_processes
        self.all_processes[ray_constants.PROCESS_TYPE_RAYLET] = [process_info]
def new_worker_redirected_log_file(self, worker_id):
"""Create new logging files for workers to redirect its output."""
worker_stdout_file, worker_stderr_file = (self.new_log_files(
"worker-" + ray.utils.binary_to_hex(worker_id), True))
return worker_stdout_file, worker_stderr_file
    def start_worker(self):
        """Start a worker process.

        Not implemented on this class; always raises NotImplementedError.
        """
        raise NotImplementedError
def start_monitor(self):
"""Start the monitor."""
stdout_file, stderr_file = self.new_log_files("monitor")
process_info = ray.services.start_monitor(
self._redis_address,
stdout_file=stdout_file,
stderr_file=stderr_file,
autoscaling_config=self._ray_params.autoscaling_config,
redis_password=self._ray_params.redis_password)
assert ray_constants.PROCESS_TYPE_MONITOR not in self.all_processes
self.all_processes[ray_constants.PROCESS_TYPE_MONITOR] = [process_info]
def start_raylet_monitor(self):
"""Start the raylet monitor."""
stdout_file, stderr_file = self.new_log_files("raylet_monitor")
process_info = ray.services.start_raylet_monitor(
self._redis_address,
stdout_file=stdout_file,
stderr_file=stderr_file,
redis_password=self._ray_params.redis_password,
config=self._config)
assert (ray_constants.PROCESS_TYPE_RAYLET_MONITOR not in
self.all_processes)
self.all_processes[ray_constants.PROCESS_TYPE_RAYLET_MONITOR] = [
process_info
]
def start_head_processes(self):
"""Start head processes on the node."""
logger.debug(
"Process STDOUT and STDERR is being redirected to {}.".format(
self._logs_dir))
assert self._redis_address is None
# If this is the head node, start the relevant head node processes.
self.start_redis()
self.start_monitor()
self.start_raylet_monitor()
if self._ray_params.include_webui:
self.start_dashboard(require_webui=True)
elif self._ray_params.include_webui is None:
self.start_dashboard(require_webui=False)
def start_ray_processes(self):
"""Start all of the processes on the node."""
logger.debug(
"Process STDOUT and STDERR is being redirected to {}.".format(
self._logs_dir))
self.start_plasma_store()
self.start_raylet()
self.start_reporter()
if self._ray_params.include_log_monitor:
self.start_log_monitor()
    def _kill_process_type(self,
                           process_type,
                           allow_graceful=False,
                           check_alive=True,
                           wait=False):
        """Kill a process of a given type.

        If the process type is PROCESS_TYPE_REDIS_SERVER, then we will kill all
        of the Redis servers.

        If the process was started in valgrind, then we will raise an exception
        if the process has a non-zero exit code.

        Args:
            process_type: The type of the process to kill.
            allow_graceful (bool): Send a SIGTERM first and give the process
                time to exit gracefully. If that doesn't work, then use
                SIGKILL. We usually want to do this outside of tests.
            check_alive (bool): If true, then we expect the process to be alive
                and will raise an exception if the process is already dead.
            wait (bool): If true, then this method will not return until the
                process in question has exited.

        Raises:
            This process raises an exception in the following cases:
                1. The process had already died and check_alive is true.
                2. The process had been started in valgrind and had a non-zero
                    exit code.
        """
        process_infos = self.all_processes[process_type]
        # Only the Redis entry may track more than one process per type.
        if process_type != ray_constants.PROCESS_TYPE_REDIS_SERVER:
            assert len(process_infos) == 1
        for process_info in process_infos:
            process = process_info.process
            # Handle the case where the process has already exited.
            if process.poll() is not None:
                if check_alive:
                    raise Exception("Attempting to kill a process of type "
                                    "'{}', but this process is already dead."
                                    .format(process_type))
                else:
                    continue

            if process_info.use_valgrind:
                process.terminate()
                process.wait()
                # A non-zero exit code means valgrind found errors; attach
                # the process logs to make the failure debuggable.
                if process.returncode != 0:
                    message = ("Valgrind detected some errors in process of "
                               "type {}. Error code {}.".format(
                                   process_type, process.returncode))
                    if process_info.stdout_file is not None:
                        with open(process_info.stdout_file, "r") as f:
                            message += "\nPROCESS STDOUT:\n" + f.read()
                    if process_info.stderr_file is not None:
                        with open(process_info.stderr_file, "r") as f:
                            message += "\nPROCESS STDERR:\n" + f.read()
                    raise Exception(message)
                continue

            if process_info.use_valgrind_profiler:
                # Give process signal to write profiler data.
                os.kill(process.pid, signal.SIGINT)
                # Wait for profiling data to be written.
                time.sleep(0.1)

            if allow_graceful:
                # Allow the process one second to exit gracefully.
                process.terminate()
                # The timer is the SIGKILL fallback in case the process
                # ignores SIGTERM; cancel it once wait() returns so a
                # process that exited in time is not killed twice.
                timer = threading.Timer(1, lambda process: process.kill(),
                                        [process])
                try:
                    timer.start()
                    process.wait()
                finally:
                    timer.cancel()

                if process.poll() is not None:
                    continue

            # If the process did not exit within one second, force kill it.
            process.kill()
            # The reason we usually don't call process.wait() here is that
            # there's some chance we'd end up waiting a really long time.
            if wait:
                process.wait()

        del self.all_processes[process_type]
def kill_redis(self, check_alive=True):
"""Kill the Redis servers.
Args:
check_alive (bool): Raise an exception if any of the processes
were already dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_REDIS_SERVER, check_alive=check_alive)
def kill_plasma_store(self, check_alive=True):
"""Kill the plasma store.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_PLASMA_STORE, check_alive=check_alive)
def kill_raylet(self, check_alive=True):
"""Kill the raylet.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_RAYLET, check_alive=check_alive)
def kill_log_monitor(self, check_alive=True):
"""Kill the log monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive)
def kill_reporter(self, check_alive=True):
"""Kill the reporter.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_REPORTER, check_alive=check_alive)
def kill_dashboard(self, check_alive=True):
"""Kill the dashboard.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_DASHBOARD, check_alive=check_alive)
def kill_monitor(self, check_alive=True):
"""Kill the monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_MONITOR, check_alive=check_alive)
def kill_raylet_monitor(self, check_alive=True):
"""Kill the raylet monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_RAYLET_MONITOR, check_alive=check_alive)
def kill_reaper(self, check_alive=True):
"""Kill the reaper process.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_REAPER, check_alive=check_alive)
    def kill_all_processes(self, check_alive=True, allow_graceful=False):
        """Kill all of the processes.

        Note that This is slower than necessary because it calls kill, wait,
        kill, wait, ... instead of kill, kill, ..., wait, wait, ...

        Args:
            check_alive (bool): Raise an exception if any of the processes were
                already dead.
            allow_graceful (bool): Send SIGTERM first and fall back to SIGKILL
                after a grace period (see _kill_process_type).
        """
        # Kill the raylet first. This is important for suppressing errors at
        # shutdown because we give the raylet a chance to exit gracefully and
        # clean up its child worker processes. If we were to kill the plasma
        # store (or Redis) first, that could cause the raylet to exit
        # ungracefully, leading to more verbose output from the workers.
        if ray_constants.PROCESS_TYPE_RAYLET in self.all_processes:
            self._kill_process_type(
                ray_constants.PROCESS_TYPE_RAYLET,
                check_alive=check_alive,
                allow_graceful=allow_graceful)

        # We call "list" to copy the keys because we are modifying the
        # dictionary while iterating over it.
        for process_type in list(self.all_processes.keys()):
            # Need to kill the reaper process last in case we die unexpectedly
            # while cleaning up.
            if process_type != ray_constants.PROCESS_TYPE_REAPER:
                self._kill_process_type(
                    process_type,
                    check_alive=check_alive,
                    allow_graceful=allow_graceful)

        if ray_constants.PROCESS_TYPE_REAPER in self.all_processes:
            self._kill_process_type(
                ray_constants.PROCESS_TYPE_REAPER,
                check_alive=check_alive,
                allow_graceful=allow_graceful)
def live_processes(self):
"""Return a list of the live processes.
Returns:
A list of the live processes.
"""
result = []
for process_type, process_infos in self.all_processes.items():
for process_info in process_infos:
if process_info.process.poll() is None:
result.append((process_type, process_info.process))
return result
def dead_processes(self):
"""Return a list of the dead processes.
Note that this ignores processes that have been explicitly killed,
e.g., via a command like node.kill_raylet().
Returns:
A list of the dead processes ignoring the ones that have been
explicitly killed.
"""
result = []
for process_type, process_infos in self.all_processes.items():
for process_info in process_infos:
if process_info.process.poll() is not None:
result.append((process_type, process_info.process))
return result
def any_processes_alive(self):
"""Return true if any processes are still alive.
Returns:
True if any process is still alive.
"""
return any(self.live_processes())
def remaining_processes_alive(self):
"""Return true if all remaining processes are still alive.
Note that this ignores processes that have been explicitly killed,
e.g., via a command like node.kill_raylet().
Returns:
True if any process that wasn't explicitly killed is still alive.
"""
return not any(self.dead_processes())
class LocalNode:
    """Stand-in for the node object when Ray runs in local mode."""

    def kill_all_processes(self, *args, **kwargs):
        """Accept any arguments and do nothing; local mode owns no processes."""
        # Must stay a no-op: worker.py invokes this during shutdown.
        return None

    @property
    def address_info(self):
        """Local mode exposes no service addresses."""
        return {}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/parameter.py
|
Python
|
import logging
import numpy as np
from packaging import version
import ray.ray_constants as ray_constants
class RayParams:
    """A class used to store the parameters used by Ray.

    Attributes:
        redis_address (str): The address of the Redis server to connect to. If
            this address is not provided, then this command will start Redis, a
            raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits.
        redis_port (int): The port that the primary Redis shard should listen
            to. If None, then a random port will be chosen.
        redis_shard_ports: A list of the ports to use for the non-primary Redis
            shards.
        num_cpus (int): Number of CPUs to configure the raylet with.
        num_gpus (int): Number of GPUs to configure the raylet with.
        resources: A dictionary mapping the name of a resource to the quantity
            of that resource available.
        memory: Total available memory for workers requesting memory.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        redis_max_memory: The max amount of memory (in bytes) to allow redis
            to use, or None for no limit. Once the limit is exceeded, redis
            will start LRU eviction of entries. This only applies to the
            sharded redis tables (task and object tables).
        object_manager_port (int): The port to use for the object manager.
        node_manager_port: The port to use for the node manager.
        node_ip_address (str): The IP address of the node that we are on.
        object_id_seed (int): Used to seed the deterministic generation of
            object IDs. The same value can be used across multiple runs of the
            same job in order to generate the object IDs in a consistent
            manner. However, the same ID should not be used for different jobs.
        local_mode (bool): True if the code should be executed serially
            without Ray. This is useful for debugging.
        redirect_worker_output: True if the stdout and stderr of worker
            processes should be redirected to files. (Deprecated; see
            _check_usage.)
        redirect_output (bool): True if stdout and stderr for non-worker
            processes should be redirected to files and false otherwise.
            (Deprecated; see _check_usage.)
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        worker_path (str): The path of the source code that will be run by the
            worker.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        include_webui: Boolean flag indicating whether to start the web
            UI, which displays the status of the Ray cluster. If this value is
            None, then the UI will be started if the relevant dependencies are
            present.
        webui_host: The host to bind the web UI server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        logging_level: Logging level, default will be logging.INFO.
        logging_format: Logging format, default contains a timestamp,
            filename, line number, and message. See ray_constants.py.
        plasma_store_socket_name (str): If provided, it will specify the socket
            name used by the plasma store.
        raylet_socket_name (str): If provided, it will specify the socket path
            used by the raylet process.
        temp_dir (str): If provided, it will specify the root temporary
            directory for the Ray process.
        include_log_monitor (bool): If True, then start a log monitor to
            monitor the log files for all processes on this node and push their
            contents to Redis.
        autoscaling_config: path to autoscaling config file.
        include_java (bool): If True, the raylet backend can also support
            Java worker.
        java_worker_options (str): The command options for Java worker.
        load_code_from_local: Whether load code from local file or from GCS.
        use_pickle: Whether data objects should be serialized with cloudpickle.
        _internal_config (str): JSON configuration for overriding
            RayConfig defaults. For testing purposes ONLY.
    """

    def __init__(self,
                 redis_address=None,
                 num_cpus=None,
                 num_gpus=None,
                 resources=None,
                 memory=None,
                 object_store_memory=None,
                 redis_max_memory=None,
                 redis_port=None,
                 redis_shard_ports=None,
                 object_manager_port=None,
                 node_manager_port=None,
                 node_ip_address=None,
                 object_id_seed=None,
                 local_mode=False,
                 driver_mode=None,
                 redirect_worker_output=None,
                 redirect_output=None,
                 num_redis_shards=None,
                 redis_max_clients=None,
                 redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
                 plasma_directory=None,
                 worker_path=None,
                 huge_pages=False,
                 include_webui=None,
                 webui_host="localhost",
                 logging_level=logging.INFO,
                 logging_format=ray_constants.LOGGER_FORMAT,
                 plasma_store_socket_name=None,
                 raylet_socket_name=None,
                 temp_dir=None,
                 include_log_monitor=None,
                 autoscaling_config=None,
                 include_java=False,
                 java_worker_options=None,
                 load_code_from_local=False,
                 use_pickle=False,
                 _internal_config=None):
        self.object_id_seed = object_id_seed
        self.redis_address = redis_address
        self.num_cpus = num_cpus
        self.num_gpus = num_gpus
        self.memory = memory
        self.object_store_memory = object_store_memory
        self.resources = resources
        self.redis_max_memory = redis_max_memory
        self.redis_port = redis_port
        self.redis_shard_ports = redis_shard_ports
        self.object_manager_port = object_manager_port
        self.node_manager_port = node_manager_port
        self.node_ip_address = node_ip_address
        self.local_mode = local_mode
        self.driver_mode = driver_mode
        self.redirect_worker_output = redirect_worker_output
        self.redirect_output = redirect_output
        self.num_redis_shards = num_redis_shards
        self.redis_max_clients = redis_max_clients
        self.redis_password = redis_password
        self.plasma_directory = plasma_directory
        self.worker_path = worker_path
        self.huge_pages = huge_pages
        self.include_webui = include_webui
        self.webui_host = webui_host
        self.plasma_store_socket_name = plasma_store_socket_name
        self.raylet_socket_name = raylet_socket_name
        self.temp_dir = temp_dir
        self.include_log_monitor = include_log_monitor
        self.autoscaling_config = autoscaling_config
        self.include_java = include_java
        self.java_worker_options = java_worker_options
        self.load_code_from_local = load_code_from_local
        self.use_pickle = use_pickle
        self._internal_config = _internal_config
        # Validate the parameter combination immediately so misuse fails at
        # construction time rather than when the node starts.
        self._check_usage()

    def update(self, **kwargs):
        """Update the settings according to the keyword arguments.

        Args:
            kwargs: The keyword arguments to set corresponding fields.

        Raises:
            ValueError: If a keyword does not name an existing attribute.
        """
        for arg in kwargs:
            if hasattr(self, arg):
                setattr(self, arg, kwargs[arg])
            else:
                raise ValueError("Invalid RayParams parameter in"
                                 " update: %s" % arg)
        # Re-validate after every batch of changes.
        self._check_usage()

    def update_if_absent(self, **kwargs):
        """Update the settings when the target fields are None.

        Args:
            kwargs: The keyword arguments to set corresponding fields.

        Raises:
            ValueError: If a keyword does not name an existing attribute.
        """
        for arg in kwargs:
            if hasattr(self, arg):
                # Only fill in fields the caller has not already set.
                if getattr(self, arg) is None:
                    setattr(self, arg, kwargs[arg])
            else:
                raise ValueError("Invalid RayParams parameter in"
                                 " update_if_absent: %s" % arg)
        # Re-validate after every batch of changes.
        self._check_usage()

    def _check_usage(self):
        """Validate parameter combinations; raise on invalid or deprecated use."""
        if self.resources is not None:
            # CPU/GPU quantities have dedicated constructor arguments and must
            # not be smuggled in through the generic resources dict.
            assert "CPU" not in self.resources, (
                "'CPU' should not be included in the resource dictionary. Use "
                "num_cpus instead.")
            assert "GPU" not in self.resources, (
                "'GPU' should not be included in the resource dictionary. Use "
                "num_gpus instead.")

        # The redirect_* arguments are removed; any non-None value means the
        # caller is still passing them, so fail loudly.
        if self.redirect_worker_output is not None:
            raise DeprecationWarning(
                "The redirect_worker_output argument is deprecated. To "
                "control logging to the driver, use the 'log_to_driver' "
                "argument to 'ray.init()'")
        if self.redirect_output is not None:
            raise DeprecationWarning(
                "The redirect_output argument is deprecated.")

        if self.use_pickle:
            assert (version.parse(
                np.__version__) >= version.parse("1.16.0")), (
                    "numpy >= 1.16.0 required for use_pickle=True support. "
                    "You can use ray.init(use_pickle=False) for older numpy "
                    "versions, but this may be removed in future versions.")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/profiling.py
|
Python
|
import ray
class _NullLogSpan:
"""A log span context manager that does nothing"""
def __enter__(self):
pass
def __exit__(self, type, value, tb):
pass
NULL_LOG_SPAN = _NullLogSpan()
def profile(event_type, extra_data=None):
    """Profile a span of time so that it appears in the timeline visualization.

    Note that this only works in the raylet code path.

    Use as a context manager, on the driver or inside a task:

    .. code-block:: python

        with ray.profile("custom event", extra_data={'key': 'value'}):
            # Do some computation here.

    Optionally, a dictionary can be passed as the "extra_data" argument; its
    entries appear at the bottom of the chrome tracing GUI when the box for
    this span is clicked. The special keys "name" and "cname" override the
    displayed text and the box color, respectively.

    Args:
        event_type: A string describing the type of the event.
        extra_data: A dictionary mapping strings to strings, attached to the
            timeline entry.

    Returns:
        An object that can profile a span of time via a "with" statement.
    """
    worker = ray.worker.global_worker
    if worker.mode != ray.worker.LOCAL_MODE:
        return worker.core_worker.profile_event(
            event_type.encode("ascii"), extra_data)
    # Local mode has no raylet to record events; hand back a no-op span.
    return NULL_LOG_SPAN
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/projects/__init__.py
|
Python
|
from ray.projects.projects import ProjectDefinition

# Public API of ray.projects: only ProjectDefinition is exported.
__all__ = [
    "ProjectDefinition",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/projects/projects.py
|
Python
|
import argparse
import copy
import json
import jsonschema
import os
import yaml
def make_argument_parser(name, params, wildcards):
    """Build argument parser dynamically to parse parameter arguments.

    Args:
        name (str): Name of the command to parse.
        params (dict): Parameter specification used to construct
            the argparse parser.
        wildcards (bool): Whether wildcards are allowed as arguments.

    Returns:
        The argparse parser.
        A dictionary from argument name to list of valid choices.
    """
    type_map = {"int": int, "str": str, "float": float}
    parser = argparse.ArgumentParser(prog=name)
    # Remember each parameter's 'choices' list so wildcard values can be
    # expanded back into the full list later.
    choices = {}
    for param in params:
        # The param spec doubles as the kwargs for add_argument, minus "name".
        add_kwargs = copy.deepcopy(param)
        arg_name = add_kwargs.pop("name")
        if wildcards and "choices" in param:
            choices[arg_name] = param["choices"]
            # "*" becomes an additional legal value meaning "all choices".
            add_kwargs["choices"] = param["choices"] + ["*"]
        if "type" in param:
            if param["type"] not in type_map:
                raise ValueError(
                    "Parameter {} has type {} which is not supported. "
                    "Type must be one of {}".format(arg_name, param["type"],
                                                    list(type_map.keys())))
            add_kwargs["type"] = type_map[param["type"]]
        parser.add_argument("--" + arg_name, dest=arg_name, **add_kwargs)
    return parser, choices
class ProjectDefinition:
    """Parsed, validated representation of a ray-project definition."""

    def __init__(self, current_dir):
        """Finds ray-project folder for current project, parse and validates it.

        Args:
            current_dir (str): Path from which to search for ray-project.

        Raises:
            jsonschema.exceptions.ValidationError: This exception is raised
                if the project file is not valid.
            ValueError: This exception is raised if there are other errors in
                the project definition (e.g. files not existing).
        """
        root = find_root(current_dir)
        if root is None:
            raise ValueError("No project root found")
        # Add an empty pathname to the end so that rsync will copy the project
        # directory to the correct target.
        self.root = os.path.join(root, "")

        # Parse the project YAML.
        project_file = os.path.join(self.root, "ray-project", "project.yaml")
        if not os.path.exists(project_file):
            raise ValueError("Project file {} not found".format(project_file))
        with open(project_file) as f:
            self.config = yaml.safe_load(f)
        check_project_config(self.root, self.config)

    def cluster_yaml(self):
        """Return the project's cluster configuration filename."""
        return self.config["cluster"]["config"]

    def working_directory(self):
        """Return the project's working directory on a cluster session."""
        # Add an empty pathname to the end so that rsync will copy the project
        # directory to the correct target.
        directory = os.path.join("~", self.config["name"], "")
        return directory

    def get_command_info(self, command_name, args, shell, wildcards=False):
        """Get the shell command, parsed arguments and config for a command.

        Args:
            command_name (str): Name of the command to run. The command
                definition should be available in project.yaml.
            args (tuple): Tuple containing arguments to format the command
                with.
            shell (bool): If True, command_name is treated as a raw shell
                command and returned unparsed.
            wildcards (bool): If True, enable wildcards as arguments.

        Returns:
            The raw shell command to run with placeholders for the arguments.
            The parsed argument dictonary, parsed with argparse.
            The config dictionary of the command.

        Raises:
            ValueError: This exception is raised if the given command is not
                found in project.yaml.
        """
        if shell or not command_name:
            return command_name, {}, {}

        command_to_run = None
        params = None
        config = None

        for command_definition in self.config["commands"]:
            if command_definition["name"] == command_name:
                command_to_run = command_definition["command"]
                params = command_definition.get("params", [])
                config = command_definition.get("config", {})
        if not command_to_run:
            # (Fixed typo "commmands" in this user-facing message.)
            raise ValueError(
                "Cannot find the command named '{}' in commands section "
                "of the project file.".format(command_name))

        parser, choices = make_argument_parser(command_name, params, wildcards)
        parsed_args = vars(parser.parse_args(list(args)))
        if wildcards:
            # A literal "*" expands to the full list of declared choices.
            for key, val in parsed_args.items():
                if val == "*":
                    parsed_args[key] = choices[key]
        return command_to_run, parsed_args, config

    def git_repo(self):
        """Return the project's configured git repo URL, or None."""
        return self.config.get("repo", None)
def find_root(directory):
    """Find root directory of the ray project.

    Walks upward from ``directory`` until a folder containing a
    "ray-project" subdirectory is found or the filesystem root is reached.

    Args:
        directory (str): Directory to start the search in.

    Returns:
        Path of the parent directory containing the ray-project or
        None if no such project is found.
    """
    current = os.path.abspath(directory)
    while True:
        if os.path.isdir(os.path.join(current, "ray-project")):
            return current
        parent = os.path.abspath(os.path.join(current, os.pardir))
        if parent == current:
            # Reached the filesystem root without finding a project.
            return None
        current = parent
def validate_project_schema(project_config):
    """Validate a project config against the official ray project schema.

    Args:
        project_config (dict): Parsed project yaml.

    Raises:
        jsonschema.exceptions.ValidationError: This exception is raised
            if the project file is not valid.
    """
    # "schema.json" ships alongside this module. (Renamed local from "dir",
    # which shadowed the builtin of the same name.)
    schema_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(schema_dir, "schema.json")) as f:
        schema = json.load(f)

    jsonschema.validate(instance=project_config, schema=schema)
def check_project_config(project_root, project_config):
    """Checks if the project definition is valid.

    Args:
        project_root (str): Path containing the ray-project
        project_config (dict): Project config definition

    Raises:
        jsonschema.exceptions.ValidationError: This exception is raised
            if the project file is not valid.
        ValueError: This exception is raised if there are other errors in
            the project definition (e.g. files not existing).
    """
    validate_project_schema(project_config)

    # The autoscaler config referenced by the project must exist on disk.
    cluster_file = os.path.join(project_root,
                                project_config["cluster"]["config"])
    if not os.path.exists(cluster_file):
        raise ValueError("'cluster' file does not exist "
                         "in {}".format(project_root))

    if "environment" not in project_config:
        return
    env = project_config["environment"]

    if "dockerfile" in env and "dockerimage" in env:
        raise ValueError("Cannot specify both 'dockerfile' and "
                         "'dockerimage' in environment.")

    # Every file referenced from the environment must exist on disk.
    for key in ("requirements", "dockerfile"):
        if key in env:
            referenced_file = os.path.join(project_root, env[key])
            if not os.path.exists(referenced_file):
                raise ValueError("'{}' file in 'environment' does "
                                 "not exist in {}".format(key, project_root))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/projects/scripts.py
|
Python
|
import argparse
import click
import copy
import jsonschema
import logging
import os
from shutil import copyfile
import subprocess
import sys
import time
import ray
from ray.autoscaler.commands import (
attach_cluster,
exec_cluster,
create_or_update_cluster,
rsync,
teardown_cluster,
)
logging.basicConfig(format=ray.ray_constants.LOGGER_FORMAT, level=logging.INFO)
# NOTE(review): getLogger(__file__) names the logger after the module's file
# path; the usual convention is getLogger(__name__). Confirm nothing filters
# on the current name before changing it.
logger = logging.getLogger(__file__)
# File layout for generated project files
# user-dir/
#   ray-project/
#     project.yaml
#     cluster.yaml
#     requirements.txt
PROJECT_DIR = "ray-project"
PROJECT_YAML = os.path.join(PROJECT_DIR, "project.yaml")
CLUSTER_YAML = os.path.join(PROJECT_DIR, "cluster.yaml")
REQUIREMENTS_TXT = os.path.join(PROJECT_DIR, "requirements.txt")

# File layout for templates file
# RAY/.../projects/
#   templates/
#     cluster_template.yaml
#     project_template.yaml
#     requirements.txt
_THIS_FILE_DIR = os.path.split(os.path.abspath(__file__))[0]
_TEMPLATE_DIR = os.path.join(_THIS_FILE_DIR, "templates")
PROJECT_TEMPLATE = os.path.join(_TEMPLATE_DIR, "project_template.yaml")
CLUSTER_TEMPLATE = os.path.join(_TEMPLATE_DIR, "cluster_template.yaml")
REQUIREMENTS_TXT_TEMPLATE = os.path.join(_TEMPLATE_DIR, "requirements.txt")
@click.group(
    "project", help="[Experimental] Commands working with ray project")
def project_cli():
    # Empty body: this group exists only as a parent for its subcommands.
    pass
@project_cli.command(help="Validate current project spec")
@click.option(
    "--verbose", help="If set, print the validated file", is_flag=True)
def validate(verbose):
    # Load and validate the project rooted at the current directory; status
    # goes to stderr so stdout stays clean for the --verbose config dump.
    try:
        definition = ray.projects.ProjectDefinition(os.getcwd())
        print("Project files validated!", file=sys.stderr)
        if verbose:
            print(definition.config)
    except (jsonschema.exceptions.ValidationError, ValueError) as err:
        print("Validation failed for the following reason", file=sys.stderr)
        raise click.ClickException(err)
@project_cli.command(help="Create a new project within current directory")
@click.argument("project_name")
@click.option(
    "--cluster-yaml",
    help="Path to autoscaler yaml. Created by default",
    default=None)
@click.option(
    "--requirements",
    help="Path to requirements.txt. Created by default",
    default=None)
def create(project_name, cluster_yaml, requirements):
    # Scaffold a new ray-project directory (project.yaml, cluster.yaml,
    # requirements.txt) from the bundled templates.
    if os.path.exists(PROJECT_DIR):
        raise click.ClickException(
            "Project directory {} already exists.".format(PROJECT_DIR))
    os.makedirs(PROJECT_DIR)

    if cluster_yaml is None:
        logger.warning("Using default autoscaler yaml")

        with open(CLUSTER_TEMPLATE) as f:
            template = f.read().replace(r"{{name}}", project_name)
        with open(CLUSTER_YAML, "w") as f:
            f.write(template)
        cluster_yaml = CLUSTER_YAML

    if requirements is None:
        logger.warning("Using default requirements.txt")
        # no templating required, just copy the file
        copyfile(REQUIREMENTS_TXT_TEMPLATE, REQUIREMENTS_TXT)
        requirements = REQUIREMENTS_TXT

    repo = None
    if os.path.exists(".git"):
        try:
            # check_output returns bytes on Python 3; decode so the URL is
            # not rendered as "b'...'" when substituted into project.yaml.
            repo = subprocess.check_output(
                "git remote get-url origin".split(
                    " ")).strip().decode("utf-8")
            logger.info("Setting repo URL to %s", repo)
        except subprocess.CalledProcessError:
            # No origin remote configured; leave the repo line commented out.
            pass

    with open(PROJECT_TEMPLATE) as f:
        project_template = f.read()
        # NOTE(simon):
        # We could use jinja2, which will make the templating part easier.
        project_template = project_template.replace(r"{{name}}", project_name)
        project_template = project_template.replace(r"{{cluster}}",
                                                    cluster_yaml)
        project_template = project_template.replace(r"{{requirements}}",
                                                    requirements)
        if repo is None:
            project_template = project_template.replace(
                r"{{repo_string}}", "# repo: {}".format("..."))
        else:
            project_template = project_template.replace(
                r"{{repo_string}}", "repo: {}".format(repo))
    with open(PROJECT_YAML, "w") as f:
        f.write(project_template)
@click.group(
    "session",
    help="[Experimental] Commands working with sessions, which are "
    "running instances of a project.")
def session_cli():
    # Empty body: this group exists only as a parent for its subcommands.
    pass
def load_project_or_throw():
    # Validate the project file rooted at the CWD; translate validation
    # failures into a ClickException so the CLI prints a clean error
    # message instead of a traceback.
    try:
        return ray.projects.ProjectDefinition(os.getcwd())
    except (jsonschema.exceptions.ValidationError, ValueError):
        raise click.ClickException(
            "Project file validation failed. Please run "
            "`ray project validate` to inspect the error.")
class SessionRunner:
    """Class for setting up a session and executing commands in it."""

    def __init__(self, session_name=None):
        """Initialize session runner and try to parse the command arguments.

        Args:
            session_name (str): Name of the session.

        Raises:
            click.ClickException: This exception is raised if any error occurs.
        """
        self.project_definition = load_project_or_throw()
        self.session_name = session_name

        # Check for features we don't support right now
        project_environment = self.project_definition.config.get(
            "environment", {})
        need_docker = ("dockerfile" in project_environment
                       or "dockerimage" in project_environment)
        if need_docker:
            raise click.ClickException(
                "Docker support in session is currently not implemented.")

    def create_cluster(self):
        """Create a cluster that will run the session."""
        create_or_update_cluster(
            config_file=self.project_definition.cluster_yaml(),
            override_min_workers=None,
            override_max_workers=None,
            no_restart=False,
            restart_only=False,
            yes=True,
            override_cluster_name=self.session_name,
        )

    def sync_files(self):
        """Synchronize files with the session."""
        rsync(
            self.project_definition.cluster_yaml(),
            source=self.project_definition.root,
            target=self.project_definition.working_directory(),
            override_cluster_name=self.session_name,
            down=False,
        )

    def setup_environment(self):
        """Set up the environment of the session."""
        project_environment = self.project_definition.config.get(
            "environment", {})

        if "requirements" in project_environment:
            requirements_txt = project_environment["requirements"]

            # Create a temporary requirements_txt in the head node.
            remote_requirements_txt = (
                "/tmp/" + "ray_project_requirements_txt_{}".format(
                    time.time()))
            rsync(
                self.project_definition.cluster_yaml(),
                source=requirements_txt,
                target=remote_requirements_txt,
                override_cluster_name=self.session_name,
                down=False,
            )
            self.execute_command(
                "pip install -r {}".format(remote_requirements_txt))

        if "shell" in project_environment:
            for cmd in project_environment["shell"]:
                self.execute_command(cmd)

    def execute_command(self, cmd, config=None):
        """Execute a shell command in the session.

        Args:
            cmd (str): Shell command to run in the session. It will be
                run in the working directory of the project.
            config (dict): Optional per-command configuration; recognized
                keys are "tmux" (bool) and "port_forward".
        """
        # A mutable default argument ({}) would be shared across calls; use
        # None as the sentinel instead (fixes the original signature).
        if config is None:
            config = {}
        cwd = self.project_definition.working_directory()
        cmd = "cd {cwd}; {cmd}".format(cwd=cwd, cmd=cmd)
        exec_cluster(
            config_file=self.project_definition.cluster_yaml(),
            cmd=cmd,
            docker=False,
            screen=False,
            tmux=config.get("tmux", False),
            stop=False,
            start=False,
            override_cluster_name=self.session_name,
            port_forward=config.get("port_forward", None),
        )
def format_command(command, parsed_args):
    """Substitute arguments into command.

    Args:
        command (str): Shell command with "{{name}}" argument placeholders.
        parsed_args (dict): Dictionary that maps from argument names
            to their value.

    Returns:
        Shell command with parameters from parsed_args substituted.
    """
    result = command
    for arg_name, arg_value in parsed_args.items():
        placeholder = "{{" + arg_name + "}}"
        result = result.replace(placeholder, str(arg_value))
    return result
def get_session_runs(name, command, parsed_args):
    """Get a list of sessions to start.

    Args:
        command (str): Shell command with argument placeholders.
        parsed_args (dict): Dictionary that maps from argument names
            to their values.

    Returns:
        List of sessions to start, which are dictionaries with keys:
            "name": Name of the session to start,
            "command": Command to run after starting the session,
            "params": Parameters for this run,
            "num_steps": 4 if a command should be run, 3 if not.
    """
    if not command:
        return [{"name": name, "command": None, "params": {}, "num_steps": 3}]

    # Find the wildcard argument, i.e. the one whose value is a list.
    # At most one wildcard is supported at the moment.
    wildcard_arg = None
    for arg_name, arg_value in parsed_args.items():
        if isinstance(arg_value, list):
            if wildcard_arg:
                raise click.ClickException(
                    "More than one wildcard is not supported at the moment")
            wildcard_arg = arg_name

    if not wildcard_arg:
        # No fan-out: a single run with the args as given.
        return [{
            "name": name,
            "command": format_command(command, parsed_args),
            "params": parsed_args,
            "num_steps": 4,
        }]

    # One run per wildcard value, each with its own copy of the args.
    runs = []
    for wildcard_value in parsed_args[wildcard_arg]:
        run_args = copy.deepcopy(parsed_args)
        run_args[wildcard_arg] = wildcard_value
        runs.append({
            "name": "{}-{}-{}".format(name, wildcard_arg, wildcard_value),
            "command": format_command(command, run_args),
            "params": run_args,
            "num_steps": 4,
        })
    return runs
@session_cli.command(help="Attach to an existing cluster")
@click.option(
    "--screen", is_flag=True, default=False, help="Run the command in screen.")
@click.option("--tmux", help="Attach to tmux session", is_flag=True)
def attach(screen, tmux):
    """Open an interactive shell on the head node of the project cluster.

    Does not start the cluster if it is not already running (start=False).
    """
    project_definition = load_project_or_throw()
    attach_cluster(
        project_definition.cluster_yaml(),
        start=False,
        use_screen=screen,
        use_tmux=tmux,
        override_cluster_name=None,
        new=False,
    )
@session_cli.command(help="Stop a session based on current project config")
@click.option("--name", help="Name of the session to stop", default=None)
def stop(name):
    """Tear down the cluster backing the named session.

    Falls back to the project name when --name is not given.
    """
    project_definition = load_project_or_throw()
    if not name:
        name = project_definition.config["name"]
    teardown_cluster(
        project_definition.cluster_yaml(),
        yes=True,
        workers_only=False,
        override_cluster_name=name)
@session_cli.command(
    name="start",
    context_settings=dict(ignore_unknown_options=True, ),
    help="Start a session based on current project config")
@click.argument("command", required=False)
@click.argument("args", nargs=-1, type=click.UNPROCESSED)
@click.option(
    "--shell",
    help=(
        "If set, run the command as a raw shell command instead of looking up "
        "the command in the project config"),
    is_flag=True)
@click.option("--name", help="A name to tag the session with.", default=None)
def session_start(command, args, shell, name):
    """Start one or more sessions for the current project.

    For each run: create/update the cluster, sync project files, set up
    the environment, then run the command (if any). Wildcard arguments
    fan out into one session per value.
    """
    project_definition = load_project_or_throw()
    if not name:
        name = project_definition.config["name"]
    # Get the actual command to run. This also validates the command,
    # which should be done before the cluster is started.
    try:
        command, parsed_args, config = project_definition.get_command_info(
            command, args, shell, wildcards=True)
    except ValueError as e:
        raise click.ClickException(e)
    session_runs = get_session_runs(name, command, parsed_args)
    if len(session_runs) > 1 and not config.get("tmux", False):
        # Fix: use the module-level logger for consistency with the rest
        # of this module (was logging.info on the root logger).
        logger.info("Using wildcards with tmux = False would not create "
                    "sessions in parallel, so we are overriding it with "
                    "tmux = True.")
        config["tmux"] = True
    for run in session_runs:
        runner = SessionRunner(session_name=run["name"])
        logger.info("[1/{}] Creating cluster".format(run["num_steps"]))
        runner.create_cluster()
        logger.info("[2/{}] Syncing the project".format(run["num_steps"]))
        runner.sync_files()
        logger.info("[3/{}] Setting up environment".format(run["num_steps"]))
        runner.setup_environment()
        if run["command"]:
            # Run the actual command.
            logger.info("[4/4] Running command")
            runner.execute_command(run["command"], config)
@session_cli.command(
    name="commands",
    help="Print available commands for sessions of this project.")
def session_commands():
    """Print a usage summary for every command defined in the project."""
    project_definition = load_project_or_throw()
    print("Active project: " + project_definition.config["name"])
    print()
    commands = project_definition.config["commands"]
    for command in commands:
        print("Command \"{}\":".format(command["name"]))
        # Build a throwaway argparse parser just to reuse its help
        # formatter for pretty-printing the command's parameters.
        parser = argparse.ArgumentParser(
            command["name"], description=command.get("help"), add_help=False)
        params = command.get("params", [])
        for param in params:
            # NOTE: pop() mutates the in-memory config dicts; harmless
            # here since the process exits after printing.
            name = param.pop("name")
            if "type" in param:
                # argparse expects a callable for "type", not the config
                # string, so drop it before forwarding the kwargs.
                param.pop("type")
            parser.add_argument("--" + name, **param)
        help_string = parser.format_help()
        # Indent the help message by two spaces and print it.
        print("\n".join(["  " + line for line in help_string.split("\n")]))
@session_cli.command(
    name="execute",
    context_settings=dict(ignore_unknown_options=True, ),
    help="Execute a command in a session")
@click.argument("command", required=False)
@click.argument("args", nargs=-1, type=click.UNPROCESSED)
@click.option(
    "--shell",
    help=(
        "If set, run the command as a raw shell command instead of looking up "
        "the command in the project config"),
    is_flag=True)
@click.option(
    "--name", help="Name of the session to run this command on", default=None)
def session_execute(command, args, shell, name):
    """Execute a single project command in an already-running session."""
    project_definition = load_project_or_throw()
    try:
        command, parsed_args, config = project_definition.get_command_info(
            command, args, shell, wildcards=False)
    except ValueError as e:
        raise click.ClickException(e)
    runner = SessionRunner(session_name=name)
    command = format_command(command, parsed_args)
    # Fix: forward the command's config (it was parsed above but then
    # silently dropped, losing e.g. "tmux" and "port_forward" settings;
    # session_start passes it through).
    runner.execute_command(command, config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/ray_cluster_perf.py
|
Python
|
"""This is the script for `ray clusterbenchmark`."""
import time
import numpy as np
import ray
from ray.cluster_utils import Cluster
def main():
    """Benchmark cross-node object transfer latency on a 2-node local cluster.

    Starts a head node and one extra node, puts ~1GB objects into the
    head node's store, then fetches them from a task pinned to the
    second node and reports mean +- stddev fetch latency.
    """
    cluster = Cluster(
        initialize_head=True,
        connect=True,
        head_node_args={
            "object_store_memory": 20 * 1024 * 1024 * 1024,
            "num_cpus": 16
        })
    # The second node advertises one GPU; the benchmark task below
    # requests num_gpus=1 to force placement onto this node.
    cluster.add_node(
        object_store_memory=20 * 1024 * 1024 * 1024, num_gpus=1, num_cpus=16)
    object_id_list = []
    for i in range(0, 10):
        # Each object is (1024*128) x 1024 float64s, i.e. ~1 GB.
        object_id = ray.put(np.random.rand(1024 * 128, 1024))
        object_id_list.append(object_id)

    @ray.remote(num_gpus=1)
    def f(object_id_list):
        # Runs on the GPU node, so each ray.get pulls the object over
        # the network from the head node's object store.
        diffs = []
        for object_id in object_id_list:
            before = time.time()
            ray.get(object_id)
            after = time.time()
            diffs.append(after - before)
            time.sleep(1)
        return np.mean(diffs), np.std(diffs)

    time_diff, time_diff_std = ray.get(f.remote(object_id_list))
    print("latency to get an 1G object over network", round(time_diff, 2),
          "+-", round(time_diff_std, 2))
    ray.shutdown()
    cluster.shutdown()


if __name__ == "__main__":
    main()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/ray_constants.py
|
Python
|
"""Ray constants used in the Python code."""
import logging
import math
import os
logger = logging.getLogger(__name__)
def env_integer(key, default):
    """Read an integer from the environment, falling back to *default*."""
    raw = os.environ.get(key)
    if raw is None:
        return default
    return int(raw)
def direct_call_enabled():
    """Whether direct task calls are enabled via RAY_FORCE_DIRECT.

    Defaults to True when the variable is unset; any nonzero integer
    value enables it.
    """
    raw = os.environ.get("RAY_FORCE_DIRECT", "1")
    return int(raw) != 0
ID_SIZE = 20
# The default maximum number of bytes to allocate to the object store unless
# overridden by the user.
DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES = 20 * 10**9
# The default number of retries to call `put` when the object store is full.
DEFAULT_PUT_OBJECT_RETRIES = 5
# The default seconds for delay between calls to retry `put` when
# the object store is full. This delay is exponentially doubled up to
# DEFAULT_PUT_OBJECT_RETRIES times.
DEFAULT_PUT_OBJECT_DELAY = 1
# The smallest cap on the memory used by the object store that we allow.
# This must be greater than MEMORY_RESOURCE_UNIT_BYTES * 0.7
OBJECT_STORE_MINIMUM_MEMORY_BYTES = 75 * 1024 * 1024
# The default maximum number of bytes that the non-primary Redis shards are
# allowed to use unless overridden by the user.
DEFAULT_REDIS_MAX_MEMORY_BYTES = 10**10
# The smallest cap on the memory used by Redis that we allow.
REDIS_MINIMUM_MEMORY_BYTES = 10**7
# Default resource requirements for actors when no resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SIMPLE = 1
DEFAULT_ACTOR_CREATION_CPU_SIMPLE = 0
# Default resource requirements for actors when some resource requirements are
# specified in .
DEFAULT_ACTOR_METHOD_CPU_SPECIFIED = 0
DEFAULT_ACTOR_CREATION_CPU_SPECIFIED = 1
# Default number of return values for each actor method.
DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS = 1
# If a remote function or actor (or some other export) has serialized size
# greater than this quantity, print an warning.
PICKLE_OBJECT_WARNING_SIZE = 10**7
# If remote functions with the same source are imported this many times, then
# print a warning.
DUPLICATE_REMOTE_FUNCTION_THRESHOLD = 100
# The maximum resource quantity that is allowed. TODO(rkn): This could be
# relaxed, but the current implementation of the node manager will be slower
# for large resource quantities due to bookkeeping of specific resource IDs.
MAX_RESOURCE_QUANTITY = 40000
# Each memory "resource" counts as this many bytes of memory.
MEMORY_RESOURCE_UNIT_BYTES = 50 * 1024 * 1024
# Number of units 1 resource can be subdivided into.
MIN_RESOURCE_GRANULARITY = 0.0001
# Fraction of plasma memory that can be reserved. It is actually 70% but this
# is set to 69% to leave some headroom.
PLASMA_RESERVABLE_MEMORY_FRACTION = 0.69
def round_to_memory_units(memory_bytes, round_up):
    """Round bytes to the nearest memory unit.

    Args:
        memory_bytes: The number of bytes to round.
        round_up (bool): Whether a fractional unit count is rounded up
            (True) or down (False).

    Returns:
        The byte count rounded to a whole multiple of
        MEMORY_RESOURCE_UNIT_BYTES.
    """
    return from_memory_units(to_memory_units(memory_bytes, round_up))
def from_memory_units(memory_units):
    """Convert from memory units -> bytes.

    One memory unit corresponds to MEMORY_RESOURCE_UNIT_BYTES bytes.
    """
    return memory_units * MEMORY_RESOURCE_UNIT_BYTES
def to_memory_units(memory_bytes, round_up):
    """Convert from bytes -> memory units.

    Raises:
        ValueError: If *memory_bytes* is smaller than one memory unit.
    """
    units = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
    if units < 1:
        raise ValueError(
            "The minimum amount of memory that can be requested is {} bytes, "
            "however {} bytes was asked.".format(MEMORY_RESOURCE_UNIT_BYTES,
                                                 memory_bytes))
    if isinstance(units, float) and not units.is_integer():
        # TODO(ekl) Ray currently does not support fractional resources when
        # the quantity is greater than one. We should fix memory resources to
        # be allocated in units of bytes and not 100MB.
        rounder = math.ceil if round_up else math.floor
        units = int(rounder(units))
    return int(units)
# Different types of Ray errors that can be pushed to the driver.
# TODO(rkn): These should be defined in flatbuffers and must be synced with
# the existing C++ definitions.
WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
TASK_PUSH_ERROR = "task"
REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
CHECKPOINT_PUSH_ERROR = "checkpoint"
REGISTER_ACTOR_PUSH_ERROR = "register_actor"
WORKER_CRASH_PUSH_ERROR = "worker_crash"
WORKER_DIED_PUSH_ERROR = "worker_died"
WORKER_POOL_LARGE_ERROR = "worker_pool_large"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
INFEASIBLE_TASK_ERROR = "infeasible_task"
RESOURCE_DEADLOCK_ERROR = "resource_deadlock"
REMOVED_NODE_ERROR = "node_removed"
MONITOR_DIED_ERROR = "monitor_died"
LOG_MONITOR_DIED_ERROR = "log_monitor_died"
REPORTER_DIED_ERROR = "reporter_died"
DASHBOARD_DIED_ERROR = "dashboard_died"
RAYLET_CONNECTION_ERROR = "raylet_connection_error"
# Abort autoscaling if more than this number of errors are encountered. This
# is a safety feature to prevent e.g. runaway node launches.
AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
# The maximum number of nodes to launch in a single request.
# Multiple requests may be made for this batch size, up to
# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
# Max number of nodes to launch at a time.
AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
"AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
# Interval at which to perform autoscaling updates.
AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
# The autoscaler will attempt to restart Ray on nodes it hasn't heard from
# in more than this interval.
AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
30)
# The reporter will report its statistics this often (milliseconds).
REPORTER_UPDATE_INTERVAL_MS = env_integer("REPORTER_UPDATE_INTERVAL_MS", 2500)
# Max number of retries to AWS (default is 5, time increases exponentially)
BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
# Max number of retries to create an EC2 node (retry different subnet)
BOTO_CREATE_MAX_RETRIES = env_integer("BOTO_CREATE_MAX_RETRIES", 5)
LOGGER_FORMAT = (
"%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
LOGGER_FORMAT_HELP = "The logging format. default='{}'".format(LOGGER_FORMAT)
LOGGER_LEVEL = "info"
LOGGER_LEVEL_CHOICES = ["debug", "info", "warning", "error", "critical"]
LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
" 'warning', 'error', 'critical'], default='info'")
# A constant indicating that an actor doesn't need reconstructions.
NO_RECONSTRUCTION = 0
# A constant indicating that an actor should be reconstructed infinite times.
INFINITE_RECONSTRUCTION = 2**30
# Constants used to define the different process types.
PROCESS_TYPE_REAPER = "reaper"
PROCESS_TYPE_MONITOR = "monitor"
PROCESS_TYPE_RAYLET_MONITOR = "raylet_monitor"
PROCESS_TYPE_LOG_MONITOR = "log_monitor"
PROCESS_TYPE_REPORTER = "reporter"
PROCESS_TYPE_DASHBOARD = "dashboard"
PROCESS_TYPE_WORKER = "worker"
PROCESS_TYPE_RAYLET = "raylet"
PROCESS_TYPE_PLASMA_STORE = "plasma_store"
PROCESS_TYPE_REDIS_SERVER = "redis_server"
PROCESS_TYPE_WEB_UI = "web_ui"
LOG_MONITOR_MAX_OPEN_FILES = 200
# A constant used as object metadata to indicate the object is raw binary.
RAW_BUFFER_METADATA = b"RAW"
# A constant used as object metadata to indicate the object is pickled. This
# format is only ever used for Python inline task argument values.
PICKLE_BUFFER_METADATA = b"PICKLE"
# A constant used as object metadata to indicate the object is pickle5 format.
PICKLE5_BUFFER_METADATA = b"PICKLE5"
AUTOSCALER_RESOURCE_REQUEST_CHANNEL = b"autoscaler_resource_request"
# The default password to prevent redis port scanning attack.
# Hex for ray.
REDIS_DEFAULT_PASSWORD = "5241590000000000"
# The default ip address to bind to.
NODE_DEFAULT_IP = "127.0.0.1"
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/ray_perf.py
|
Python
|
"""This is the script for `ray microbenchmark`."""
import os
import time
import numpy as np
import multiprocessing
import ray
# Only run tests matching this filter pattern.
filter_pattern = os.environ.get("TESTS_TO_RUN", "")
@ray.remote(num_cpus=0)
class Actor:
    """Benchmark actor exposing trivial methods used by the microbenchmarks."""

    def small_value(self):
        # Return a tiny constant payload.
        return b"ok"

    def small_value_arg(self, x):
        # Same as small_value, but accepts (and ignores) one argument.
        return b"ok"

    def small_value_batch(self, n):
        # Fan out n tiny remote tasks and block until all complete.
        ray.get([small_value.remote() for _ in range(n)])
@ray.remote(num_cpus=0)
class Client:
    """Benchmark client that fans calls out to one or more Actor servers."""

    def __init__(self, servers):
        # Accept either a single server handle or a list of them.
        if not isinstance(servers, list):
            servers = [servers]
        self.servers = servers

    def small_value_batch(self, n):
        # Submit n no-arg calls per server, then wait for all results.
        results = []
        for s in self.servers:
            results.extend([s.small_value.remote() for _ in range(n)])
        ray.get(results)

    def small_value_batch_arg(self, n):
        # Like small_value_batch, but passes a shared object ref argument.
        x = ray.put(0)
        results = []
        for s in self.servers:
            results.extend([s.small_value_arg.remote(x) for _ in range(n)])
        ray.get(results)
@ray.remote
def small_value():
    """Remote task that returns a tiny constant payload."""
    return b"ok"
@ray.remote
def small_value_batch(n):
    """Remote task that fans out n small_value tasks and waits for them."""
    submitted = [small_value.remote() for _ in range(n)]
    ray.get(submitted)
    return 0
def timeit(name, fn, multiplier=1):
    """Benchmark *fn* and print mean +- stddev calls-per-second.

    Skipped unless *name* matches the TESTS_TO_RUN filter pattern.
    The printed rate is scaled by *multiplier* (e.g. batch size).
    """
    if filter_pattern not in name:
        return
    # Warm up for one second so startup costs don't skew the samples.
    warmup_start = time.time()
    while time.time() - warmup_start < 1:
        fn()
    # Take four two-second samples of the call rate.
    rates = []
    for _ in range(4):
        sample_start = time.time()
        num_calls = 0
        while time.time() - sample_start < 2:
            fn()
            num_calls += 1
        sample_end = time.time()
        rates.append(multiplier * num_calls / (sample_end - sample_start))
    print(name, "per second", round(np.mean(rates), 2), "+-",
          round(np.std(rates), 2))
def main():
    """Run the Ray microbenchmark suite and print per-second throughputs."""
    print("Tip: set TESTS_TO_RUN='pattern' to run a subset of benchmarks")
    ray.init()

    value = ray.put(0)
    # 100M int64s = 800 MB; used for the "gigabytes" put benchmarks.
    arr = np.zeros(100 * 1024 * 1024, dtype=np.int64)

    def get_small():
        ray.get(value)

    timeit("single client get calls", get_small)

    def put_small():
        ray.put(0)

    timeit("single client put calls", put_small)

    def put_large():
        ray.put(arr)

    # multiplier 8 * 0.1: 0.8 GB per put, reported as GB/s.
    timeit("single client put gigabytes", put_large, 8 * 0.1)

    @ray.remote
    def do_put_small():
        for _ in range(100):
            ray.put(0)

    def put_multi_small():
        ray.get([do_put_small.remote() for _ in range(10)])

    timeit("multi client put calls", put_multi_small, 1000)

    @ray.remote
    def do_put():
        for _ in range(10):
            ray.put(np.zeros(10 * 1024 * 1024, dtype=np.int64))

    def put_multi():
        ray.get([do_put.remote() for _ in range(10)])

    timeit("multi client put gigabytes", put_multi, 10 * 8 * 0.1)

    def small_task():
        ray.get(small_value.remote())

    timeit("single client tasks sync", small_task)

    def small_task_async():
        ray.get([small_value.remote() for _ in range(1000)])

    timeit("single client tasks async", small_task_async, 1000)

    n = 10000
    m = 4
    actors = [Actor.remote() for _ in range(m)]

    def multi_task():
        submitted = [a.small_value_batch.remote(n) for a in actors]
        ray.get(submitted)

    timeit("multi client tasks async", multi_task, n * m)

    a = Actor.remote()

    def actor_sync():
        ray.get(a.small_value.remote())

    timeit("1:1 actor calls sync", actor_sync)

    a = Actor.remote()

    def actor_async():
        ray.get([a.small_value.remote() for _ in range(1000)])

    timeit("1:1 actor calls async", actor_async, 1000)

    # max_concurrency lets the actor serve multiple calls at once.
    a = Actor.options(max_concurrency=16).remote()

    def actor_concurrent():
        ray.get([a.small_value.remote() for _ in range(1000)])

    timeit("1:1 actor calls concurrent", actor_concurrent, 1000)

    n = 5000
    n_cpu = multiprocessing.cpu_count() // 2
    actors = [Actor._remote() for _ in range(n_cpu)]
    client = Client.remote(actors)

    def actor_async_direct():
        ray.get(client.small_value_batch.remote(n))

    timeit("1:n actor calls async", actor_async_direct, n * len(actors))

    n_cpu = multiprocessing.cpu_count() // 2
    a = [Actor.remote() for _ in range(n_cpu)]

    @ray.remote
    def work(actors):
        ray.get([actors[i % n_cpu].small_value.remote() for i in range(n)])

    def actor_multi2():
        ray.get([work.remote(a) for _ in range(m)])

    timeit("n:n actor calls async", actor_multi2, m * n)

    n = 1000
    actors = [Actor._remote() for _ in range(n_cpu)]
    clients = [Client.remote(a) for a in actors]

    def actor_multi2_direct_arg():
        ray.get([c.small_value_batch_arg.remote(n) for c in clients])

    timeit("n:n actor calls with arg async", actor_multi2_direct_arg,
           n * len(clients))


if __name__ == "__main__":
    main()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/ray_process_reaper.py
|
Python
|
import os
import signal
import sys
import time
"""
This is a lightweight "reaper" process used to ensure that ray processes are
cleaned up properly when the main ray process dies unexpectedly (e.g.,
segfaults or gets SIGKILLed). Note that processes may not be cleaned up
properly if this process is SIGTERMed or SIGKILLed.
It detects that its parent has died by reading from stdin, which must be
inherited from the parent process so that the OS will deliver an EOF if the
parent dies. When this happens, the reaper process kills the rest of its
process group (first attempting graceful shutdown with SIGTERM, then escalating
to SIGKILL).
"""
SIGTERM_GRACE_PERIOD_SECONDS = 1
def reap_process_group(*args):
    """SIGTERM (escalating to SIGKILL) every process in our process group.

    Called once the parent process is detected dead. *args is unused but
    accepted so the function can double as a signal handler.
    """
    def sigterm_handler(*args):
        # Give a one-second grace period for other processes to clean up.
        time.sleep(SIGTERM_GRACE_PERIOD_SECONDS)
        # SIGKILL the pgroup (including ourselves) as a last-resort.
        os.killpg(0, signal.SIGKILL)

    # Set a SIGTERM handler to handle SIGTERMing ourselves with the group.
    signal.signal(signal.SIGTERM, sigterm_handler)
    # Our parent must have died, SIGTERM the group (including ourselves).
    # TODO(mehrdadn): killpg isn't supported on Windows.
    os.killpg(0, signal.SIGTERM)
def main():
    """Block until our parent dies (EOF on stdin), then reap the group."""
    # Read from stdin forever (the original comment said "stdout", but the
    # code reads sys.stdin). Because stdin is a file descriptor inherited
    # from our parent process, we will get an EOF if the parent dies,
    # which is signaled by an empty return from read().
    # We intentionally don't set any signal handlers here, so a SIGTERM from
    # the parent can be used to kill this process gracefully without it killing
    # the rest of the process group.
    while len(sys.stdin.read()) != 0:
        pass
    reap_process_group()


if __name__ == "__main__":
    main()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/remote_function.py
|
Python
|
import logging
from functools import wraps
from ray import cloudpickle as pickle
from ray import ray_constants
from ray.function_manager import FunctionDescriptor
import ray.signature
# Default parameters for remote functions.
DEFAULT_REMOTE_FUNCTION_CPUS = 1
DEFAULT_REMOTE_FUNCTION_NUM_RETURN_VALS = 1
DEFAULT_REMOTE_FUNCTION_MAX_CALLS = 0
# Normal tasks may be retried on failure this many times.
# TODO(swang): Allow this to be set globally for an application.
DEFAULT_REMOTE_FUNCTION_NUM_TASK_RETRIES = 3
logger = logging.getLogger(__name__)
class RemoteFunction:
    """A remote function.

    This is a decorated function. It can be used to spawn tasks.

    Attributes:
        _function: The original function.
        _function_descriptor: The function descriptor. This is not defined
            until the remote function is first invoked because that is when the
            function is pickled, and the pickled function is used to compute
            the function descriptor.
        _function_name: The module and function name.
        _num_cpus: The default number of CPUs to use for invocations of this
            remote function.
        _num_gpus: The default number of GPUs to use for invocations of this
            remote function.
        _memory: The heap memory request for this task.
        _object_store_memory: The object store memory request for this task.
        _resources: The default custom resource requirements for invocations of
            this remote function.
        _num_return_vals: The default number of return values for invocations
            of this remote function.
        _max_calls: The number of times a worker can execute this function
            before exiting.
        _decorator: An optional decorator that should be applied to the remote
            function invocation (as opposed to the function execution) before
            invoking the function. The decorator must return a function that
            takes in two arguments ("args" and "kwargs"). In most cases, it
            should call the function that was passed into the decorator and
            return the resulting ObjectIDs. For an example, see
            "test_decorated_function" in "python/ray/tests/test_basic.py".
        _function_signature: The function signature.
        _last_export_session_and_job: A pair of the last exported session
            and job to help us to know whether this function was exported.
            This is an imperfect mechanism used to determine if we need to
            export the remote function again. It is imperfect in the sense that
            the actor class definition could be exported multiple times by
            different workers.
    """

    def __init__(self, function, num_cpus, num_gpus, memory,
                 object_store_memory, resources, num_return_vals, max_calls,
                 max_retries):
        """Capture the wrapped function and its default resource requests.

        Any None resource argument falls back to the module defaults.
        """
        self._function = function
        self._function_name = (
            self._function.__module__ + "." + self._function.__name__)
        self._num_cpus = (DEFAULT_REMOTE_FUNCTION_CPUS
                          if num_cpus is None else num_cpus)
        self._num_gpus = num_gpus
        self._memory = memory
        if object_store_memory is not None:
            raise NotImplementedError(
                "setting object_store_memory is not implemented for tasks")
        self._object_store_memory = None
        self._resources = resources
        self._num_return_vals = (DEFAULT_REMOTE_FUNCTION_NUM_RETURN_VALS if
                                 num_return_vals is None else num_return_vals)
        self._max_calls = (DEFAULT_REMOTE_FUNCTION_MAX_CALLS
                           if max_calls is None else max_calls)
        self._max_retries = (DEFAULT_REMOTE_FUNCTION_NUM_TASK_RETRIES
                             if max_retries is None else max_retries)
        self._decorator = getattr(function, "__ray_invocation_decorator__",
                                  None)
        self._function_signature = ray.signature.extract_signature(
            self._function)
        # Not exported yet; set lazily on first invocation in _remote().
        self._last_export_session_and_job = None

        # Override task.remote's signature and docstring
        @wraps(function)
        def _remote_proxy(*args, **kwargs):
            return self._remote(args=args, kwargs=kwargs)

        self.remote = _remote_proxy
        self.direct_call_enabled = ray_constants.direct_call_enabled()

    def __call__(self, *args, **kwargs):
        """Direct calls are disallowed; remote functions must use .remote()."""
        raise Exception("Remote functions cannot be called directly. Instead "
                        "of running '{}()', try '{}.remote()'.".format(
                            self._function_name, self._function_name))

    def _submit(self,
                args=None,
                kwargs=None,
                num_return_vals=None,
                num_cpus=None,
                num_gpus=None,
                resources=None):
        """Deprecated alias for _remote(); logs a warning and forwards."""
        logger.warning(
            "WARNING: _submit() is being deprecated. Please use _remote().")
        return self._remote(
            args=args,
            kwargs=kwargs,
            num_return_vals=num_return_vals,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources)

    def options(self, **options):
        """Convenience method for executing a task with options.

        Same arguments as func._remote(), but returns a wrapped function
        that a non-underscore .remote() can be called on.

        Examples:
            # The following two calls are equivalent.
            >>> func._remote(num_cpus=4, args=[x, y])
            >>> func.options(num_cpus=4).remote(x, y)
        """
        func_cls = self

        class FuncWrapper:
            def remote(self, *args, **kwargs):
                return func_cls._remote(args=args, kwargs=kwargs, **options)

        return FuncWrapper()

    def _remote(self,
                args=None,
                kwargs=None,
                num_return_vals=None,
                is_direct_call=None,
                num_cpus=None,
                num_gpus=None,
                memory=None,
                object_store_memory=None,
                resources=None,
                max_retries=None):
        """Submit the remote function for execution.

        Per-call overrides take precedence over the defaults captured at
        decoration time. Returns a single ObjectID when the task has one
        return value, a list of ObjectIDs when it has several, and None
        for zero return values.
        """
        worker = ray.worker.get_global_worker()
        worker.check_connected()

        # If this function was not exported in this session and job, we need to
        # export this function again, because the current GCS doesn't have it.
        if self._last_export_session_and_job != worker.current_session_and_job:
            # There is an interesting question here. If the remote function is
            # used by a subsequent driver (in the same script), should the
            # second driver pickle the function again? If yes, then the remote
            # function definition can differ in the second driver (e.g., if
            # variables in its closure have changed). We probably want the
            # behavior of the remote function in the second driver to be
            # independent of whether or not the function was invoked by the
            # first driver. This is an argument for repickling the function,
            # which we do here.
            self._pickled_function = pickle.dumps(self._function)

            self._function_descriptor = FunctionDescriptor.from_function(
                self._function, self._pickled_function)
            self._function_descriptor_list = (
                self._function_descriptor.get_function_descriptor_list())

            self._last_export_session_and_job = worker.current_session_and_job
            worker.function_actor_manager.export(self)

        kwargs = {} if kwargs is None else kwargs
        args = [] if args is None else args

        # Resolve per-call overrides against the decoration-time defaults.
        if num_return_vals is None:
            num_return_vals = self._num_return_vals
        if is_direct_call is None:
            is_direct_call = self.direct_call_enabled
        if max_retries is None:
            max_retries = self._max_retries

        resources = ray.utils.resources_from_resource_arguments(
            self._num_cpus, self._num_gpus, self._memory,
            self._object_store_memory, self._resources, num_cpus, num_gpus,
            memory, object_store_memory, resources)

        def invocation(args, kwargs):
            if not args and not kwargs and not self._function_signature:
                list_args = []
            else:
                list_args = ray.signature.flatten_args(
                    self._function_signature, args, kwargs)

            if worker.mode == ray.worker.LOCAL_MODE:
                # Local mode executes the function inline instead of
                # submitting a task to the raylet.
                object_ids = worker.local_mode_manager.execute(
                    self._function, self._function_descriptor, args, kwargs,
                    num_return_vals)
            else:
                object_ids = worker.core_worker.submit_task(
                    self._function_descriptor_list, list_args, num_return_vals,
                    is_direct_call, resources, max_retries)

            # NOTE: implicitly returns None when there are no return values.
            if len(object_ids) == 1:
                return object_ids[0]
            elif len(object_ids) > 1:
                return object_ids

        if self._decorator is not None:
            invocation = self._decorator(invocation)

        return invocation(args, kwargs)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/reporter.py
|
Python
|
import argparse
import logging
import json
import os
import traceback
import time
import datetime
try:
import psutil
except ImportError:
print("The reporter requires psutil to run.")
import sys
sys.exit(1)
import ray.ray_constants as ray_constants
import ray.services
import ray.utils
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
def recursive_asdict(o):
    """Recursively convert namedtuples to dicts (and tuples to lists).

    Leaves any other value untouched, so the result of converting a
    nested structure is JSON-serializable as long as the leaves are.
    """
    if isinstance(o, tuple) and hasattr(o, "_asdict"):
        # Named tuple: turn it into a plain dict, then recurse into it.
        return recursive_asdict(o._asdict())
    if isinstance(o, (tuple, list)):
        return [recursive_asdict(item) for item in o]
    if isinstance(o, dict):
        return {key: recursive_asdict(value) for key, value in o.items()}
    return o
def jsonify_asdict(o):
    """Serialize *o* to a JSON string, converting namedtuples to dicts."""
    return json.dumps(recursive_asdict(o))
def is_worker(cmdline):
    """Return True if *cmdline* (a psutil cmdline list) is a Ray worker.

    Ray renames its worker processes so argv[0] starts with "ray::".
    Fix: the original expression `cmdline and ...` leaked the falsy
    cmdline value itself (e.g. [] or None) instead of a bool; bool()
    normalizes the result while preserving truthiness for all callers.
    """
    return bool(cmdline) and cmdline[0].startswith("ray::")
def to_posix_time(dt):
    """Convert a naive UTC datetime to seconds since the Unix epoch."""
    epoch = datetime.datetime(1970, 1, 1)
    delta = dt - epoch
    return delta.total_seconds()
class Reporter:
    """A monitor process for monitoring Ray nodes.

    Periodically samples host-level metrics (CPU, memory, disk, network,
    worker processes) and publishes them to a per-host Redis channel.

    Attributes:
        host (str): The hostname of this machine. Used to improve the log
            messages published to Redis.
        redis_client: A client used to communicate with the Redis server.
    """

    def __init__(self, redis_address, redis_password=None):
        """Initialize the reporter object."""
        # (logical CPU count, physical CPU count).
        self.cpu_counts = (psutil.cpu_count(), psutil.cpu_count(logical=False))
        self.ip = ray.services.get_node_ip_address()
        self.hostname = os.uname().nodename
        # psutil.cpu_percent() returns a meaningless value on its first
        # call; prime it here so later calls measure a real interval.
        _ = psutil.cpu_percent()  # For initialization
        self.redis_key = "{}.{}".format(ray.gcs_utils.REPORTER_CHANNEL,
                                        self.hostname)
        self.redis_client = ray.services.create_redis_client(
            redis_address, password=redis_password)
        # Sliding window of samples used to compute network throughput.
        self.network_stats_hist = [(0, (0.0, 0.0))]  # time, (sent, recv)

    @staticmethod
    def get_cpu_percent():
        """System-wide CPU utilization percentage since the last call."""
        return psutil.cpu_percent()

    @staticmethod
    def get_boot_time():
        """System boot time as a POSIX timestamp."""
        return psutil.boot_time()

    @staticmethod
    def get_network_stats():
        """Total (bytes_sent, bytes_recv) over ethernet-like interfaces.

        Only counts interfaces whose name starts with "e" (e.g. eth0,
        ens5, enp0s3), skipping loopback and virtual interfaces.
        """
        ifaces = [
            v for k, v in psutil.net_io_counters(pernic=True).items()
            if k[0] == "e"
        ]

        sent = sum((iface.bytes_sent for iface in ifaces))
        recv = sum((iface.bytes_recv for iface in ifaces))
        return sent, recv

    @staticmethod
    def get_mem_usage():
        """Virtual memory (total, available, percent used)."""
        vm = psutil.virtual_memory()
        return vm.total, vm.available, vm.percent

    @staticmethod
    def get_disk_usage():
        """Disk usage for the root filesystem and /tmp."""
        return {x: psutil.disk_usage(x) for x in ["/", "/tmp"]}

    @staticmethod
    def get_workers():
        """Per-process stats for every Ray worker on this host.

        Workers are identified by their "ray::"-prefixed command line.
        """
        return [
            x.as_dict(attrs=[
                "pid",
                "create_time",
                "cpu_percent",
                "cpu_times",
                "cmdline",
                "memory_info",
            ]) for x in psutil.process_iter(attrs=["cmdline"])
            if is_worker(x.info["cmdline"])
        ]

    def get_load_avg(self):
        """1/5/15-minute load averages, raw and normalized per logical CPU."""
        load = os.getloadavg()
        per_cpu_load = tuple((round(x / self.cpu_counts[0], 2) for x in load))
        return load, per_cpu_load

    def get_all_stats(self):
        """Collect one snapshot of all host metrics as a dict."""
        now = to_posix_time(datetime.datetime.utcnow())
        network_stats = self.get_network_stats()

        # Throughput is computed over a sliding window of the last
        # (up to) 7 samples rather than a single interval.
        self.network_stats_hist.append((now, network_stats))
        self.network_stats_hist = self.network_stats_hist[-7:]
        then, prev_network_stats = self.network_stats_hist[0]
        netstats = ((network_stats[0] - prev_network_stats[0]) / (now - then),
                    (network_stats[1] - prev_network_stats[1]) / (now - then))

        return {
            "now": now,
            "hostname": self.hostname,
            "ip": self.ip,
            "cpu": self.get_cpu_percent(),
            "cpus": self.cpu_counts,
            "mem": self.get_mem_usage(),
            "workers": self.get_workers(),
            "boot_time": self.get_boot_time(),
            "load_avg": self.get_load_avg(),
            "disk": self.get_disk_usage(),
            "net": netstats,
        }

    def perform_iteration(self):
        """Get any changes to the log files and push updates to Redis."""
        stats = self.get_all_stats()

        self.redis_client.publish(
            self.redis_key,
            jsonify_asdict(stats),
        )

    def run(self):
        """Run the reporter.

        Publishes stats forever; individual iteration failures are logged
        and skipped so one bad sample doesn't kill the reporter.
        """
        while True:
            try:
                self.perform_iteration()
            except Exception:
                traceback.print_exc()
                pass

            time.sleep(ray_constants.REPORTER_UPDATE_INTERVAL_MS / 1000)
# Script entry point: parse the Redis connection info, then run the reporter
# loop forever, pushing a fatal-error message to all drivers if it dies.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=("Parse Redis server for the "
                     "reporter to connect to."))
    parser.add_argument(
        "--redis-address",
        required=True,
        type=str,
        help="The address to use for Redis.")
    parser.add_argument(
        "--redis-password",
        required=False,
        type=str,
        default=None,
        help="the password to use for Redis")
    parser.add_argument(
        "--logging-level",
        required=False,
        type=str,
        default=ray_constants.LOGGER_LEVEL,
        choices=ray_constants.LOGGER_LEVEL_CHOICES,
        help=ray_constants.LOGGER_LEVEL_HELP)
    parser.add_argument(
        "--logging-format",
        required=False,
        type=str,
        default=ray_constants.LOGGER_FORMAT,
        help=ray_constants.LOGGER_FORMAT_HELP)
    args = parser.parse_args()
    ray.utils.setup_logger(args.logging_level, args.logging_format)

    reporter = Reporter(args.redis_address, redis_password=args.redis_password)

    try:
        reporter.run()
    except Exception as e:
        # Something went wrong, so push an error to all drivers.
        redis_client = ray.services.create_redis_client(
            args.redis_address, password=args.redis_password)
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        message = ("The reporter on node {} failed with the following "
                   "error:\n{}".format(os.uname()[1], traceback_str))
        ray.utils.push_error_to_driver_through_redis(
            redis_client, ray_constants.REPORTER_DIED_ERROR, message)
        raise e
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/resource_spec.py
|
Python
|
import math
from collections import namedtuple
import logging
import multiprocessing
import os
import ray
import ray.ray_constants as ray_constants
logger = logging.getLogger(__name__)
# Prefix for the node id resource that is automatically added to each node.
# For example, a node may have id `node:172.23.42.1`.
NODE_ID_PREFIX = "node:"
class ResourceSpec(
        namedtuple("ResourceSpec", [
            "num_cpus", "num_gpus", "memory", "object_store_memory",
            "resources", "redis_max_memory"
        ])):
    """Represents the resource configuration passed to a raylet.

    All fields can be None. Before starting services, resolve() should be
    called to return a ResourceSpec with unknown values filled in with
    defaults based on the local machine specifications.

    Attributes:
        num_cpus: The CPUs allocated for this raylet.
        num_gpus: The GPUs allocated for this raylet.
        memory: The memory allocated for this raylet.
        object_store_memory: The object store memory allocated for this raylet.
            Note that when calling to_resource_dict(), this will be scaled down
            by 30% to account for the global plasma LRU reserve.
        resources: The custom resources allocated for this raylet.
        redis_max_memory: The max amount of memory (in bytes) to allow each
            redis shard to use. Once the limit is exceeded, redis will start
            LRU eviction of entries. This only applies to the sharded redis
            tables (task, object, and profile tables). By default, this is
            capped at 10GB but can be set higher.
    """

    def __new__(cls,
                num_cpus=None,
                num_gpus=None,
                memory=None,
                object_store_memory=None,
                resources=None,
                redis_max_memory=None):
        """Create a ResourceSpec; any field may be None (unresolved)."""
        return super(ResourceSpec, cls).__new__(cls, num_cpus, num_gpus,
                                                memory, object_store_memory,
                                                resources, redis_max_memory)

    def resolved(self):
        """Returns if this ResourceSpec has default values filled out."""
        for v in self._asdict().values():
            if v is None:
                return False
        return True

    def to_resource_dict(self):
        """Returns a dict suitable to pass to raylet initialization.

        This renames num_cpus / num_gpus to "CPU" / "GPU", translates memory
        from bytes into 100MB memory units, and checks types.

        Raises:
            ValueError: If the reservable object store memory is below the
                minimum unit size, or if any resource quantity is fractional,
                negative, or above MAX_RESOURCE_QUANTITY.
        """
        assert self.resolved()

        memory_units = ray_constants.to_memory_units(
            self.memory, round_up=False)
        # Only a fraction of the object store is reservable; the rest is
        # held back as the global plasma LRU reserve.
        reservable_object_store_memory = (
            self.object_store_memory *
            ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION)
        if (reservable_object_store_memory <
                ray_constants.MEMORY_RESOURCE_UNIT_BYTES):
            raise ValueError(
                "The minimum amount of object_store_memory that can be "
                "requested is {}, but you specified {}.".format(
                    int(
                        math.ceil(
                            ray_constants.MEMORY_RESOURCE_UNIT_BYTES /
                            ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION)),
                    self.object_store_memory))
        object_store_memory_units = ray_constants.to_memory_units(
            self.object_store_memory *
            ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION,
            round_up=False)

        resources = dict(
            self.resources,
            CPU=self.num_cpus,
            GPU=self.num_gpus,
            memory=memory_units,
            object_store_memory=object_store_memory_units)

        # Drop zero-quantity resources entirely rather than advertising them.
        resources = {
            resource_label: resource_quantity
            for resource_label, resource_quantity in resources.items()
            if resource_quantity != 0
        }

        # Check types.
        for resource_label, resource_quantity in resources.items():
            assert (isinstance(resource_quantity, int)
                    or isinstance(resource_quantity, float))
            if (isinstance(resource_quantity, float)
                    and not resource_quantity.is_integer()):
                raise ValueError(
                    "Resource quantities must all be whole numbers. "
                    "Violated by resource '{}' in {}.".format(
                        resource_label, resources))
            if resource_quantity < 0:
                raise ValueError("Resource quantities must be nonnegative. "
                                 "Violated by resource '{}' in {}.".format(
                                     resource_label, resources))
            if resource_quantity > ray_constants.MAX_RESOURCE_QUANTITY:
                raise ValueError("Resource quantities must be at most {}. "
                                 "Violated by resource '{}' in {}.".format(
                                     ray_constants.MAX_RESOURCE_QUANTITY,
                                     resource_label, resources))

        return resources

    def resolve(self, is_head):
        """Returns a copy with values filled out with system defaults.

        Args:
            is_head: Whether this is the head node; redis memory is only
                deducted from worker-available memory on the head.
        """
        resources = (self.resources or {}).copy()
        assert "CPU" not in resources, resources
        assert "GPU" not in resources, resources
        assert "memory" not in resources, resources
        assert "object_store_memory" not in resources, resources

        # Automatically create a node id resource on each node. This is
        # queryable with ray.state.node_ids() and ray.state.current_node_id().
        resources[NODE_ID_PREFIX + ray.services.get_node_ip_address()] = 1.0

        num_cpus = self.num_cpus
        if num_cpus is None:
            num_cpus = multiprocessing.cpu_count()

        num_gpus = self.num_gpus
        gpu_ids = ray.utils.get_cuda_visible_devices()
        # Check that the number of GPUs that the raylet wants doesn't
        # excede the amount allowed by CUDA_VISIBLE_DEVICES.
        if (num_gpus is not None and gpu_ids is not None
                and num_gpus > len(gpu_ids)):
            raise Exception("Attempting to start raylet with {} GPUs, "
                            "but CUDA_VISIBLE_DEVICES contains {}.".format(
                                num_gpus, gpu_ids))
        if num_gpus is None:
            # Try to automatically detect the number of GPUs.
            num_gpus = _autodetect_num_gpus()
            # Don't use more GPUs than allowed by CUDA_VISIBLE_DEVICES.
            if gpu_ids is not None:
                num_gpus = min(num_gpus, len(gpu_ids))

        # Choose a default object store size.
        system_memory = ray.utils.get_system_memory()
        avail_memory = ray.utils.estimate_available_memory()
        object_store_memory = self.object_store_memory
        if object_store_memory is None:
            object_store_memory = int(avail_memory * 0.3)
            # Cap memory to avoid memory waste and perf issues on large nodes
            if (object_store_memory >
                    ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES):
                logger.debug(
                    "Warning: Capping object memory store to {}GB. ".format(
                        ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES //
                        1e9) +
                    "To increase this further, specify `object_store_memory` "
                    "when calling ray.init() or ray start.")
                object_store_memory = (
                    ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES)

        redis_max_memory = self.redis_max_memory
        if redis_max_memory is None:
            # Default: 10% of available memory, clamped to
            # [REDIS_MINIMUM_MEMORY_BYTES, DEFAULT_REDIS_MAX_MEMORY_BYTES].
            redis_max_memory = min(
                ray_constants.DEFAULT_REDIS_MAX_MEMORY_BYTES,
                max(
                    int(avail_memory * 0.1),
                    ray_constants.REDIS_MINIMUM_MEMORY_BYTES))
        if redis_max_memory < ray_constants.REDIS_MINIMUM_MEMORY_BYTES:
            raise ValueError(
                "Attempting to cap Redis memory usage at {} bytes, "
                "but the minimum allowed is {} bytes.".format(
                    redis_max_memory,
                    ray_constants.REDIS_MINIMUM_MEMORY_BYTES))

        memory = self.memory
        if memory is None:
            # Worker-available memory is whatever is left after the object
            # store (and, on the head node, redis) takes its share.
            memory = (avail_memory - object_store_memory - (redis_max_memory
                                                            if is_head else 0))
            if memory < 100e6 and memory < 0.05 * system_memory:
                raise ValueError(
                    "After taking into account object store and redis memory "
                    "usage, the amount of memory on this node available for "
                    "tasks and actors ({} GB) is less than {}% of total. "
                    "You can adjust these settings with "
                    "ray.init(memory=<bytes>, "
                    "object_store_memory=<bytes>).".format(
                        round(memory / 1e9, 2),
                        int(100 * (memory / system_memory))))

        logger.info(
            "Starting Ray with {} GiB memory available for workers and up to "
            "{} GiB for objects. You can adjust these settings "
            "with ray.init(memory=<bytes>, "
            "object_store_memory=<bytes>).".format(
                round(
                    ray_constants.round_to_memory_units(
                        memory, round_up=False) / (1024**3), 2),
                round(object_store_memory / (1024**3), 2)))

        spec = ResourceSpec(num_cpus, num_gpus, memory, object_store_memory,
                            resources, redis_max_memory)
        assert spec.resolved()
        return spec
def _autodetect_num_gpus():
"""Attempt to detect the number of GPUs on this machine.
TODO(rkn): This currently assumes Nvidia GPUs and Linux.
Returns:
The number of GPUs if any were detected, otherwise 0.
"""
proc_gpus_path = "/proc/driver/nvidia/gpus"
if os.path.isdir(proc_gpus_path):
return len(os.listdir(proc_gpus_path))
return 0
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/runtime_context.py
|
Python
|
import ray.worker
class RuntimeContext:
    """A class used for getting runtime context."""

    def __init__(self, worker=None):
        # The worker (or driver) object this context reports about.
        self.worker = worker

    @property
    def current_driver_id(self):
        """Get current driver ID for this worker or driver.

        Returns:
            If called by a driver, this returns the driver ID. If called in
            a task, return the driver ID of the associated driver.
        """
        worker = self.worker
        assert worker is not None
        return worker.current_job_id
# Process-wide singleton; lazily created on first _get_runtime_context() call.
_runtime_context = None


def _get_runtime_context():
    """Return the process-wide RuntimeContext, creating it on first use."""
    global _runtime_context
    if _runtime_context is None:
        _runtime_context = RuntimeContext(ray.worker.get_global_worker())

    return _runtime_context
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/scripts/scripts.py
|
Python
|
import click
from datetime import datetime
import json
import logging
import os
import subprocess
import sys
import time
import ray.services as services
from ray.autoscaler.commands import (
attach_cluster, exec_cluster, create_or_update_cluster, monitor_cluster,
rsync, teardown_cluster, get_head_node_ip, kill_node, get_worker_node_ips)
import ray.ray_constants as ray_constants
import ray.utils
from ray.projects.scripts import project_cli, session_cli
logger = logging.getLogger(__name__)
def check_no_existing_redis_clients(node_ip_address, redis_client):
    """Raise if this Redis instance already has live clients on this node.

    Args:
        node_ip_address: IP address of the node being started.
        redis_client: Client connected to the primary Redis shard.

    Raises:
        Exception: If a non-deleted client entry shares this IP address.
    """
    # The client table prefix must be kept in sync with the file
    # "src/ray/gcs/redis_module/ray_redis_module.cc" where it is defined.
    REDIS_CLIENT_TABLE_PREFIX = "CL:"
    client_keys = redis_client.keys("{}*".format(REDIS_CLIENT_TABLE_PREFIX))
    # Filter to clients on the same node and do some basic checking.
    for key in client_keys:
        info = redis_client.hgetall(key)

        assert b"ray_client_id" in info
        assert b"node_ip_address" in info
        assert b"client_type" in info
        assert b"deleted" in info

        # Clients that ran on the same node but that are marked dead can be
        # ignored.
        deleted = info[b"deleted"]
        deleted = bool(int(deleted))
        if deleted:
            continue

        if ray.utils.decode(info[b"node_ip_address"]) == node_ip_address:
            raise Exception("This Redis instance is already connected to "
                            "clients with this IP address.")
@click.group()
@click.option(
    "--logging-level",
    required=False,
    default=ray_constants.LOGGER_LEVEL,
    type=str,
    help=ray_constants.LOGGER_LEVEL_HELP)
@click.option(
    "--logging-format",
    required=False,
    default=ray_constants.LOGGER_FORMAT,
    type=str,
    help=ray_constants.LOGGER_FORMAT_HELP)
def cli(logging_level, logging_format):
    """Top-level Ray CLI group; configures logging for all subcommands."""
    level = logging.getLevelName(logging_level.upper())
    ray.utils.setup_logger(level, logging_format)
@cli.command()
@click.option(
    "--node-ip-address",
    required=False,
    type=str,
    help="the IP address of this node")
@click.option(
    "--redis-address", required=False, type=str, help="same as --address")
@click.option(
    "--address", required=False, type=str, help="the address to use for Ray")
@click.option(
    "--redis-port",
    required=False,
    type=str,
    help="the port to use for starting Redis")
@click.option(
    "--num-redis-shards",
    required=False,
    type=int,
    help=("the number of additional Redis shards to use in "
          "addition to the primary Redis shard"))
@click.option(
    "--redis-max-clients",
    required=False,
    type=int,
    help=("If provided, attempt to configure Redis with this "
          "maximum number of clients."))
@click.option(
    "--redis-password",
    required=False,
    type=str,
    default=ray_constants.REDIS_DEFAULT_PASSWORD,
    help="If provided, secure Redis ports with this password")
@click.option(
    "--redis-shard-ports",
    required=False,
    type=str,
    help="the port to use for the Redis shards other than the "
    "primary Redis shard")
@click.option(
    "--object-manager-port",
    required=False,
    type=int,
    help="the port to use for starting the object manager")
@click.option(
    "--node-manager-port",
    required=False,
    type=int,
    help="the port to use for starting the node manager")
@click.option(
    "--memory",
    required=False,
    type=int,
    help="The amount of memory (in bytes) to make available to workers. "
    "By default, this is set to the available memory on the node.")
@click.option(
    "--object-store-memory",
    required=False,
    type=int,
    help="The amount of memory (in bytes) to start the object store with. "
    "By default, this is capped at 20GB but can be set higher.")
@click.option(
    "--redis-max-memory",
    required=False,
    type=int,
    help="The max amount of memory (in bytes) to allow redis to use. Once the "
    "limit is exceeded, redis will start LRU eviction of entries. This only "
    "applies to the sharded redis tables (task, object, and profile tables). "
    "By default this is capped at 10GB but can be set higher.")
@click.option(
    "--num-cpus",
    required=False,
    type=int,
    help="the number of CPUs on this node")
@click.option(
    "--num-gpus",
    required=False,
    type=int,
    help="the number of GPUs on this node")
@click.option(
    "--resources",
    required=False,
    default="{}",
    type=str,
    help="a JSON serialized dictionary mapping resource name to "
    "resource quantity")
@click.option(
    "--head",
    is_flag=True,
    default=False,
    help="provide this argument for the head node")
@click.option(
    "--include-webui",
    default=None,
    type=bool,
    help="provide this argument if the UI should be started")
@click.option(
    "--webui-host",
    required=False,
    default="localhost",
    help="The host to bind the web UI server to. Can either be localhost "
    "(127.0.0.1) or 0.0.0.0 (available from all interfaces). By default, this "
    "is set to localhost to prevent access from external machines.")
@click.option(
    "--block",
    is_flag=True,
    default=False,
    help="provide this argument to block forever in this command")
@click.option(
    "--plasma-directory",
    required=False,
    type=str,
    help="object store directory for memory mapped files")
@click.option(
    "--huge-pages",
    is_flag=True,
    default=False,
    help="enable support for huge pages in the object store")
@click.option(
    "--autoscaling-config",
    required=False,
    type=str,
    help="the file that contains the autoscaling config")
@click.option(
    "--no-redirect-worker-output",
    is_flag=True,
    default=False,
    help="do not redirect worker stdout and stderr to files")
@click.option(
    "--no-redirect-output",
    is_flag=True,
    default=False,
    help="do not redirect non-worker stdout and stderr to files")
@click.option(
    "--plasma-store-socket-name",
    default=None,
    help="manually specify the socket name of the plasma store")
@click.option(
    "--raylet-socket-name",
    default=None,
    help="manually specify the socket path of the raylet process")
@click.option(
    "--temp-dir",
    default=None,
    help="manually specify the root temporary dir of the Ray process")
@click.option(
    "--include-java",
    is_flag=True,
    default=None,
    help="Enable Java worker support.")
@click.option(
    "--java-worker-options",
    required=False,
    default=None,
    type=str,
    help="Overwrite the options to start Java workers.")
@click.option(
    "--internal-config",
    default=None,
    type=str,
    help="Do NOT use this. This is for debugging/development purposes ONLY.")
@click.option(
    "--load-code-from-local",
    is_flag=True,
    default=False,
    help="Specify whether load code from local file or GCS serialization.")
@click.option(
    "--use-pickle/--no-use-pickle",
    is_flag=True,
    default=ray.cloudpickle.FAST_CLOUDPICKLE_USED,
    help="Use pickle for serialization.")
def start(node_ip_address, redis_address, address, redis_port,
          num_redis_shards, redis_max_clients, redis_password,
          redis_shard_ports, object_manager_port, node_manager_port, memory,
          object_store_memory, redis_max_memory, num_cpus, num_gpus, resources,
          head, include_webui, webui_host, block, plasma_directory, huge_pages,
          autoscaling_config, no_redirect_worker_output, no_redirect_output,
          plasma_store_socket_name, raylet_socket_name, temp_dir, include_java,
          java_worker_options, load_code_from_local, use_pickle,
          internal_config):
    """Start the Ray processes on this machine (head node or worker node)."""
    # NOTE(review): this raises the DeprecationWarning class as an exception
    # rather than emitting a warning — deliberate hard failure for the
    # removed --redis-address flag.
    if redis_address is not None:
        raise DeprecationWarning("The --redis-address argument is "
                                 "deprecated. Please use --address instead.")

    # Convert hostnames to numerical IP address.
    if node_ip_address is not None:
        node_ip_address = services.address_to_ip(node_ip_address)

    if redis_address is not None or address is not None:
        (redis_address, redis_address_ip,
         redis_address_port) = services.validate_redis_address(
             address, redis_address)

    try:
        resources = json.loads(resources)
    except Exception:
        raise Exception("Unable to parse the --resources argument using "
                        "json.loads. Try using a format like\n\n"
                        "    --resources='{\"CustomResource1\": 3, "
                        "\"CustomReseource2\": 2}'")

    redirect_worker_output = None if not no_redirect_worker_output else True
    redirect_output = None if not no_redirect_output else True
    ray_params = ray.parameter.RayParams(
        node_ip_address=node_ip_address,
        object_manager_port=object_manager_port,
        node_manager_port=node_manager_port,
        memory=memory,
        object_store_memory=object_store_memory,
        redis_password=redis_password,
        redirect_worker_output=redirect_worker_output,
        redirect_output=redirect_output,
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        resources=resources,
        plasma_directory=plasma_directory,
        huge_pages=huge_pages,
        plasma_store_socket_name=plasma_store_socket_name,
        raylet_socket_name=raylet_socket_name,
        temp_dir=temp_dir,
        include_java=include_java,
        include_webui=include_webui,
        webui_host=webui_host,
        java_worker_options=java_worker_options,
        load_code_from_local=load_code_from_local,
        use_pickle=use_pickle,
        _internal_config=internal_config)

    if head:
        # Start Ray on the head node.
        if redis_shard_ports is not None:
            redis_shard_ports = redis_shard_ports.split(",")
            # Infer the number of Redis shards from the ports if the number is
            # not provided.
            if num_redis_shards is None:
                num_redis_shards = len(redis_shard_ports)
            # Check that the arguments match.
            if len(redis_shard_ports) != num_redis_shards:
                raise Exception("If --redis-shard-ports is provided, it must "
                                "have the form '6380,6381,6382', and the "
                                "number of ports provided must equal "
                                "--num-redis-shards (which is 1 if not "
                                "provided)")

        if redis_address is not None:
            raise Exception("If --head is passed in, a Redis server will be "
                            "started, so a Redis address should not be "
                            "provided.")

        # Get the node IP address if one is not provided.
        ray_params.update_if_absent(
            node_ip_address=services.get_node_ip_address())
        logger.info("Using IP address {} for this node.".format(
            ray_params.node_ip_address))
        ray_params.update_if_absent(
            redis_port=redis_port,
            redis_shard_ports=redis_shard_ports,
            redis_max_memory=redis_max_memory,
            num_redis_shards=num_redis_shards,
            redis_max_clients=redis_max_clients,
            autoscaling_config=autoscaling_config,
            include_java=False,
        )

        node = ray.node.Node(
            ray_params, head=True, shutdown_at_exit=block, spawn_reaper=block)
        redis_address = node.redis_address

        logger.info(
            "\nStarted Ray on this node. You can add additional nodes to "
            "the cluster by calling\n\n"
            "    ray start --address='{}'{}\n\n"
            "from the node you wish to add. You can connect a driver to the "
            "cluster from Python by running\n\n"
            "    import ray\n"
            "    ray.init(address='auto'{})\n\n"
            "If you have trouble connecting from a different machine, check "
            "that your firewall is configured properly. If you wish to "
            "terminate the processes that have been started, run\n\n"
            "    ray stop".format(
                redis_address, " --redis-password='" + redis_password + "'"
                if redis_password else "",
                ", redis_password='" + redis_password + "'"
                if redis_password else ""))
    else:
        # Start Ray on a non-head node.
        if redis_port is not None:
            raise Exception("If --head is not passed in, --redis-port is not "
                            "allowed.")
        if redis_shard_ports is not None:
            raise Exception("If --head is not passed in, --redis-shard-ports "
                            "is not allowed.")
        if redis_address is None:
            raise Exception("If --head is not passed in, --redis-address must "
                            "be provided.")
        if num_redis_shards is not None:
            raise Exception("If --head is not passed in, --num-redis-shards "
                            "must not be provided.")
        if redis_max_clients is not None:
            raise Exception("If --head is not passed in, --redis-max-clients "
                            "must not be provided.")
        if include_webui:
            raise Exception("If --head is not passed in, the --include-webui "
                            "flag is not relevant.")
        if include_java is not None:
            raise ValueError("--include-java should only be set for the head "
                             "node.")

        # Wait for the Redis server to be started. And throw an exception if we
        # can't connect to it.
        services.wait_for_redis_to_start(
            redis_address_ip, redis_address_port, password=redis_password)

        # Create a Redis client.
        redis_client = services.create_redis_client(
            redis_address, password=redis_password)

        # Check that the version information on this node matches the version
        # information that the cluster was started with.
        services.check_version_info(redis_client)

        # Get the node IP address if one is not provided.
        ray_params.update_if_absent(
            node_ip_address=services.get_node_ip_address(redis_address))
        logger.info("Using IP address {} for this node.".format(
            ray_params.node_ip_address))
        # Check that there aren't already Redis clients with the same IP
        # address connected with this Redis instance. This raises an exception
        # if the Redis server already has clients on this node.
        check_no_existing_redis_clients(ray_params.node_ip_address,
                                        redis_client)
        ray_params.update(redis_address=redis_address)
        node = ray.node.Node(
            ray_params, head=False, shutdown_at_exit=block, spawn_reaper=block)
        logger.info("\nStarted Ray on this node. If you wish to terminate the "
                    "processes that have been started, run\n\n"
                    "    ray stop")

    if block:
        # Block forever, exiting only when a monitored subprocess dies.
        while True:
            time.sleep(1)
            deceased = node.dead_processes()
            if len(deceased) > 0:
                logger.error("Ray processes died unexpectedly:")
                for process_type, process in deceased:
                    logger.error("\t{} died with exit code {}".format(
                        process_type, process.returncode))
                # shutdown_at_exit will handle cleanup.
                logger.error("Killing remaining processes and exiting...")
                sys.exit(1)
@cli.command()
@click.option(
    "-f",
    "--force",
    is_flag=True,
    help="If set, ray will send SIGKILL instead of SIGTERM.")
@click.option(
    "-v",
    "--verbose",
    is_flag=True,
    help="If set, ray prints out more information about processes to kill.")
def stop(force, verbose):
    """Kill every Ray process running on this machine via ps/grep/kill."""
    # Note that raylet needs to exit before object store, otherwise
    # it cannot exit gracefully.
    processes_to_kill = [
        # The first element is the substring to filter.
        # The second element, if True, is to filter ps results by command name
        # (only the first 15 charactors of the executable name);
        # if False, is to filter ps results by command with all its arguments.
        # See STANDARD FORMAT SPECIFIERS section of
        # http://man7.org/linux/man-pages/man1/ps.1.html
        # about comm and args. This can help avoid killing non-ray processes.
        # Format:
        # Keyword to filter, filter by command (True)/filter by args (False)
        ["raylet", True],
        ["plasma_store", True],
        ["raylet_monitor", True],
        ["monitor.py", False],
        ["redis-server", False],
        ["default_worker.py", False],  # Python worker.
        ["ray::", True],  # Python worker.
        ["org.ray.runtime.runner.worker.DefaultWorker", False],  # Java worker.
        ["log_monitor.py", False],
        ["reporter.py", False],
        ["dashboard.py", False],
        ["ray_process_reaper.py", False],
    ]

    for process in processes_to_kill:
        keyword, filter_by_cmd = process
        if filter_by_cmd:
            ps_format = "pid,comm"
            # According to https://superuser.com/questions/567648/ps-comm-format-always-cuts-the-process-name,  # noqa: E501
            # comm only prints the first 15 characters of the executable name.
            if len(keyword) > 15:
                raise ValueError("The filter string should not be more than" +
                                 " 15 characters. Actual length: " +
                                 str(len(keyword)) + ". Filter: " + keyword)
        else:
            ps_format = "pid,args"
        debug_operator = "| tee /dev/stderr" if verbose else ""
        command = (
            "kill -s {} $(ps ax -o {} | grep {} | grep -v grep {} |"
            "awk '{{ print $1 }}') 2> /dev/null".format(
                # ^^ This is how you escape braces in python format string.
                "KILL" if force else "TERM",
                ps_format,
                keyword,
                debug_operator))
        if verbose:
            logger.info("Calling '{}'".format(command))
        subprocess.call([command], shell=True)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
    "--no-restart",
    is_flag=True,
    default=False,
    help=("Whether to skip restarting Ray services during the update. "
          "This avoids interrupting running jobs."))
@click.option(
    "--restart-only",
    is_flag=True,
    default=False,
    help=("Whether to skip running setup commands and only restart Ray. "
          "This cannot be used with 'no-restart'."))
@click.option(
    "--min-workers",
    required=False,
    type=int,
    help="Override the configured min worker node count for the cluster.")
@click.option(
    "--max-workers",
    required=False,
    type=int,
    help="Override the configured max worker node count for the cluster.")
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
@click.option(
    "--yes",
    "-y",
    is_flag=True,
    default=False,
    help="Don't ask for confirmation.")
def create_or_update(cluster_config_file, min_workers, max_workers, no_restart,
                     restart_only, yes, cluster_name):
    """Create or update a Ray cluster."""
    # --restart-only and --no-restart are mutually exclusive modes.
    if restart_only or no_restart:
        assert restart_only != no_restart, "Cannot set both 'restart_only' " \
            "and 'no_restart' at the same time!"
    create_or_update_cluster(cluster_config_file, min_workers, max_workers,
                             no_restart, restart_only, yes, cluster_name)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
    "--workers-only",
    is_flag=True,
    default=False,
    help="Only destroy the workers.")
@click.option(
    "--yes",
    "-y",
    is_flag=True,
    default=False,
    help="Don't ask for confirmation.")
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
def teardown(cluster_config_file, yes, workers_only, cluster_name):
    """Tear down the Ray cluster."""
    teardown_cluster(cluster_config_file, yes, workers_only, cluster_name)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
    "--yes",
    "-y",
    is_flag=True,
    default=False,
    help="Don't ask for confirmation.")
@click.option(
    "--hard",
    is_flag=True,
    default=False,
    help="Terminates the node via node provider (defaults to a 'soft kill'"
    " which terminates Ray but does not actually delete the instances).")
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
def kill_random_node(cluster_config_file, yes, hard, cluster_name):
    """Kills a random Ray node. For testing purposes only."""
    click.echo("Killed node with IP " +
               kill_node(cluster_config_file, yes, hard, cluster_name))
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
    "--lines",
    required=False,
    default=100,
    type=int,
    help="Number of lines to tail.")
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
def monitor(cluster_config_file, lines, cluster_name):
    """Runs `tail -n [lines] -f /tmp/ray/session_*/logs/monitor*` on head."""
    monitor_cluster(cluster_config_file, lines, cluster_name)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
    "--start",
    is_flag=True,
    default=False,
    help="Start the cluster if needed.")
@click.option(
    "--screen", is_flag=True, default=False, help="Run the command in screen.")
@click.option(
    "--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
@click.option(
    "--new", "-N", is_flag=True, help="Force creation of a new screen.")
def attach(cluster_config_file, start, screen, tmux, cluster_name, new):
    """Open an interactive shell on the cluster's head node."""
    attach_cluster(cluster_config_file, start, screen, tmux, cluster_name, new)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("source", required=False, type=str)
@click.argument("target", required=False, type=str)
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
def rsync_down(cluster_config_file, source, target, cluster_name):
    """Copy files from the cluster head node to the local machine."""
    rsync(cluster_config_file, source, target, cluster_name, down=True)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("source", required=False, type=str)
@click.argument("target", required=False, type=str)
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
def rsync_up(cluster_config_file, source, target, cluster_name):
    """Copy files from the local machine to the cluster head node."""
    rsync(cluster_config_file, source, target, cluster_name, down=False)
@cli.command(context_settings={"ignore_unknown_options": True})
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
    "--docker",
    is_flag=True,
    default=False,
    help="Runs command in the docker container specified in cluster_config.")
@click.option(
    "--stop",
    is_flag=True,
    default=False,
    help="Stop the cluster after the command finishes running.")
@click.option(
    "--start",
    is_flag=True,
    default=False,
    help="Start the cluster if needed.")
@click.option(
    "--screen",
    is_flag=True,
    default=False,
    help="Run the command in a screen.")
@click.option(
    "--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
@click.option(
    "--port-forward",
    required=False,
    multiple=True,
    type=int,
    help="Port to forward. Use this multiple times to forward multiple ports.")
@click.argument("script", required=True, type=str)
@click.option("--args", required=False, type=str, help="Script args.")
def submit(cluster_config_file, docker, screen, tmux, stop, start,
           cluster_name, port_forward, script, args):
    """Uploads and runs a script on the specified cluster.

    The script is automatically synced to the following location:

        os.path.join("~", os.path.basename(script))

    Example:
        >>> ray submit [CLUSTER.YAML] experiment.py --args="--smoke-test"
    """
    assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."

    if start:
        create_or_update_cluster(cluster_config_file, None, None, False, False,
                                 True, cluster_name)

    # Sync the script into the head node's home directory before running it.
    target = os.path.join("~", os.path.basename(script))
    rsync(cluster_config_file, script, target, cluster_name, down=False)

    command_parts = ["python", target]
    if args is not None:
        command_parts += [args]
    cmd = " ".join(command_parts)
    exec_cluster(cluster_config_file, cmd, docker, screen, tmux, stop, False,
                 cluster_name, list(port_forward))
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("cmd", required=True, type=str)
@click.option(
    "--docker",
    is_flag=True,
    default=False,
    help="Runs command in the docker container specified in cluster_config.")
@click.option(
    "--stop",
    is_flag=True,
    default=False,
    help="Stop the cluster after the command finishes running.")
@click.option(
    "--start",
    is_flag=True,
    default=False,
    help="Start the cluster if needed.")
@click.option(
    "--screen",
    is_flag=True,
    default=False,
    help="Run the command in a screen.")
@click.option(
    "--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
@click.option(
    "--port-forward",
    required=False,
    multiple=True,
    type=int,
    help="Port to forward. Use this multiple times to forward multiple ports.")
def exec_cmd(cluster_config_file, cmd, docker, screen, tmux, stop, start,
             cluster_name, port_forward):
    """Execute a shell command on the cluster head node."""
    exec_cluster(cluster_config_file, cmd, docker, screen, tmux, stop, start,
                 cluster_name, list(port_forward))
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
def get_head_ip(cluster_config_file, cluster_name):
    """Print the IP address of the cluster head node."""
    click.echo(get_head_node_ip(cluster_config_file, cluster_name))
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
    "--cluster-name",
    "-n",
    required=False,
    type=str,
    help="Override the configured cluster name.")
def get_worker_ips(cluster_config_file, cluster_name):
    """Print the IP address of every worker node, one per line."""
    worker_ips = get_worker_node_ips(cluster_config_file, cluster_name)
    click.echo("\n".join(worker_ips))
@cli.command()
def stack():
    """Dump the stacks of all local Ray worker processes via py-spy."""
    # Requires `py-spy` (pip install py-spy) and sudo. The shell snippet
    # greps `ps` output for ray worker processes and dumps each one.
    COMMAND = """
pyspy=`which py-spy`
if [ ! -e "$pyspy" ]; then
    echo "ERROR: Please 'pip install py-spy' (or ray[debug]) first"
    exit 1
fi
# Set IFS to iterate over lines instead of over words.
export IFS="
"
# Call sudo to prompt for password before anything has been printed.
sudo true
workers=$(
    ps aux | grep -E ' ray_|default_worker.py' | grep -v grep
)
for worker in $workers; do
    echo "Stack dump for $worker";
    pid=`echo $worker | awk '{print $2}'`;
    sudo $pyspy dump --pid $pid;
    echo;
done
    """
    subprocess.call(COMMAND, shell=True)
@cli.command()
def microbenchmark():
    """Run Ray's single-node performance microbenchmark (ray.ray_perf)."""
    # Imported lazily so the CLI starts fast when this command is unused.
    from ray.ray_perf import main
    main()
@cli.command()
def clusterbenchmark():
    """Run Ray's cluster performance benchmark (ray.ray_cluster_perf)."""
    # Imported lazily so the CLI starts fast when this command is unused.
    from ray.ray_cluster_perf import main
    main()
@cli.command()
@click.option(
    "--address",
    required=False,
    type=str,
    help="Override the redis address to connect to.")
def timeline(address):
    """Dump a Chrome-tracing timeline of the running Ray instance."""
    if not address:
        address = services.find_redis_address_or_die()
    logger.info("Connecting to Ray instance at {}.".format(address))
    ray.init(address=address)
    # Timestamped output path so repeated dumps do not overwrite each other.
    timestamp = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
    filename = "/tmp/ray-timeline-{}.json".format(timestamp)
    ray.timeline(filename=filename)
    size = os.path.getsize(filename)
    logger.info("Trace file written to {} ({} bytes).".format(filename, size))
    logger.info(
        "You can open this with chrome://tracing in the Chrome browser.")
@cli.command()
@click.option(
    "--address",
    required=False,
    type=str,
    help="Override the address to connect to.")
def stat(address):
    """Print node statistics from every raylet in the cluster."""
    if not address:
        address = services.find_redis_address_or_die()
    logger.info("Connecting to Ray instance at {}.".format(address))
    ray.init(address=address)
    # Imported lazily so the CLI starts fast when this command is unused.
    import grpc
    from ray.core.generated import node_manager_pb2
    from ray.core.generated import node_manager_pb2_grpc
    for raylet in ray.nodes():
        # BUG FIX: previously used ray.nodes()[0]["NodeManagerPort"], which
        # always queried the first node's port for every raylet. Use this
        # raylet's own port instead.
        raylet_address = "{}:{}".format(raylet["NodeManagerAddress"],
                                        raylet["NodeManagerPort"])
        logger.info("Querying raylet {}".format(raylet_address))
        channel = grpc.insecure_channel(raylet_address)
        stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
        reply = stub.GetNodeStats(
            node_manager_pb2.GetNodeStatsRequest(), timeout=2.0)
        print(reply)
# Register every subcommand on the top-level `ray` CLI group. Several
# commands are exposed under an alias (e.g. `up`, `down`, `exec`).
cli.add_command(start)
cli.add_command(stop)
cli.add_command(create_or_update, name="up")
cli.add_command(attach)
cli.add_command(exec_cmd, name="exec")
cli.add_command(rsync_down, name="rsync_down")
cli.add_command(rsync_up, name="rsync_up")
cli.add_command(submit)
cli.add_command(teardown)
cli.add_command(teardown, name="down")
cli.add_command(kill_random_node)
cli.add_command(get_head_ip, name="get_head_ip")
cli.add_command(get_worker_ips)
cli.add_command(microbenchmark)
cli.add_command(stack)
cli.add_command(stat)
cli.add_command(timeline)
cli.add_command(project_cli)
cli.add_command(session_cli)
# Ray Serve is an optional component; skip its CLI if the import fails.
try:
    from ray.experimental.serve.scripts import serve_cli
    cli.add_command(serve_cli)
except Exception as e:
    logger.debug(
        "Integrating ray serve command line tool failed with {}".format(e))
def main():
    """Entry point for the ``ray`` command-line tool."""
    return cli()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/serialization.py
|
Python
|
import hashlib
import io
import logging
import time
import pyarrow
import pyarrow.plasma as plasma
import ray.cloudpickle as pickle
from ray import ray_constants, JobID
import ray.utils
from ray.utils import _random_string
from ray.gcs_utils import ErrorType
from ray.exceptions import (
RayActorError,
RayWorkerError,
UnreconstructableError,
RAY_EXCEPTION_TYPES,
)
from ray._raylet import Pickle5Writer, unpack_pickle5_buffers
logger = logging.getLogger(__name__)
class RayNotDictionarySerializable(Exception):
    """Raised when a class cannot be serialized by Ray via its __dict__."""
    pass
# This exception is used to represent situations where cloudpickle fails to
# pickle an object (cloudpickle can fail in many different ways).
class CloudPickleError(Exception):
    """Raised when cloudpickle fails to pickle an object."""
    pass
class DeserializationError(Exception):
    """Raised when a stored object cannot (yet) be deserialized, e.g.
    because a needed class definition has not been imported."""
    pass
class SerializedObject:
    """Base class pairing a serialized payload with a format metadata tag."""
    def __init__(self, metadata):
        # Raw metadata bytes identifying how the payload was serialized.
        self._metadata = metadata
    @property
    def total_bytes(self):
        # Subclasses report the payload size; abstract here.
        raise NotImplementedError
    @property
    def metadata(self):
        return self._metadata
class Pickle5SerializedObject(SerializedObject):
    """An object serialized with pickle protocol 5 out-of-band buffers."""
    def __init__(self, inband, writer):
        super(Pickle5SerializedObject,
              self).__init__(ray_constants.PICKLE5_BUFFER_METADATA)
        # In-band pickle bytes; out-of-band buffers are held by `writer`.
        self.inband = inband
        self.writer = writer
        # cached total bytes
        self._total_bytes = None
    @property
    def total_bytes(self):
        # Computed lazily via the writer, then cached.
        if self._total_bytes is None:
            self._total_bytes = self.writer.get_total_bytes(self.inband)
        return self._total_bytes
class ArrowSerializedObject(SerializedObject):
    """A pyarrow-serialized object; its metadata tag is empty bytes."""
    def __init__(self, serialized_object):
        super(ArrowSerializedObject, self).__init__(b"")
        # The pyarrow-serialized payload (exposes `total_bytes`).
        self.serialized_object = serialized_object
    @property
    def total_bytes(self):
        return self.serialized_object.total_bytes
class RawSerializedObject(SerializedObject):
    """Raw bytes stored as-is, tagged with the raw-buffer metadata."""
    def __init__(self, value):
        super(RawSerializedObject,
              self).__init__(ray_constants.RAW_BUFFER_METADATA)
        self.value = value
    @property
    def total_bytes(self):
        return len(self.value)
def _try_to_compute_deterministic_class_id(cls, depth=5):
"""Attempt to produce a deterministic class ID for a given class.
The goal here is for the class ID to be the same when this is run on
different worker processes. Pickling, loading, and pickling again seems to
produce more consistent results than simply pickling. This is a bit crazy
and could cause problems, in which case we should revert it and figure out
something better.
Args:
cls: The class to produce an ID for.
depth: The number of times to repeatedly try to load and dump the
string while trying to reach a fixed point.
Returns:
A class ID for this class. We attempt to make the class ID the same
when this function is run on different workers, but that is not
guaranteed.
Raises:
Exception: This could raise an exception if cloudpickle raises an
exception.
"""
# Pickling, loading, and pickling again seems to produce more consistent
# results than simply pickling. This is a bit
class_id = pickle.dumps(cls)
for _ in range(depth):
new_class_id = pickle.dumps(pickle.loads(class_id))
if new_class_id == class_id:
# We appear to have reached a fix point, so use this as the ID.
return hashlib.sha1(new_class_id).digest()
class_id = new_class_id
# We have not reached a fixed point, so we may end up with a different
# class ID for this custom class on each worker, which could lead to the
# same class definition being exported many many times.
logger.warning(
"WARNING: Could not produce a deterministic class ID for class "
"{}".format(cls))
return hashlib.sha1(new_class_id).digest()
class SerializationContext:
    """Per-worker serialization registry.

    Defines custom serializers for Ray ID types and actor handles, and
    registers the exception classes Ray uses for error handling. Backed
    either by pyarrow or by pickle5/cloudpickle depending on
    ``worker.use_pickle``.
    """
    def __init__(self, worker):
        """Install ID and actor-handle serializers for the given worker.

        Chooses between the pyarrow backend and the cloudpickle/pickle5
        backend based on ``worker.use_pickle``.
        """
        self.worker = worker
        self.use_pickle = worker.use_pickle
        def actor_handle_serializer(obj):
            return obj._serialization_helper(True)
        def actor_handle_deserializer(serialized_obj):
            new_handle = ray.actor.ActorHandle.__new__(ray.actor.ActorHandle)
            new_handle._deserialization_helper(serialized_obj, True)
            return new_handle
        if not worker.use_pickle:
            # pyarrow backend.
            serialization_context = pyarrow.default_serialization_context()
            # Tell the serialization context to use the cloudpickle version
            # that we ship with Ray.
            serialization_context.set_pickle(pickle.dumps, pickle.loads)
            pyarrow.register_torch_serialization_handlers(
                serialization_context)
            def id_serializer(obj):
                return pickle.dumps(obj)
            def id_deserializer(serialized_obj):
                return pickle.loads(serialized_obj)
            def object_id_serializer(obj):
                owner_id = ""
                owner_address = ""
                # Direct-call ObjectIDs carry ownership info that must be
                # promoted and shipped along with the ID itself.
                if obj.is_direct_call_type():
                    worker = ray.worker.get_global_worker()
                    worker.check_connected()
                    obj, owner_id, owner_address = (
                        worker.core_worker.serialize_and_promote_object_id(obj)
                    )
                obj = obj.__reduce__()
                owner_id = owner_id.__reduce__() if owner_id else owner_id
                return pickle.dumps((obj, owner_id, owner_address))
            def object_id_deserializer(serialized_obj):
                obj_id, owner_id, owner_address = pickle.loads(serialized_obj)
                # NOTE(swang): Must deserialize the object first before asking
                # the core worker to resolve the value. This is to make sure
                # that the ref count for the ObjectID is greater than 0 by the
                # time the core worker resolves the value of the object.
                deserialized_object_id = obj_id[0](obj_id[1][0])
                if owner_id:
                    worker = ray.worker.get_global_worker()
                    worker.check_connected()
                    # UniqueIDs are serialized as
                    # (class name, (unique bytes,)).
                    worker.core_worker.deserialize_and_register_object_id(
                        obj_id[1][0], owner_id[1][0], owner_address)
                return deserialized_object_id
            # ObjectID gets the ownership-aware serializer; all other ID
            # types round-trip through plain pickle.
            for id_type in ray._raylet._ID_TYPES:
                if id_type == ray._raylet.ObjectID:
                    serialization_context.register_type(
                        id_type,
                        "{}.{}".format(id_type.__module__, id_type.__name__),
                        custom_serializer=object_id_serializer,
                        custom_deserializer=object_id_deserializer)
                else:
                    serialization_context.register_type(
                        id_type,
                        "{}.{}".format(id_type.__module__, id_type.__name__),
                        custom_serializer=id_serializer,
                        custom_deserializer=id_deserializer)
            # We register this serializer on each worker instead of calling
            # _register_custom_serializer from the driver so that isinstance
            # still works.
            serialization_context.register_type(
                ray.actor.ActorHandle,
                "ray.ActorHandle",
                pickle=False,
                custom_serializer=actor_handle_serializer,
                custom_deserializer=actor_handle_deserializer)
            self.pyarrow_context = serialization_context
        else:
            # cloudpickle/pickle5 backend.
            self._register_cloudpickle_serializer(
                ray.actor.ActorHandle,
                custom_serializer=actor_handle_serializer,
                custom_deserializer=actor_handle_deserializer)
            def id_serializer(obj):
                return obj.__reduce__()
            def id_deserializer(serialized_obj):
                return serialized_obj[0](*serialized_obj[1])
            def object_id_serializer(obj):
                owner_id = ""
                owner_address = ""
                if obj.is_direct_call_type():
                    worker = ray.worker.get_global_worker()
                    worker.check_connected()
                    obj, owner_id, owner_address = (
                        worker.core_worker.serialize_and_promote_object_id(obj)
                    )
                obj = id_serializer(obj)
                owner_id = id_serializer(owner_id) if owner_id else owner_id
                return (obj, owner_id, owner_address)
            def object_id_deserializer(serialized_obj):
                obj_id, owner_id, owner_address = serialized_obj
                # NOTE(swang): Must deserialize the object first before asking
                # the core worker to resolve the value. This is to make sure
                # that the ref count for the ObjectID is greater than 0 by the
                # time the core worker resolves the value of the object.
                deserialized_object_id = id_deserializer(obj_id)
                if owner_id:
                    worker = ray.worker.get_global_worker()
                    worker.check_connected()
                    # UniqueIDs are serialized as
                    # (class name, (unique bytes,)).
                    worker.core_worker.deserialize_and_register_object_id(
                        obj_id[1][0], owner_id[1][0], owner_address)
                return deserialized_object_id
            for id_type in ray._raylet._ID_TYPES:
                if id_type == ray._raylet.ObjectID:
                    self._register_cloudpickle_serializer(
                        id_type, object_id_serializer, object_id_deserializer)
                else:
                    self._register_cloudpickle_serializer(
                        id_type, id_serializer, id_deserializer)
    def initialize(self):
        """Register custom serializers (no-op for the pickle5 backend)."""
        if not self.worker.use_pickle:
            # Register Ray's exception types so errors survive the round
            # trip through the object store.
            for error_cls in RAY_EXCEPTION_TYPES:
                self.register_custom_serializer(
                    error_cls,
                    use_dict=True,
                    local=True,
                    # NOTE(review): separator is ". " (with a space) — looks
                    # like a typo, but IDs only need to be consistent, which
                    # they are; confirm before changing.
                    class_id=error_cls.__module__ + ". " + error_cls.__name__,
                )
            # Tell Ray to serialize lambdas with pickle.
            self.register_custom_serializer(
                type(lambda: 0),
                use_pickle=True,
                local=True,
                class_id="lambda")
            # Tell Ray to serialize types with pickle.
            self.register_custom_serializer(
                type(int), use_pickle=True, local=True, class_id="type")
            # Tell Ray to serialize RayParameters as dictionaries. This is
            # used when passing around actor handles.
            self.register_custom_serializer(
                ray.signature.RayParameter,
                use_dict=True,
                local=True,
                class_id="ray.signature.RayParameter")
            # Tell Ray to serialize StringIO with pickle. We do this because
            # Ray's default __dict__ serialization is incorrect for this type
            # (the object's __dict__ is empty and therefore doesn't
            # contain the full state of the object).
            self.register_custom_serializer(
                io.StringIO,
                use_pickle=True,
                local=True,
                class_id="io.StringIO")
    def _register_cloudpickle_serializer(self, cls, custom_serializer,
                                         custom_deserializer):
        """Install a (serializer, deserializer) pair for `cls` in the
        cloudpickle dispatch table, handling both cloudpickle variants."""
        if pickle.FAST_CLOUDPICKLE_USED:
            # Newer cloudpickle: reducers are plain callables of the object.
            def _CloudPicklerReducer(obj):
                return custom_deserializer, (custom_serializer(obj), )
            # construct a reducer
            pickle.CloudPickler.dispatch[cls] = _CloudPicklerReducer
        else:
            # Older cloudpickle: dispatch entries are bound-style methods.
            def _CloudPicklerReducer(_self, obj):
                _self.save_reduce(
                    custom_deserializer, (custom_serializer(obj), ), obj=obj)
            # use a placeholder for 'self' argument
            pickle.CloudPickler.dispatch[cls] = _CloudPicklerReducer
    def _deserialize_object_from_arrow(self, data, metadata, object_id):
        """Convert raw (data, metadata) buffers into a Python object.

        The metadata tag selects the decoding path: pickle5, raw bytes, or
        an error marker; absent metadata means a pyarrow-serialized payload.
        Error markers are returned as exception *objects*, not raised.
        """
        if metadata:
            if metadata == ray_constants.PICKLE5_BUFFER_METADATA:
                if not self.use_pickle:
                    raise ValueError("Receiving pickle5 serialized objects "
                                     "while the serialization context is "
                                     "using pyarrow as the backend.")
                try:
                    in_band, buffers = unpack_pickle5_buffers(data)
                    if len(buffers) > 0:
                        return pickle.loads(in_band, buffers=buffers)
                    else:
                        return pickle.loads(in_band)
                # cloudpickle does not provide error types
                except pickle.pickle.PicklingError:
                    # Signal the caller to retry once class defs arrive.
                    raise DeserializationError()
            # Check if the object should be returned as raw bytes.
            if metadata == ray_constants.RAW_BUFFER_METADATA:
                if data is None:
                    return b""
                return data.to_pybytes()
            # Otherwise, return an exception object based on
            # the error type.
            error_type = int(metadata)
            if error_type == ErrorType.Value("WORKER_DIED"):
                return RayWorkerError()
            elif error_type == ErrorType.Value("ACTOR_DIED"):
                return RayActorError()
            elif error_type == ErrorType.Value("OBJECT_UNRECONSTRUCTABLE"):
                return UnreconstructableError(ray.ObjectID(object_id.binary()))
            else:
                assert error_type != ErrorType.Value("OBJECT_IN_PLASMA"), \
                    "Tried to get object that has been promoted to plasma."
                assert False, "Unrecognized error type " + str(error_type)
        elif data:
            if self.use_pickle:
                raise ValueError("Receiving plasma serialized objects "
                                 "while the serialization context is "
                                 "using pickle5 as the backend.")
            try:
                # If data is not empty, deserialize the object.
                return pyarrow.deserialize(data, self.pyarrow_context)
            except pyarrow.DeserializationCallbackError:
                raise DeserializationError()
        else:
            # Object isn't available in plasma. This should never be returned
            # to the user. We should only reach this line if this object was
            # deserialized as part of a list, and another object in the list
            # throws an exception.
            return plasma.ObjectNotAvailable
    def _store_and_register_pyarrow(self, value, depth=100):
        """Store an object and attempt to register its class if needed.

        Args:
            value: The value to put in the object store.
            depth: The maximum number of classes to recursively register.

        Returns:
            The pyarrow-serialized value.

        Raises:
            Exception: An exception is raised if the attempt to serialize the
                object fails.
        """
        counter = 0
        while True:
            if counter == depth:
                raise Exception("Ray exceeded the maximum number of classes "
                                "that it will recursively serialize when "
                                "attempting to serialize an object of "
                                "type {}.".format(type(value)))
            counter += 1
            try:
                return pyarrow.serialize(value, self.pyarrow_context)
            except pyarrow.SerializationCallbackError as e:
                # Serialization failed on an inner object; register a
                # serializer for its class, then retry the whole value.
                cls_type = type(e.example_object)
                try:
                    # First choice: serialize the class via its __dict__.
                    self.register_custom_serializer(cls_type, use_dict=True)
                    warning_message = (
                        "WARNING: Serializing objects of type "
                        "{} by expanding them as dictionaries "
                        "of their fields. This behavior may "
                        "be incorrect in some cases.".format(cls_type))
                    logger.debug(warning_message)
                except (RayNotDictionarySerializable, CloudPickleError,
                        pickle.pickle.PicklingError, Exception):
                    # We also handle generic exceptions here because
                    # cloudpickle can fail with many different types of errors.
                    warning_message = (
                        "Falling back to serializing {} objects by using "
                        "pickle. Use `ray.register_custom_serializer({},...)` "
                        "to provide faster serialization.".format(
                            cls_type, cls_type))
                    try:
                        # Second choice: pickle, registered on all workers.
                        self.register_custom_serializer(
                            cls_type, use_pickle=True)
                        logger.warning(warning_message)
                    except (CloudPickleError, ValueError):
                        # Last resort: pickle, registered locally only.
                        self.register_custom_serializer(
                            cls_type, use_pickle=True, local=True)
                        warning_message = ("WARNING: Pickling the class {} "
                                           "failed, so we are using pickle "
                                           "and only registering the class "
                                           "locally.".format(cls_type))
                        logger.warning(warning_message)
def deserialize_objects(self,
data_metadata_pairs,
object_ids,
error_timeout=10):
pass
assert len(data_metadata_pairs) == len(object_ids)
start_time = time.time()
results = []
warning_sent = False
i = 0
while i < len(object_ids):
object_id = object_ids[i]
data, metadata = data_metadata_pairs[i]
try:
results.append(
self._deserialize_object_from_arrow(
data, metadata, object_id))
i += 1
except DeserializationError:
# Wait a little bit for the import thread to import the class.
# If we currently have the worker lock, we need to release it
# so that the import thread can acquire it.
time.sleep(0.01)
if time.time() - start_time > error_timeout:
warning_message = ("This worker or driver is waiting to "
"receive a class definition so that it "
"can deserialize an object from the "
"object store. This may be fine, or it "
"may be a bug.")
if not warning_sent:
ray.utils.push_error_to_driver(
self,
ray_constants.WAIT_FOR_CLASS_PUSH_ERROR,
warning_message,
job_id=self.worker.current_job_id)
warning_sent = True
return results
    def serialize(self, value):
        """Serialize an object.

        Args:
            value: The value to serialize.

        Returns:
            A SerializedObject subclass: RawSerializedObject for bytes,
            Pickle5SerializedObject for the pickle backend, otherwise
            ArrowSerializedObject.
        """
        if isinstance(value, bytes):
            # If the object is a byte array, skip serializing it and
            # use a special metadata to indicate it's raw binary. So
            # that this object can also be read by Java.
            return RawSerializedObject(value)
        if self.worker.use_pickle:
            writer = Pickle5Writer()
            if ray.cloudpickle.FAST_CLOUDPICKLE_USED:
                # Protocol 5 ships large buffers out of band via the writer.
                inband = pickle.dumps(
                    value, protocol=5, buffer_callback=writer.buffer_callback)
            else:
                inband = pickle.dumps(value)
            return Pickle5SerializedObject(inband, writer)
        else:
            try:
                serialized_value = self._store_and_register_pyarrow(value)
            except TypeError:
                # TypeError can happen because one of the members of the object
                # may not be serializable for cloudpickle. So we need
                # these extra fallbacks here to start from the beginning.
                # Hopefully the object could have a `__reduce__` method.
                self.register_custom_serializer(type(value), use_pickle=True)
                logger.warning("WARNING: Serializing the class {} failed, "
                               "falling back to cloudpickle.".format(
                                   type(value)))
                serialized_value = self._store_and_register_pyarrow(value)
            return ArrowSerializedObject(serialized_value)
def register_custom_serializer(self,
cls,
use_pickle=False,
use_dict=False,
serializer=None,
deserializer=None,
local=False,
job_id=None,
class_id=None):
"""Enable serialization and deserialization for a particular class.
This method runs the register_class function defined below on
every worker, which will enable ray to properly serialize and
deserialize objects of this class.
Args:
cls (type): The class that ray should use this custom serializer
for.
use_pickle (bool): If true, then objects of this class will be
serialized using pickle.
use_dict: If true, then objects of this class be serialized
turning their __dict__ fields into a dictionary. Must be False
if use_pickle is true.
serializer: The custom serializer to use. This should be provided
if and only if use_pickle and use_dict are False.
deserializer: The custom deserializer to use. This should be
provided if and only if use_pickle and use_dict are False.
local: True if the serializers should only be registered on the
current worker. This should usually be False.
job_id: ID of the job that we want to register the class for.
class_id (str): Unique ID of the class. Autogenerated if None.
Raises:
RayNotDictionarySerializable: Raised if use_dict is true and cls
cannot be efficiently serialized by Ray.
ValueError: Raised if ray could not autogenerate a class_id.
"""
assert (serializer is None) == (deserializer is None), (
"The serializer/deserializer arguments must both be provided or "
"both not be provided.")
use_custom_serializer = (serializer is not None)
assert use_custom_serializer + use_pickle + use_dict == 1, (
"Exactly one of use_pickle, use_dict, or serializer/deserializer "
"must be specified.")
if self.worker.use_pickle and serializer is None:
# In this case it should do nothing.
return
if use_dict:
# Raise an exception if cls cannot be serialized
# efficiently by Ray.
check_serializable(cls)
if class_id is None:
if not local:
# In this case, the class ID will be used to deduplicate the
# class across workers. Note that cloudpickle unfortunately
# does not produce deterministic strings, so these IDs could
# be different on different workers. We could use something
# weaker like cls.__name__, however that would run the risk
# of having collisions.
# TODO(rkn): We should improve this.
try:
# Attempt to produce a class ID that will be the same on
# each worker. However, determinism is not guaranteed,
# and the result may be different on different workers.
class_id = _try_to_compute_deterministic_class_id(cls)
except Exception:
raise ValueError(
"Failed to use pickle in generating a unique id"
"for '{}'. Provide a unique class_id.".format(cls))
else:
# In this case, the class ID only needs to be meaningful on
# this worker and not across workers.
class_id = _random_string()
# Make sure class_id is a string.
class_id = ray.utils.binary_to_hex(class_id)
if job_id is None:
job_id = self.worker.current_job_id
assert isinstance(job_id, JobID)
def register_class_for_serialization(worker_info):
context = worker_info["worker"].get_serialization_context(job_id)
if worker_info["worker"].use_pickle:
context._register_cloudpickle_serializer(
cls, serializer, deserializer)
else:
# TODO(rkn): We need to be more thoughtful about what to do if
# custom serializers have already been registered for
# class_id. In some cases, we may want to use the last
# user-defined serializers and ignore subsequent calls to
# register_custom_serializer that were made by the system.
context.pyarrow_context.register_type(
cls,
class_id,
pickle=use_pickle,
custom_serializer=serializer,
custom_deserializer=deserializer)
if not local:
self.worker.run_function_on_all_workers(
register_class_for_serialization)
else:
# Since we are pickling objects of this class, we don't actually
# need to ship the class definition.
register_class_for_serialization({"worker": self.worker})
def check_serializable(cls):
    """Throws an exception if Ray cannot serialize this class efficiently.

    Args:
        cls (type): The class to be serialized.

    Raises:
        RayNotDictionarySerializable: If Ray cannot serialize this class
            efficiently via its __dict__.
    """
    if is_named_tuple(cls):
        # This case works.
        return
    if not hasattr(cls, "__new__"):
        # BUG FIX: the message previously lacked .format(cls), printing a
        # literal "{}", and misspelled "old-style" as "old-stye".
        print("The class {} does not have a '__new__' attribute and is "
              "probably an old-style class. Please make it a new-style class "
              "by inheriting from 'object'.".format(cls))
        raise RayNotDictionarySerializable("The class {} does not have a "
                                           "'__new__' attribute and is "
                                           "probably an old-style class. We "
                                           "do not support this. Please make "
                                           "it a new-style class by "
                                           "inheriting from 'object'."
                                           .format(cls))
    try:
        # Instantiate without calling __init__ to probe the class layout.
        obj = cls.__new__(cls)
    except Exception:
        raise RayNotDictionarySerializable("The class {} has overridden "
                                           "'__new__', so Ray may not be "
                                           "able to serialize it "
                                           "efficiently.".format(cls))
    if not hasattr(obj, "__dict__"):
        raise RayNotDictionarySerializable("Objects of the class {} do not "
                                           "have a '__dict__' attribute, so "
                                           "Ray cannot serialize it "
                                           "efficiently.".format(cls))
    if hasattr(obj, "__slots__"):
        raise RayNotDictionarySerializable("The class {} uses '__slots__', so "
                                           "Ray may not be able to serialize "
                                           "it efficiently.".format(cls))
def is_named_tuple(cls):
    """Return True if cls is a namedtuple and False otherwise."""
    bases = cls.__bases__
    # A namedtuple derives directly (and only) from tuple...
    if len(bases) != 1 or bases[0] is not tuple:
        return False
    # ...and carries a `_fields` tuple of plain field-name strings.
    fields = getattr(cls, "_fields", None)
    if not isinstance(fields, tuple):
        return False
    return all(type(field) is str for field in fields)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/services.py
|
Python
|
import collections
import json
import logging
import multiprocessing
import os
import random
import re
import resource
import socket
import subprocess
import sys
import time
import redis
import colorama
import pyarrow
# Ray modules
import ray
import ray.ray_constants as ray_constants
# True if processes are run in the valgrind profiler.
RUN_RAYLET_PROFILER = False
RUN_PLASMA_STORE_PROFILER = False
# Location of the redis server and module.
RAY_HOME = os.path.join(os.path.dirname(__file__), "../..")
REDIS_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/ray/thirdparty/redis/src/redis-server")
REDIS_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/ray/gcs/redis_module/libray_redis_module.so")
# Location of the credis server and modules.
# credis will be enabled if the environment variable RAY_USE_NEW_GCS is set.
CREDIS_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/redis/src/redis-server")
CREDIS_MASTER_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/build/src/libmaster.so")
CREDIS_MEMBER_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/build/src/libmember.so")
# Location of the plasma object store executable.
PLASMA_STORE_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/plasma/plasma_store_server")
# Location of the raylet executables.
RAYLET_MONITOR_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/ray/raylet/raylet_monitor")
RAYLET_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "core/src/ray/raylet/raylet")
DEFAULT_JAVA_WORKER_OPTIONS = "-classpath {}".format(
os.path.join(
os.path.abspath(os.path.dirname(__file__)), "../../../build/java/*"))
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Bookkeeping record for a process launched by Ray: the Popen handle, where
# its output is redirected, and which (if any) debugging/profiling tool or
# tmux session wraps it.
ProcessInfo = collections.namedtuple("ProcessInfo", [
    "process", "stdout_file", "stderr_file", "use_valgrind", "use_gdb",
    "use_valgrind_profiler", "use_perftools_profiler", "use_tmux"
])
def address(ip_address, port):
    """Return the "<ip>:<port>" address string for a host and port."""
    return ":".join([ip_address, str(port)])
def new_port():
    """Pick a random port in the inclusive high range [10000, 65535]."""
    low, high = 10000, 65535
    return random.randint(low, high)
def include_java_from_redis(redis_client):
    """Query Redis for whether this cluster enables Java workers.

    Args:
        redis_client (StrictRedis): The redis client to GCS.

    Returns:
        True if this cluster backend enables Java worker.
    """
    flag = redis_client.get("INCLUDE_JAVA")
    return flag == b"1"
def find_redis_address_or_die():
    """Find the redis address of the single local Ray instance.

    Scans the command lines of running processes for a
    `--redis-address=<addr>` argument.

    Returns:
        The redis address of the one running instance.

    Raises:
        ImportError: If psutil is not installed.
        ConnectionError: If zero, or more than one, instance is found.
    """
    try:
        import psutil
    except ImportError:
        raise ImportError(
            "Please install `psutil` to automatically detect the Ray cluster.")
    pids = psutil.pids()
    redis_addresses = set()
    for pid in pids:
        try:
            proc = psutil.Process(pid)
            # Each cmdline element may itself hold space-separated args.
            for arglist in proc.cmdline():
                for arg in arglist.split(" "):
                    if arg.startswith("--redis-address="):
                        addr = arg.split("=")[1]
                        redis_addresses.add(addr)
        except psutil.AccessDenied:
            pass
        except psutil.NoSuchProcess:
            pass
    if len(redis_addresses) > 1:
        # BUG FIX: removed an unreachable `sys.exit(1)` that followed this
        # raise statement (dead code).
        raise ConnectionError(
            "Found multiple active Ray instances: {}. ".format(redis_addresses)
            + "Please specify the one to connect to by setting `address`.")
    elif not redis_addresses:
        raise ConnectionError(
            "Could not find any running Ray instance. "
            "Please specify the one to connect to by setting `address`.")
    return redis_addresses.pop()
def get_address_info_from_redis_helper(redis_address,
                                       node_ip_address,
                                       redis_password=None):
    """Look up this node's raylet sockets/port in the GCS client table.

    Returns:
        Dict with keys "object_store_address", "raylet_socket_name", and
        "node_manager_port" for the raylet on `node_ip_address`.

    Raises:
        Exception: If no raylet (or no raylet on this node) has registered.
    """
    redis_ip_address, redis_port = redis_address.split(":")
    # For this command to work, some other client (on the same machine as
    # Redis) must have run "CONFIG SET protected-mode no".
    redis_client = create_redis_client(redis_address, password=redis_password)
    client_table = ray.state._parse_client_table(redis_client)
    if len(client_table) == 0:
        raise Exception(
            "Redis has started but no raylets have registered yet.")
    relevant_client = None
    for client_info in client_table:
        client_node_ip_address = client_info["NodeManagerAddress"]
        # A raylet registered as 127.0.0.1 matches when Redis is running
        # on this very host.
        if (client_node_ip_address == node_ip_address
                or (client_node_ip_address == "127.0.0.1"
                    and redis_ip_address == get_node_ip_address())):
            relevant_client = client_info
            break
    if relevant_client is None:
        raise Exception(
            "Redis has started but no raylets have registered yet.")
    return {
        "object_store_address": relevant_client["ObjectStoreSocketName"],
        "raylet_socket_name": relevant_client["RayletSocketName"],
        "node_manager_port": relevant_client["NodeManagerPort"]
    }
def get_address_info_from_redis(redis_address,
                                node_ip_address,
                                num_retries=5,
                                redis_password=None):
    """Like get_address_info_from_redis_helper, but with retries.

    Retries once per second up to `num_retries` times, since raylets may
    still be registering with Redis when this is first called.
    """
    counter = 0
    while True:
        try:
            return get_address_info_from_redis_helper(
                redis_address, node_ip_address, redis_password=redis_password)
        except Exception:
            if counter == num_retries:
                raise
            # Some of the information may not be in Redis yet, so wait a little
            # bit.
            logger.warning(
                "Some processes that the driver needs to connect to have "
                "not registered with Redis, so retrying. Have you run "
                "'ray start' on this node?")
            time.sleep(1)
            counter += 1
def get_webui_url_from_redis(redis_client):
    """Return the web UI URL recorded in Redis, or None if not set."""
    webui_url = redis_client.hmget("webui", "url")[0]
    # Value is stored as bytes; decode it for callers.
    return ray.utils.decode(webui_url) if webui_url is not None else None
def remaining_processes_alive():
    """See if the remaining processes are alive or not.

    Note that this ignores processes that have been explicitly killed,
    e.g., via a command like node.kill_raylet().

    Returns:
        True if the remaining processes started by ray.init() are alive and
        False otherwise.

    Raises:
        Exception: An exception is raised if the processes were not started by
            ray.init().
    """
    if ray.worker._global_node is None:
        # No local node was started by this process via ray.init().
        raise Exception("This process is not in a position to determine "
                        "whether all processes are alive or not.")
    return ray.worker._global_node.remaining_processes_alive()
def validate_redis_address(address, redis_address):
    """Validates redis address parameter and splits it into host/ip components.

    We temporarily support both 'address' and 'redis_address', so both are
    handled here.

    Returns:
        redis_address: string containing the full <host:port> address.
        redis_ip: string representing the host portion of the address.
        redis_port: integer representing the port portion of the address.

    Raises:
        ValueError: if both address and redis_address were specified or the
            address was malformed.
    """
    if redis_address == "auto":
        raise ValueError("auto address resolution not supported for "
                         "redis_address parameter. Please use address.")
    if address:
        if redis_address:
            raise ValueError(
                "Both address and redis_address specified. Use only address.")
        if address == "auto":
            # Discover the running instance by scanning local processes.
            address = find_redis_address_or_die()
        redis_address = address
    # NOTE(review): if both arguments are falsy we fall through here with
    # redis_address=None and address_to_ip will fail — confirm callers
    # always pass one of the two.
    redis_address = address_to_ip(redis_address)
    redis_address_parts = redis_address.split(":")
    if len(redis_address_parts) != 2:
        raise ValueError("Malformed address. Expected '<host>:<port>'.")
    redis_ip = redis_address_parts[0]
    try:
        redis_port = int(redis_address_parts[1])
    except ValueError:
        raise ValueError("Malformed address port. Must be an integer.")
    if redis_port < 1024 or redis_port > 65535:
        raise ValueError("Invalid address port. Must "
                         "be between 1024 and 65535.")
    return redis_address, redis_ip, redis_port
def address_to_ip(address):
    """Convert a hostname in an address to a numerical IP address.

    This should be a no-op if address already contains an actual numerical
    IP address.

    Args:
        address: This can be either a string containing a hostname (or an
            IP address) and a port or it can be just an IP address.

    Returns:
        The same address but with the hostname replaced by a numerical IP
        address.
    """
    host, sep, remainder = address.partition(":")
    resolved_ip = socket.gethostbyname(host)
    # Make sure localhost isn't resolved to the loopback ip
    if resolved_ip == "127.0.0.1":
        resolved_ip = get_node_ip_address()
    return resolved_ip + sep + remainder
def get_node_ip_address(address="8.8.8.8:53"):
    """Determine the IP address of the local node.

    A UDP socket is "connected" to a known external address; connecting a
    UDP socket sends no packets, it only asks the kernel which local
    interface (and hence IP) would be used to reach that address.

    Args:
        address (str): The IP address and port of any known live service on
            the network you care about.

    Returns:
        The IP address of the current node, or "127.0.0.1" if it could not
        be determined.
    """
    ip_address, port = address.split(":")
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # This command will raise an exception if there is no internet
        # connection.
        s.connect((ip_address, int(port)))
        node_ip_address = s.getsockname()[0]
    except Exception as e:
        node_ip_address = "127.0.0.1"
        # [Errno 101] Network is unreachable. Use getattr because not every
        # exception type carries an errno attribute (e.g. a ValueError from
        # int(port) would previously raise AttributeError here).
        if getattr(e, "errno", None) == 101:
            try:
                # Try to get the node ip address from the host name instead.
                host_name = socket.getfqdn(socket.gethostname())
                node_ip_address = socket.gethostbyname(host_name)
            except Exception:
                pass
    finally:
        s.close()

    return node_ip_address
def create_redis_client(redis_address, password=None):
    """Create a Redis client.

    Args:
        redis_address (str): The "<ip>:<port>" address of the Redis server.
        password (str): The password of the Redis server, if any.

    Returns:
        A Redis client.
    """
    host, port = redis_address.split(":")
    # For this command to work, some other client (on the same machine
    # as Redis) must have run "CONFIG SET protected-mode no".
    return redis.StrictRedis(host=host, port=int(port), password=password)
def start_ray_process(command,
                      process_type,
                      env_updates=None,
                      cwd=None,
                      use_valgrind=False,
                      use_gdb=False,
                      use_valgrind_profiler=False,
                      use_perftools_profiler=False,
                      use_tmux=False,
                      stdout_file=None,
                      stderr_file=None,
                      pipe_stdin=False):
    """Start one of the Ray processes.

    TODO(rkn): We need to figure out how these commands interact. For
    example, it may only make sense to start a process in gdb if we also
    start it in tmux. Similarly, certain combinations probably don't make
    sense, like simultaneously running the process in valgrind and the
    profiler.

    Args:
        command (List[str]): The command to use to start the Ray process.
        process_type (str): The type of the process that is being started
            (e.g., "raylet").
        env_updates (dict): A dictionary of additional environment variables
            to run the command with (in addition to the caller's environment
            variables).
        cwd (str): The directory to run the process in.
        use_valgrind (bool): True if we should start the process in valgrind.
        use_gdb (bool): True if we should start the process in gdb.
        use_valgrind_profiler (bool): True if we should start the process in
            the valgrind profiler.
        use_perftools_profiler (bool): True if we should profile the process
            using perftools.
        use_tmux (bool): True if we should start the process in tmux.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        pipe_stdin: If true, subprocess.PIPE will be passed to the process as
            stdin.

    Returns:
        Information about the process that was started including a handle to
        the process that was started.
    """
    # Detect which flags are set through environment variables. Each flag
    # can be forced on per process type, e.g. RAY_RAYLET_VALGRIND=1, which
    # overrides the corresponding keyword argument.
    valgrind_env_var = "RAY_{}_VALGRIND".format(process_type.upper())
    if os.environ.get(valgrind_env_var) == "1":
        logger.info("Detected environment variable '%s'.", valgrind_env_var)
        use_valgrind = True
    valgrind_profiler_env_var = "RAY_{}_VALGRIND_PROFILER".format(
        process_type.upper())
    if os.environ.get(valgrind_profiler_env_var) == "1":
        logger.info("Detected environment variable '%s'.",
                    valgrind_profiler_env_var)
        use_valgrind_profiler = True
    perftools_profiler_env_var = "RAY_{}_PERFTOOLS_PROFILER".format(
        process_type.upper())
    if os.environ.get(perftools_profiler_env_var) == "1":
        logger.info("Detected environment variable '%s'.",
                    perftools_profiler_env_var)
        use_perftools_profiler = True
    tmux_env_var = "RAY_{}_TMUX".format(process_type.upper())
    if os.environ.get(tmux_env_var) == "1":
        logger.info("Detected environment variable '%s'.", tmux_env_var)
        use_tmux = True
    gdb_env_var = "RAY_{}_GDB".format(process_type.upper())
    if os.environ.get(gdb_env_var) == "1":
        logger.info("Detected environment variable '%s'.", gdb_env_var)
        use_gdb = True

    # These four wrappers are mutually exclusive; reject ambiguous requests.
    if sum(
        [use_gdb, use_valgrind, use_valgrind_profiler, use_perftools_profiler
         ]) > 1:
        raise ValueError(
            "At most one of the 'use_gdb', 'use_valgrind', "
            "'use_valgrind_profiler', and 'use_perftools_profiler' flags can "
            "be used at a time.")
    if env_updates is None:
        env_updates = {}
    if not isinstance(env_updates, dict):
        raise ValueError("The 'env_updates' argument must be a dictionary.")

    modified_env = os.environ.copy()
    modified_env.update(env_updates)

    if use_gdb:
        if not use_tmux:
            raise ValueError(
                "If 'use_gdb' is true, then 'use_tmux' must be true as well.")

        # TODO(suquark): Any better temp file creation here?
        # Write a gdb init script that runs the original command with its
        # arguments, then replace the command with a gdb invocation.
        gdb_init_path = "/tmp/ray/gdb_init_{}_{}".format(
            process_type, time.time())
        ray_process_path = command[0]
        ray_process_args = command[1:]
        run_args = " ".join(["'{}'".format(arg) for arg in ray_process_args])
        with open(gdb_init_path, "w") as gdb_init_file:
            gdb_init_file.write("run {}".format(run_args))
        command = ["gdb", ray_process_path, "-x", gdb_init_path]

    if use_valgrind:
        command = [
            "valgrind", "--track-origins=yes", "--leak-check=full",
            "--show-leak-kinds=all", "--leak-check-heuristics=stdstring",
            "--error-exitcode=1"
        ] + command

    if use_valgrind_profiler:
        command = ["valgrind", "--tool=callgrind"] + command

    if use_perftools_profiler:
        # NOTE(review): assumes PERFTOOLS_PATH and PERFTOOLS_LOGFILE are set
        # in the environment; a KeyError is raised here otherwise.
        modified_env["LD_PRELOAD"] = os.environ["PERFTOOLS_PATH"]
        modified_env["CPUPROFILE"] = os.environ["PERFTOOLS_LOGFILE"]

    if use_tmux:
        # The command has to be created exactly as below to ensure that it
        # works on all versions of tmux. (Tested with tmux 1.8-5, travis'
        # version, and tmux 2.1)
        command = ["tmux", "new-session", "-d", "{}".format(" ".join(command))]

    # Block sigint for spawned processes so they aren't killed by the SIGINT
    # propagated from the shell on Ctrl-C so we can handle KeyboardInterrupts
    # in interactive sessions. This is only supported in Python 3.3 and above.
    def block_sigint():
        import signal
        signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT})

    # NOTE(review): preexec_fn is POSIX-only, so this path presumably only
    # runs on Unix-like systems -- confirm before porting to Windows.
    process = subprocess.Popen(
        command,
        env=modified_env,
        cwd=cwd,
        stdout=stdout_file,
        stderr=stderr_file,
        stdin=subprocess.PIPE if pipe_stdin else None,
        preexec_fn=block_sigint)

    return ProcessInfo(
        process=process,
        stdout_file=stdout_file.name if stdout_file is not None else None,
        stderr_file=stderr_file.name if stderr_file is not None else None,
        use_valgrind=use_valgrind,
        use_gdb=use_gdb,
        use_valgrind_profiler=use_valgrind_profiler,
        use_perftools_profiler=use_perftools_profiler,
        use_tmux=use_tmux)
def wait_for_redis_to_start(redis_ip_address,
                            redis_port,
                            password=None,
                            num_retries=5):
    """Wait for a Redis server to be available.

    This is accomplished by creating a Redis client and sending a random
    command to the server until the command gets through.

    Args:
        redis_ip_address (str): The IP address of the redis server.
        redis_port (int): The port of the redis server.
        password (str): The password of the redis server.
        num_retries (int): The number of times to try connecting with redis.
            The client will sleep for one second between attempts.

    Raises:
        Exception: An exception is raised if we could not connect with Redis.
    """
    redis_client = redis.StrictRedis(
        host=redis_ip_address, port=redis_port, password=password)
    for _ in range(num_retries):
        try:
            # Run some random command and see if it worked.
            logger.debug(
                "Waiting for redis server at {}:{} to respond...".format(
                    redis_ip_address, redis_port))
            redis_client.client_list()
        except redis.ConnectionError:
            # Wait a little bit.
            time.sleep(1)
            logger.info("Failed to connect to the redis server, retrying.")
        else:
            # The server responded; we are done.
            return
    raise Exception("Unable to connect to Redis. If the Redis instance is "
                    "on a different machine, check that your firewall is "
                    "configured properly.")
def _compute_version_info():
    """Compute the versions of Python, pyarrow, and Ray.

    Returns:
        A (ray_version, python_version, pyarrow_version) tuple of strings.
    """
    python_version = "{}.{}.{}".format(*sys.version_info[:3])
    return (ray.__version__, python_version, pyarrow.__version__)
def _put_version_info_in_redis(redis_client):
    """Store version information in Redis.

    This will be used to detect if workers or drivers are started using
    different versions of Python, pyarrow, or Ray.

    Args:
        redis_client: A client for the primary Redis shard.
    """
    version_info = _compute_version_info()
    redis_client.set("VERSION_INFO", json.dumps(version_info))
def check_version_info(redis_client):
    """Check if various version info of this process is correct.

    This will be used to detect if workers or drivers are started using
    different versions of Python, pyarrow, or Ray. If the version
    information is not present in Redis, then no check is done.

    Args:
        redis_client: A client for the primary Redis shard.

    Raises:
        Exception: An exception is raised if there is a version mismatch
            in the Ray or Python version (a pyarrow-only mismatch just
            logs a warning).
    """
    redis_reply = redis_client.get("VERSION_INFO")
    # Don't do the check if there is no version information in Redis. This
    # is to make it easier to do things like start the processes by hand.
    if redis_reply is None:
        return
    true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))
    version_info = _compute_version_info()
    if version_info != true_version_info:
        node_ip_address = get_node_ip_address()
        error_message = ("Version mismatch: The cluster was started with:\n"
                         " Ray: " + true_version_info[0] + "\n"
                         " Python: " + true_version_info[1] + "\n"
                         " Pyarrow: " + str(true_version_info[2]) + "\n"
                         "This process on node " + node_ip_address +
                         " was started with:" + "\n"
                         " Ray: " + version_info[0] + "\n"
                         " Python: " + version_info[1] + "\n"
                         " Pyarrow: " + str(version_info[2]))
        # Only a Ray or Python mismatch (the first two tuple entries) is
        # fatal; a pyarrow version difference only produces a warning.
        if version_info[:2] != true_version_info[:2]:
            raise Exception(error_message)
        else:
            logger.warning(error_message)
def start_reaper():
    """Start the reaper process.

    This is a lightweight process that simply waits for its parent process
    to die and then terminates its own process group. This allows us to
    ensure that ray processes are always terminated properly so long as
    that process itself isn't SIGKILLed.

    Returns:
        ProcessInfo for the process that was started, or None if the reaper
        could not be started safely.
    """
    # Make ourselves a process group leader so that the reaper can clean
    # up other ray processes without killing the process group of the
    # process that started us.
    try:
        os.setpgrp()
    except OSError as e:
        logger.warning("setpgrp failed, processes may not be "
                       "cleaned up properly: {}.".format(e))
        # Don't start the reaper in this case as it could result in killing
        # other user processes.
        return None

    reaper_script = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "ray_process_reaper.py")
    return start_ray_process(
        [sys.executable, "-u", reaper_script],
        ray_constants.PROCESS_TYPE_REAPER,
        pipe_stdin=True)
def start_redis(node_ip_address,
                redirect_files,
                resource_spec,
                port=None,
                redis_shard_ports=None,
                num_redis_shards=1,
                redis_max_clients=None,
                redirect_worker_output=False,
                password=None,
                use_credis=None,
                include_java=False):
    """Start the Redis global state store.

    Args:
        node_ip_address: The IP address of the current node. This is only
            used for recording the log filenames in Redis.
        redirect_files: The list of (stdout, stderr) file pairs, one pair
            for the primary shard followed by one per non-primary shard.
        resource_spec (ResourceSpec): Resources for the node.
        port (int): If provided, the primary Redis shard will be started on
            this port.
        redis_shard_ports: A list of the ports to use for the non-primary
            Redis shards.
        num_redis_shards (int): If provided, the number of Redis shards to
            start, in addition to the primary one. The default value is one
            shard.
        redis_max_clients: If this is provided, Ray will attempt to
            configure Redis with this maxclients number.
        redirect_worker_output (bool): True if worker output should be
            redirected to a file and false otherwise. Workers will have
            access to this value when they start up.
        password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        use_credis: If True, additionally load the chain-replicated
            libraries into the redis servers. Defaults to None, which means
            its value is set by the presence of "RAY_USE_NEW_GCS" in
            os.environ.
        include_java (bool): If True, the raylet backend can also support
            Java worker.

    Returns:
        A tuple of the address for the primary Redis shard, a list of
        addresses for the remaining shards, and the processes that were
        started.
    """
    if len(redirect_files) != 1 + num_redis_shards:
        raise ValueError("The number of redirect file pairs should be equal "
                         "to the number of redis shards (including the "
                         "primary shard) we will start.")
    if redis_shard_ports is None:
        redis_shard_ports = num_redis_shards * [None]
    elif len(redis_shard_ports) != num_redis_shards:
        raise Exception("The number of Redis shard ports does not match the "
                        "number of Redis shards.")
    processes = []
    if use_credis is None:
        use_credis = ("RAY_USE_NEW_GCS" in os.environ)
    if use_credis:
        if password is not None:
            # TODO(pschafhalter) remove this once credis supports
            # authenticating Redis ports
            raise Exception("Setting the `redis_password` argument is not "
                            "supported in credis. To run Ray with "
                            "password-protected Redis ports, ensure that "
                            "the environment variable `RAY_USE_NEW_GCS=off`.")
        assert num_redis_shards == 1, (
            "For now, RAY_USE_NEW_GCS supports 1 shard, and credis "
            "supports 1-node chain for that shard only.")
    if use_credis:
        redis_executable = CREDIS_EXECUTABLE
        # TODO(suquark): We need credis here because some symbols need to be
        # imported from credis dynamically through dlopen when Ray is built
        # with RAY_USE_NEW_GCS=on. We should remove them later for the primary
        # shard.
        # See src/ray/gcs/redis_module/ray_redis_module.cc
        redis_modules = [CREDIS_MASTER_MODULE, REDIS_MODULE]
    else:
        redis_executable = REDIS_EXECUTABLE
        redis_modules = [REDIS_MODULE]
    redis_stdout_file, redis_stderr_file = redirect_files[0]
    # Start the primary Redis shard.
    port, p = _start_redis_instance(
        redis_executable,
        modules=redis_modules,
        port=port,
        password=password,
        redis_max_clients=redis_max_clients,
        # Below we use None to indicate no limit on the memory of the
        # primary Redis shard.
        redis_max_memory=None,
        stdout_file=redis_stdout_file,
        stderr_file=redis_stderr_file)
    processes.append(p)
    redis_address = address(node_ip_address, port)
    # Register the number of Redis shards in the primary shard, so that clients
    # know how many redis shards to expect under RedisShards.
    primary_redis_client = redis.StrictRedis(
        host=node_ip_address, port=port, password=password)
    primary_redis_client.set("NumRedisShards", str(num_redis_shards))
    # Put the redirect_worker_output bool in the Redis shard so that workers
    # can access it and know whether or not to redirect their output.
    primary_redis_client.set("RedirectOutput", 1
                             if redirect_worker_output else 0)
    # put the include_java bool to primary redis-server, so that other nodes
    # can access it and know whether or not to enable cross-languages.
    primary_redis_client.set("INCLUDE_JAVA", 1 if include_java else 0)
    # Init job counter to GCS.
    primary_redis_client.set("JobCounter", 0)
    # Store version information in the primary Redis shard.
    _put_version_info_in_redis(primary_redis_client)
    # Calculate the redis memory. The primary shard was started without a
    # limit above; only the non-primary shards get redis_max_memory.
    assert resource_spec.resolved()
    redis_max_memory = resource_spec.redis_max_memory
    # Start other Redis shards. Each Redis shard logs to a separate file,
    # prefixed by "redis-<shard number>".
    redis_shards = []
    for i in range(num_redis_shards):
        redis_stdout_file, redis_stderr_file = redirect_files[i + 1]
        if use_credis:
            redis_executable = CREDIS_EXECUTABLE
            # It is important to load the credis module BEFORE the ray module,
            # as the latter contains an extern declaration that the former
            # supplies.
            redis_modules = [CREDIS_MEMBER_MODULE, REDIS_MODULE]
        else:
            redis_executable = REDIS_EXECUTABLE
            redis_modules = [REDIS_MODULE]
        redis_shard_port, p = _start_redis_instance(
            redis_executable,
            modules=redis_modules,
            port=redis_shard_ports[i],
            password=password,
            redis_max_clients=redis_max_clients,
            redis_max_memory=redis_max_memory,
            stdout_file=redis_stdout_file,
            stderr_file=redis_stderr_file)
        processes.append(p)
        shard_address = address(node_ip_address, redis_shard_port)
        redis_shards.append(shard_address)
        # Store redis shard information in the primary redis shard.
        primary_redis_client.rpush("RedisShards", shard_address)
    if use_credis:
        # Configure the chain state. The way it is intended to work is
        # the following:
        #
        # PRIMARY_SHARD
        #
        # SHARD_1 (master replica) -> SHARD_1 (member replica)
        #                          -> SHARD_1 (member replica)
        #
        # SHARD_2 (master replica) -> SHARD_2 (member replica)
        #                          -> SHARD_2 (member replica)
        # ...
        #
        #
        # If we have credis members in future, their modules should be:
        # [CREDIS_MEMBER_MODULE, REDIS_MODULE], and they will be initialized by
        # execute_command("MEMBER.CONNECT_TO_MASTER", node_ip_address, port)
        #
        # Currently we have num_redis_shards == 1, so only one chain will be
        # created, and the chain only contains master.

        # TODO(suquark): Currently, this is not correct because we are
        # using the master replica as the primary shard. This should be
        # fixed later. I had tried to fix it but failed because of heartbeat
        # issues.
        primary_client = redis.StrictRedis(
            host=node_ip_address, port=port, password=password)
        shard_client = redis.StrictRedis(
            host=node_ip_address, port=redis_shard_port, password=password)
        primary_client.execute_command("MASTER.ADD", node_ip_address,
                                       redis_shard_port)
        shard_client.execute_command("MEMBER.CONNECT_TO_MASTER",
                                     node_ip_address, port)
    return redis_address, redis_shards, processes
def _start_redis_instance(executable,
                          modules,
                          port=None,
                          redis_max_clients=None,
                          num_retries=20,
                          stdout_file=None,
                          stderr_file=None,
                          password=None,
                          redis_max_memory=None):
    """Start a single Redis server.

    Notes:
        If "port" is not None, then we will only use this port and try
        only once. Otherwise, random ports will be used and the maximum
        retries count is "num_retries".

    Args:
        executable (str): Full path of the redis-server executable.
        modules (list of str): A list of pathnames, pointing to the redis
            module(s) that will be loaded in this redis server.
        port (int): If provided, start a Redis server with this port.
        redis_max_clients: If this is provided, Ray will attempt to
            configure Redis with this maxclients number.
        num_retries (int): The number of times to attempt to start Redis. If
            a port is provided, this defaults to 1.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        redis_max_memory: The max amount of memory (in bytes) to allow redis
            to use, or None for no limit. Once the limit is exceeded, redis
            will start LRU eviction of entries.

    Returns:
        A tuple of the port used by Redis and ProcessInfo for the process
        that was started. If a port is passed in, then the returned port
        value is the same.

    Raises:
        Exception: An exception is raised if Redis could not be started.
    """
    assert os.path.isfile(executable)
    for module in modules:
        assert os.path.isfile(module)
    counter = 0
    if port is not None:
        # If a port is specified, then try only once to connect.
        # This ensures that we will use the given port.
        num_retries = 1
    else:
        port = new_port()
    load_module_args = []
    for module in modules:
        load_module_args += ["--loadmodule", module]
    while counter < num_retries:
        if counter > 0:
            logger.warning("Redis failed to start, retrying now.")
        # Construct the command to start the Redis server.
        command = [executable]
        if password:
            if " " in password:
                raise ValueError("Spaces not permitted in redis password.")
            command += ["--requirepass", password]
        command += (
            ["--port", str(port), "--loglevel", "warning"] + load_module_args)
        process_info = start_ray_process(
            command,
            ray_constants.PROCESS_TYPE_REDIS_SERVER,
            stdout_file=stdout_file,
            stderr_file=stderr_file)
        time.sleep(0.1)
        # Check if Redis successfully started (or at least if it the executable
        # did not exit within 0.1 seconds).
        if process_info.process.poll() is None:
            break
        # The server exited immediately (most likely the port was taken);
        # pick a new random port and retry.
        port = new_port()
        counter += 1
    if counter == num_retries:
        raise Exception("Couldn't start Redis. Check log files: {} {}".format(
            stdout_file.name, stderr_file.name))
    # Create a Redis client just for configuring Redis.
    redis_client = redis.StrictRedis(
        host="127.0.0.1", port=port, password=password)
    # Wait for the Redis server to start.
    wait_for_redis_to_start("127.0.0.1", port, password=password)
    # Configure Redis to generate keyspace notifications. TODO(rkn): Change
    # this to only generate notifications for the export keys.
    redis_client.config_set("notify-keyspace-events", "Kl")
    # Configure Redis to not run in protected mode so that processes on other
    # hosts can connect to it. TODO(rkn): Do this in a more secure way.
    redis_client.config_set("protected-mode", "no")
    # Discard old task and object metadata.
    if redis_max_memory is not None:
        redis_client.config_set("maxmemory", str(redis_max_memory))
        redis_client.config_set("maxmemory-policy", "allkeys-lru")
        redis_client.config_set("maxmemory-samples", "10")
        logger.debug("Starting Redis shard with {} GB max memory.".format(
            round(redis_max_memory / 1e9, 2)))
    # If redis_max_clients is provided, attempt to raise the number of maximum
    # number of Redis clients.
    if redis_max_clients is not None:
        redis_client.config_set("maxclients", str(redis_max_clients))
    else:
        # If redis_max_clients is not provided, determine the current ulimit.
        # We will use this to attempt to raise the maximum number of Redis
        # clients.
        current_max_clients = int(
            redis_client.config_get("maxclients")["maxclients"])
        # The below command should be the same as doing ulimit -n.
        ulimit_n = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        # The quantity redis_client_buffer appears to be the required buffer
        # between the maximum number of redis clients and ulimit -n. That is,
        # if ulimit -n returns 10000, then we can set maxclients to
        # 10000 - redis_client_buffer.
        redis_client_buffer = 32
        if current_max_clients < ulimit_n - redis_client_buffer:
            redis_client.config_set("maxclients",
                                    ulimit_n - redis_client_buffer)
    # Increase the hard and soft limits for the redis client pubsub buffer to
    # 128MB. This is a hack to make it less likely for pubsub messages to be
    # dropped and for pubsub connections to therefore be killed.
    cur_config = (redis_client.config_get("client-output-buffer-limit")[
        "client-output-buffer-limit"])
    cur_config_list = cur_config.split()
    assert len(cur_config_list) == 12
    # Fields 8-11 of the config string are the pubsub class settings:
    # <class> <hard limit> <soft limit> <soft seconds>.
    cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
    redis_client.config_set("client-output-buffer-limit",
                            " ".join(cur_config_list))
    # Put a time stamp in Redis to indicate when it was started.
    redis_client.set("redis_start_time", time.time())
    return port, process_info
def start_log_monitor(redis_address,
                      logs_dir,
                      stdout_file=None,
                      stderr_file=None,
                      redis_password=None):
    """Start a log monitor process.

    Args:
        redis_address (str): The address of the Redis instance.
        logs_dir (str): The directory of logging files.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        redis_password (str): The password of the redis server.

    Returns:
        ProcessInfo for the process that was started.
    """
    monitor_script = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "log_monitor.py")
    command = [
        sys.executable,
        "-u",
        monitor_script,
        "--redis-address={}".format(redis_address),
        "--logs-dir={}".format(logs_dir),
    ]
    if redis_password:
        command.extend(["--redis-password", redis_password])
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_LOG_MONITOR,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
def start_reporter(redis_address,
                   stdout_file=None,
                   stderr_file=None,
                   redis_password=None):
    """Start a reporter process.

    Args:
        redis_address (str): The address of the Redis instance.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        redis_password (str): The password of the redis server.

    Returns:
        ProcessInfo for the process that was started, or None if the
        required psutil dependency is not installed.
    """
    # Check the dependency up front so we never build a command we cannot
    # launch.
    try:
        import psutil  # noqa: F401
    except ImportError:
        logger.warning("Failed to start the reporter. The reporter requires "
                       "'pip install psutil'.")
        return None

    reporter_script = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "reporter.py")
    command = [
        sys.executable,
        "-u",
        reporter_script,
        "--redis-address={}".format(redis_address),
    ]
    if redis_password:
        command.extend(["--redis-password", redis_password])
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_REPORTER,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
def start_dashboard(require_webui,
                    host,
                    redis_address,
                    temp_dir,
                    stdout_file=None,
                    stderr_file=None,
                    redis_password=None):
    """Start a dashboard process.

    Args:
        require_webui (bool): If true, this will raise an exception if we
            fail to start the webui. Otherwise it will print a warning if
            we fail to start the webui.
        host (str): The host to bind the dashboard web server to.
        redis_address (str): The address of the Redis instance.
        temp_dir (str): The temporary directory used for log files and
            information for this Ray session.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        redis_password (str): The password of the redis server.

    Returns:
        A (dashboard_url, ProcessInfo) tuple, or (None, None) if the
        dashboard's Python dependencies are missing and require_webui is
        False.

    Raises:
        ImportError: If require_webui is True and the dashboard's Python
            dependencies are not installed.
    """
    # Scan upward from the default port for the first port we can bind.
    port = 8265  # Note: list(map(ord, "RAY")) == [82, 65, 89]
    while True:
        port_test_socket = socket.socket()
        try:
            port_test_socket.bind(("127.0.0.1", port))
            break
        except socket.error:
            port += 1
        finally:
            # Close the probe socket on every path; previously it was only
            # closed on success, leaking a file descriptor per failed bind.
            port_test_socket.close()

    dashboard_filepath = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "dashboard/dashboard.py")
    command = [
        sys.executable,
        "-u",
        dashboard_filepath,
        "--host={}".format(host),
        "--port={}".format(port),
        "--redis-address={}".format(redis_address),
        "--temp-dir={}".format(temp_dir),
    ]
    if redis_password:
        command += ["--redis-password", redis_password]

    webui_dependencies_present = True
    try:
        import aiohttp  # noqa: F401
        import psutil  # noqa: F401
        import setproctitle  # noqa: F401
        import grpc  # noqa: F401
    except ImportError:
        webui_dependencies_present = False
        warning_message = (
            "Failed to start the dashboard. The dashboard requires Python 3 "
            "as well as 'pip install aiohttp psutil setproctitle grpcio'.")
        if require_webui:
            raise ImportError(warning_message)
        else:
            logger.warning(warning_message)

    if webui_dependencies_present:
        process_info = start_ray_process(
            command,
            ray_constants.PROCESS_TYPE_DASHBOARD,
            stdout_file=stdout_file,
            stderr_file=stderr_file)

        # Advertise the externally reachable address rather than 0.0.0.0.
        dashboard_url = "{}:{}".format(
            host if host != "0.0.0.0" else get_node_ip_address(), port)
        logger.info("View the Ray dashboard at {}{}{}{}{}".format(
            colorama.Style.BRIGHT, colorama.Fore.GREEN, dashboard_url,
            colorama.Fore.RESET, colorama.Style.NORMAL))
        return dashboard_url, process_info
    else:
        return None, None
def start_raylet(redis_address,
                 node_ip_address,
                 node_manager_port,
                 raylet_name,
                 plasma_store_name,
                 worker_path,
                 temp_dir,
                 session_dir,
                 resource_spec,
                 object_manager_port=None,
                 redis_password=None,
                 use_valgrind=False,
                 use_profiler=False,
                 stdout_file=None,
                 stderr_file=None,
                 config=None,
                 include_java=False,
                 java_worker_options=None,
                 load_code_from_local=False,
                 use_pickle=False):
    """Start a raylet, which is a combined local scheduler and object manager.

    Args:
        redis_address (str): The address of the primary Redis server.
        node_ip_address (str): The IP address of this node.
        node_manager_port(int): The port to use for the node manager. This
            must not be 0.
        raylet_name (str): The name of the raylet socket to create.
        plasma_store_name (str): The name of the plasma store socket to
            connect to.
        worker_path (str): The path of the Python file that new worker
            processes will execute.
        temp_dir (str): The path of the temporary directory Ray will use.
        session_dir (str): The path of this session.
        resource_spec (ResourceSpec): Resources for this raylet.
        object_manager_port: The port to use for the object manager. If this
            is None, then the object manager will choose its own port.
        redis_password: The password to use when connecting to Redis.
        use_valgrind (bool): True if the raylet should be started inside
            of valgrind. If this is True, use_profiler must be False.
        use_profiler (bool): True if the raylet should be started inside
            a profiler. If this is True, use_valgrind must be False.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        config (dict|None): Optional Raylet configuration that will
            override defaults in RayConfig.
        include_java (bool): If True, the raylet backend can also support
            Java worker.
        java_worker_options (str): The command options for Java worker.
        load_code_from_local (bool): If True, workers load code from the
            local filesystem instead of from the GCS.
        use_pickle (bool): If True, use cloudpickle for serialization.

    Returns:
        ProcessInfo for the process that was started.
    """
    # The caller must provide a node manager port so that we can correctly
    # populate the command to start a worker.
    assert node_manager_port is not None and node_manager_port != 0
    config = config or {}
    # Flatten the config dict into the "k1,v1,k2,v2" form the raylet expects.
    config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
    if use_valgrind and use_profiler:
        raise Exception("Cannot use valgrind and profiler at the same time.")
    assert resource_spec.resolved()
    num_initial_workers = resource_spec.num_cpus
    static_resources = resource_spec.to_resource_dict()
    # Limit the number of workers that can be started in parallel by the
    # raylet. However, make sure it is at least 1.
    num_cpus_static = static_resources.get("CPU", 0)
    maximum_startup_concurrency = max(
        1, min(multiprocessing.cpu_count(), num_cpus_static))
    # Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
    resource_argument = ",".join(
        ["{},{}".format(*kv) for kv in static_resources.items()])
    gcs_ip_address, gcs_port = redis_address.split(":")
    if include_java is True:
        java_worker_options = (java_worker_options
                               or DEFAULT_JAVA_WORKER_OPTIONS)
        java_worker_command = build_java_worker_command(
            java_worker_options,
            redis_address,
            node_manager_port,
            plasma_store_name,
            raylet_name,
            redis_password,
            session_dir,
        )
    else:
        # An empty string tells the raylet that Java workers are disabled.
        java_worker_command = ""
    # Create the command that the Raylet will use to start workers.
    start_worker_command = ("{} {} "
                            "--node-ip-address={} "
                            "--node-manager-port={} "
                            "--object-store-name={} "
                            "--raylet-name={} "
                            "--redis-address={} "
                            "--config-list={} "
                            "--temp-dir={}".format(
                                sys.executable, worker_path, node_ip_address,
                                node_manager_port, plasma_store_name,
                                raylet_name, redis_address, config_str,
                                temp_dir))
    if redis_password:
        start_worker_command += " --redis-password {}".format(redis_password)
    # If the object manager port is None, then use 0 to cause the object
    # manager to choose its own port.
    if object_manager_port is None:
        object_manager_port = 0
    if load_code_from_local:
        start_worker_command += " --load-code-from-local "
    if use_pickle:
        start_worker_command += " --use-pickle "
    # Assemble the raylet command line itself; the worker commands above are
    # passed through as single arguments for the raylet to fork later.
    command = [
        RAYLET_EXECUTABLE,
        "--raylet_socket_name={}".format(raylet_name),
        "--store_socket_name={}".format(plasma_store_name),
        "--object_manager_port={}".format(object_manager_port),
        "--node_manager_port={}".format(node_manager_port),
        "--node_ip_address={}".format(node_ip_address),
        "--redis_address={}".format(gcs_ip_address),
        "--redis_port={}".format(gcs_port),
        "--num_initial_workers={}".format(num_initial_workers),
        "--maximum_startup_concurrency={}".format(maximum_startup_concurrency),
        "--static_resource_list={}".format(resource_argument),
        "--config_list={}".format(config_str),
        "--python_worker_command={}".format(start_worker_command),
        "--java_worker_command={}".format(java_worker_command),
        "--redis_password={}".format(redis_password or ""),
        "--temp_dir={}".format(temp_dir),
        "--session_dir={}".format(session_dir),
    ]
    process_info = start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_RAYLET,
        use_valgrind=use_valgrind,
        use_gdb=False,
        use_valgrind_profiler=use_profiler,
        use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
        stdout_file=stdout_file,
        stderr_file=stderr_file)
    return process_info
def get_ray_jars_dir():
    """Return the directory containing all ray-related jars and
    their dependencies.

    Returns:
        Absolute path of the packaged "jars" directory next to this module.

    Raises:
        Exception: If ray was built without java support, so the jars were
            never packaged into the wheel.
    """
    current_dir = os.path.abspath(os.path.dirname(__file__))
    jars_dir = os.path.abspath(os.path.join(current_dir, "jars"))
    if not os.path.exists(jars_dir):
        raise Exception("Ray jars is not packaged into ray. "
                        "Please build ray with java enabled "
                        "(set env var RAY_INSTALL_JAVA=1)")
    # Return the path computed above instead of rebuilding the same
    # abspath/join expression a second time.
    return jars_dir
def build_java_worker_command(
        java_worker_options,
        redis_address,
        node_manager_port,
        plasma_store_name,
        raylet_name,
        redis_password,
        session_dir,
):
    """Assemble the command line used to start a Java worker.

    Args:
        java_worker_options (str): The command options for Java worker.
        redis_address (str): Redis address of GCS.
        node_manager_port (int): Port used by the node manager.
        plasma_store_name (str): The name of the plasma store socket to
            connect to.
        raylet_name (str): The name of the raylet socket to create.
        redis_password (str): The password of connect to redis.
        session_dir (str): The path of this session.

    Returns:
        The command string for starting Java worker.
    """
    cmd = "java "
    if redis_address is not None:
        cmd += "-Dray.redis.address={} ".format(redis_address)
    cmd += "-Dray.raylet.node-manager-port={} ".format(node_manager_port)
    if plasma_store_name is not None:
        cmd += (
            "-Dray.object-store.socket-name={} ".format(plasma_store_name))
    if raylet_name is not None:
        cmd += "-Dray.raylet.socket-name={} ".format(raylet_name)
    if redis_password is not None:
        cmd += "-Dray.redis.password={} ".format(redis_password)
    cmd += "-Dray.home={} ".format(RAY_HOME)
    cmd += "-Dray.log-dir={} ".format(os.path.join(session_dir, "logs"))
    cmd += ("-Dray.raylet.config.num_workers_per_process_java=" +
            "RAY_WORKER_NUM_WORKERS_PLACEHOLDER ")

    # Make the bundled ray jars visible on the worker's classpath.
    ray_jars = os.path.join(get_ray_jars_dir(), "*")
    import platform
    classpath_sep = ";" if platform.system() == "Windows" else ":"

    if java_worker_options is None:
        java_worker_options = ""
    options = re.split("\\s+", java_worker_options)
    # If the caller already supplied a classpath option, extend the entry
    # right after it; otherwise prepend our own -cp entry.
    classpath_pos = next((idx + 1 for idx, opt in enumerate(options)
                          if opt in ("-cp", "-classpath")), -1)
    if classpath_pos != -1:
        options[classpath_pos] = options[classpath_pos] + classpath_sep \
            + ray_jars
    else:
        options = ["-cp", ray_jars] + options
    # `java_worker_options` goes last so it can override the options above.
    cmd += " ".join(options) + " "
    cmd += "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_0 "
    cmd += "org.ray.runtime.runner.worker.DefaultWorker"
    return cmd
def determine_plasma_store_config(object_store_memory,
                                  plasma_directory=None,
                                  huge_pages=False):
    """Figure out how to configure the plasma object store.

    This determines which directory to use for the plasma store. On Linux
    we try to use /dev/shm unless the shared memory file system is too
    small, in which case we fall back to /tmp. If any of the object store
    memory or plasma directory parameters are specified by the user, those
    values are preserved.

    Args:
        object_store_memory (int): The object store memory to use.
        plasma_directory (str): The user-specified plasma directory parameter.
        huge_pages (bool): The user-specified huge pages parameter.

    Returns:
        The plasma directory to use. If it is specified by the user, then
        that value will be preserved.
    """
    system_memory = ray.utils.get_system_memory()
    if plasma_directory is not None:
        # The caller picked a directory explicitly; trust it (after the
        # isdir check below) and skip the memory verification.
        plasma_directory = os.path.abspath(plasma_directory)
        logger.warning("WARNING: object_store_memory is not verified when "
                       "plasma_directory is set.")
    else:
        # Default to /dev/shm on Linux (when it is large enough) and /tmp
        # everywhere else.
        if sys.platform in ("linux", "linux2"):
            shm_avail = ray.utils.get_shared_memory_bytes()
            if shm_avail > object_store_memory:
                plasma_directory = "/dev/shm"
            else:
                plasma_directory = "/tmp"
                logger.warning(
                    "WARNING: The object store is using /tmp instead of "
                    "/dev/shm because /dev/shm has only {} bytes available. "
                    "This may slow down performance! You may be able to free "
                    "up space by deleting files in /dev/shm or terminating "
                    "any running plasma_store_server processes. If you are "
                    "inside a Docker container, you may need to pass an "
                    "argument with the flag '--shm-size' to 'docker run'.".
                    format(shm_avail))
        else:
            plasma_directory = "/tmp"

        # Sanity-check the requested size against total system memory.
        if object_store_memory > system_memory:
            raise Exception(
                "The requested object store memory size is greater "
                "than the total available memory.")

    if not os.path.isdir(plasma_directory):
        raise Exception(
            "The file {} does not exist or is not a directory.".format(
                plasma_directory))
    return plasma_directory
def _start_plasma_store(plasma_store_memory,
                        use_valgrind=False,
                        use_profiler=False,
                        stdout_file=None,
                        stderr_file=None,
                        plasma_directory=None,
                        huge_pages=False,
                        socket_name=None):
    """Start a plasma store process.

    Args:
        plasma_store_memory (int): The amount of memory in bytes to start
            the plasma store with.
        use_valgrind (bool): True if the plasma store should be started
            inside of valgrind. If this is True, use_profiler must be False.
        use_profiler (bool): True if the plasma store should be started
            inside a profiler. If this is True, use_valgrind must be False.
        stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        plasma_directory: A directory where the Plasma memory mapped files
            will be created.
        huge_pages: a boolean flag indicating whether to start the
            Object Store with hugetlbfs support. Requires plasma_directory.
        socket_name (str): If provided, it will specify the socket
            name used by the plasma store.

    Return:
        A tuple of the name of the plasma store socket and ProcessInfo for
        the plasma store process.
    """
    if use_valgrind and use_profiler:
        raise Exception("Cannot use valgrind and profiler at the same time.")
    if huge_pages:
        # hugetlbfs is Linux-only and needs an explicit mount directory.
        if sys.platform not in ("linux", "linux2"):
            raise Exception("The huge_pages argument is only supported on "
                            "Linux.")
        if plasma_directory is None:
            raise Exception("If huge_pages is True, then the "
                            "plasma_directory argument must be provided.")

    if not isinstance(plasma_store_memory, int):
        plasma_store_memory = int(plasma_store_memory)

    command = [
        PLASMA_STORE_EXECUTABLE,
        "-s", socket_name,
        "-m", str(plasma_store_memory),
    ]
    if plasma_directory is not None:
        command.extend(["-d", plasma_directory])
    if huge_pages:
        command.append("-h")
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_PLASMA_STORE,
        use_valgrind=use_valgrind,
        use_valgrind_profiler=use_profiler,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
def start_plasma_store(resource_spec,
                       stdout_file=None,
                       stderr_file=None,
                       plasma_directory=None,
                       huge_pages=False,
                       plasma_store_socket_name=None):
    """This method starts an object store process.

    Args:
        resource_spec (ResourceSpec): Resources for the node.
        stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        plasma_directory: A directory where the Plasma memory mapped files
            will be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.

    Returns:
        ProcessInfo for the process that was started.
    """
    assert resource_spec.resolved()
    object_store_memory = resource_spec.object_store_memory
    plasma_directory = determine_plasma_store_config(
        object_store_memory, plasma_directory, huge_pages)

    if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES:
        raise ValueError("Attempting to cap object store memory usage at {} "
                         "bytes, but the minimum allowed is {} bytes.".format(
                             object_store_memory,
                             ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES))

    # Log the store size in GB, rounded to two decimal places.
    memory_gb = (object_store_memory / 10**7) / 10**2
    logger.debug("Starting the Plasma object store with {} GB memory "
                 "using {}.".format(round(memory_gb, 2), plasma_directory))
    return _start_plasma_store(
        object_store_memory,
        use_profiler=RUN_PLASMA_STORE_PROFILER,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        plasma_directory=plasma_directory,
        huge_pages=huge_pages,
        socket_name=plasma_store_socket_name)
def start_worker(node_ip_address,
                 object_store_name,
                 raylet_name,
                 redis_address,
                 worker_path,
                 temp_dir,
                 stdout_file=None,
                 stderr_file=None):
    """This method starts a worker process.

    Args:
        node_ip_address (str): The IP address of the node that this worker
            is running on.
        object_store_name (str): The socket name of the object store.
        raylet_name (str): The socket name of the raylet server.
        redis_address (str): The address that the Redis server is listening
            on.
        worker_path (str): The path of the source code which the worker
            process will run.
        temp_dir (str): The path of the temp dir.
        stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.

    Returns:
        ProcessInfo for the process that was started.
    """
    # -u keeps the worker's output unbuffered so logs show up promptly.
    command = [
        sys.executable,
        "-u",
        worker_path,
        "--node-ip-address=" + node_ip_address,
        "--object-store-name=" + object_store_name,
        "--raylet-name=" + raylet_name,
        "--redis-address=" + str(redis_address),
        "--temp-dir=" + temp_dir,
    ]
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_WORKER,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
def start_monitor(redis_address,
                  stdout_file=None,
                  stderr_file=None,
                  autoscaling_config=None,
                  redis_password=None):
    """Run a process to monitor the other processes.

    Args:
        redis_address (str): The address that the Redis server is listening
            on.
        stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        autoscaling_config: path to autoscaling config file.
        redis_password (str): The password of the redis server.

    Returns:
        ProcessInfo for the process that was started.
    """
    # monitor.py lives alongside this module.
    monitor_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "monitor.py")
    command = [
        sys.executable,
        "-u",
        monitor_path,
        "--redis-address=" + str(redis_address),
    ]
    if autoscaling_config:
        command.append("--autoscaling-config=" + str(autoscaling_config))
    if redis_password:
        command.append("--redis-password=" + redis_password)
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_MONITOR,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
def start_raylet_monitor(redis_address,
                         stdout_file=None,
                         stderr_file=None,
                         redis_password=None,
                         config=None):
    """Run a process to monitor the other processes.

    Args:
        redis_address (str): The address that the Redis server is listening
            on.
        stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        redis_password (str): The password of the redis server.
        config (dict|None): Optional configuration that will
            override defaults in RayConfig.

    Returns:
        ProcessInfo for the process that was started.
    """
    gcs_ip_address, gcs_port = redis_address.split(":")
    # Flatten the config dict into the "k1,v1,k2,v2" form the binary
    # expects.
    config_str = ",".join(
        "{},{}".format(*kv) for kv in (config or {}).items())
    command = [
        RAYLET_MONITOR_EXECUTABLE,
        "--redis_address={}".format(gcs_ip_address),
        "--redis_port={}".format(gcs_port),
        "--config_list={}".format(config_str),
    ]
    if redis_password:
        command.append("--redis_password={}".format(redis_password))
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_RAYLET_MONITOR,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/setup-dev.py
|
Python
|
#!/usr/bin/env python
"""This script allows you to develop RLlib without needing to compile Ray."""
import argparse
import click
import os
import subprocess
import ray
def do_link(package, force=False, local_path=""):
    """Replace the installed ray subpackage with a symlink to the local repo.

    Args:
        package (str): Name of the ray subpackage to link (e.g. "rllib").
        force (bool): Skip the interactive confirmation prompt.
        local_path (str): Extra relative prefix from this file to the local
            package checkout.
    """
    package_home = os.path.abspath(
        os.path.join(ray.__file__, "../{}".format(package)))
    local_home = os.path.abspath(
        os.path.join(__file__, local_path + "../{}".format(package)))
    if not os.path.isdir(package_home):
        print("{} does not exist. Continuing to link.".format(package_home))
    assert os.path.isdir(local_home), local_home
    if not force and not click.confirm(
            "This will replace:\n {}\nwith a symlink to:\n {}".format(
                package_home, local_home),
            default=True):
        return
    # Escalate to sudo only when the install directory is not writable.
    if os.access(os.path.dirname(package_home), os.W_OK):
        prefix = []
    else:
        print("You don't have write permission to {}, using sudo:".format(
            package_home))
        prefix = ["sudo"]
    subprocess.check_call(prefix + ["rm", "-rf", package_home])
    subprocess.check_call(prefix + ["ln", "-s", local_home, package_home])
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Setup dev.")
    parser.add_argument(
        "--yes", action="store_true", help="Don't ask for confirmation.")
    args = parser.parse_args()

    # rllib lives one directory further up than the other packages.
    do_link("rllib", force=args.yes, local_path="../../")
    for package in ("tune", "autoscaler", "scripts", "internal", "tests",
                    "experimental"):
        do_link(package, force=args.yes)
    print("Created links.\n\nIf you run into issues initializing Ray, please "
          "ensure that your local repo and the installed Ray are in sync "
          "(pip install -U the latest wheels at "
          "https://ray.readthedocs.io/en/latest/installation.html, "
          "and ensure you are up-to-date on the master branch on git).\n\n"
          "Note that you may need to delete the package symlinks when pip "
          "installing new Ray versions to prevent pip from overwriting files "
          "in your git repo.")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/signature.py
|
Python
|
from collections import namedtuple
import funcsigs
from funcsigs import Parameter
import logging
from ray.utils import is_cython
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
RayParameter = namedtuple("RayParameter", [
    "name",
    "kind_int",
    "default",
    "annotation",
    "partial_kwarg",
])
"""Serializable representation of a function parameter in Ray.

This differs from the funcsigs.Parameter object in that the funcsigs
ParameterKind is replaced with an int: ParameterKind objects are currently
non-serializable and the package is not being updated. The replacement is
done in `_scrub_parameters` and `_restore_parameters`.

Attributes:
    name (str): The name of the parameter as a string.
    kind_int (int): Describes how argument values are bound to the
        parameter. See funcsigs.Parameter and `_convert_to_parameter_kind`.
    default (object): The default value for the parameter if specified. If
        the parameter has no default value, this attribute is not set.
    annotation: The annotation for the parameter if specified. If the
        parameter has no annotation, this attribute is not set.
    partial_kwarg (bool): True if the parameter is mapped
        by 'functools.partial'.
"""

# Sentinel marking positional arguments in a flattened argument list.
# This dummy type is also defined in ArgumentsBuilder.java. Please keep it
# synced.
DUMMY_TYPE = b"__RAY_DUMMY__"
def get_signature(func):
    """Get signature parameters.

    Supports Cython functions by copying the relevant attributes from the
    Cython function onto a no-op Python function and inspecting that
    instead. This is somewhat brittle, since funcsigs may change, but given
    that funcsigs is written to a PEP, we hope it is relatively stable.
    Future versions of Python may allow overloading the inspect
    'isfunction' and 'ismethod' functions / create ABC for Python
    functions. Until then, it appears that Cython won't do anything about
    compatibility with the inspect module.

    Args:
        func: The function whose signature should be checked.

    Returns:
        A function signature object, which includes the names of the
        keyword arguments as well as their default values.

    Raises:
        TypeError: A type error if the signature is not supported
    """
    # is_cython covers both Cython functions and Cython instance methods.
    if is_cython(func):
        attrs = [
            "__code__", "__annotations__", "__defaults__", "__kwdefaults__"
        ]
        if not all(hasattr(func, attr) for attr in attrs):
            raise TypeError("{!r} is not a Python function we can process"
                            .format(func))
        cython_func = func

        # Rebind `func` to a plain no-op Python function and graft the
        # Cython function's introspection attributes onto it.
        def func():
            return

        for attr in attrs:
            setattr(func, attr, getattr(cython_func, attr))
    return funcsigs.signature(func)
def extract_signature(func, ignore_first=False):
    """Extract the function signature from the function.

    Args:
        func: The function whose signature should be extracted.
        ignore_first: True if the first argument should be ignored. This
            should be used when func is a method of a class.

    Returns:
        List of RayParameter objects representing the function signature.
    """
    params = list(get_signature(func).parameters.values())
    if ignore_first:
        # Drop the implicit 'self' parameter for methods.
        if not params:
            raise Exception("Methods must take a 'self' argument, but the "
                            "method '{}' does not have one.".format(
                                func.__name__))
        params = params[1:]
    return _scrub_parameters(params)
def flatten_args(signature_parameters, args, kwargs):
    """Validates the arguments against the signature and flattens them.

    The flat list representation is a serializable format for arguments.

    Since the flatbuffer representation of function arguments is a list, we
    combine both keyword arguments and positional arguments. We represent
    this with two entries per argument value - [DUMMY_TYPE, x] for
    positional arguments and [KEY, VALUE] for keyword arguments. See the
    below example. See `recover_args` for logic restoring the flat list
    back to args/kwargs.

    Args:
        signature_parameters (list): The list of RayParameter objects
            representing the function signature, obtained from
            `extract_signature`.
        args: The non-keyword arguments passed into the function.
        kwargs: The keyword arguments passed into the function.

    Returns:
        List of args and kwargs. Non-keyword arguments are prefixed
        by internal enum DUMMY_TYPE.

    Raises:
        TypeError: Raised if arguments do not fit in the function signature.

    Example:
        >>> flatten_args(sig_params, [1, 2, 3], {"a": 4})
        [DUMMY_TYPE, 1, DUMMY_TYPE, 2, DUMMY_TYPE, 3, "a", 4]
    """
    # Rebuild a funcsigs.Signature so that .bind() can validate the call
    # against the original function's signature.
    restored = _restore_parameters(signature_parameters)
    reconstructed_signature = funcsigs.Signature(parameters=restored)
    try:
        reconstructed_signature.bind(*args, **kwargs)
    except TypeError as exc:
        raise TypeError(str(exc))
    list_args = []
    for arg in args:
        # Positional arguments are tagged with the DUMMY_TYPE sentinel.
        list_args += [DUMMY_TYPE, arg]
    for keyword, arg in kwargs.items():
        list_args += [keyword, arg]
    return list_args
def recover_args(flattened_args):
    """Recreates `args` and `kwargs` from the flattened arg list.

    Args:
        flattened_args: List of args and kwargs. This should be the output
            of `flatten_args`.

    Returns:
        args: The non-keyword arguments passed into the function.
        kwargs: The keyword arguments passed into the function.
    """
    assert len(flattened_args) % 2 == 0, (
        "Flattened arguments need to be even-numbered. See `flatten_args`.")
    args = []
    kwargs = {}
    # Entries come in (marker, value) pairs; DUMMY_TYPE marks positionals,
    # anything else is a keyword name.
    for marker, value in zip(flattened_args[::2], flattened_args[1::2]):
        if marker == DUMMY_TYPE:
            args.append(value)
        else:
            kwargs[marker] = value
    return args, kwargs
def _scrub_parameters(parameters):
    """Convert funcsigs.Parameter objects into serializable RayParameters."""
    scrubbed = []
    for param in parameters:
        scrubbed.append(
            RayParameter(
                name=param.name,
                kind_int=_convert_from_parameter_kind(param.kind),
                default=param.default,
                annotation=param.annotation,
                partial_kwarg=param._partial_kwarg))
    return scrubbed
def _restore_parameters(ray_parameters):
    """Reconstruct funcsigs.Parameter objects from RayParameters."""
    restored = []
    for rayparam in ray_parameters:
        restored.append(
            Parameter(
                rayparam.name,
                _convert_to_parameter_kind(rayparam.kind_int),
                default=rayparam.default,
                annotation=rayparam.annotation,
                _partial_kwarg=rayparam.partial_kwarg))
    return restored
def _convert_from_parameter_kind(kind):
    """Map a funcsigs ParameterKind to its serializable int code.

    Returns None for an unrecognized kind (mirrors the fall-through of the
    original if-chain).
    """
    kind_to_int = {
        Parameter.POSITIONAL_ONLY: 0,
        Parameter.POSITIONAL_OR_KEYWORD: 1,
        Parameter.VAR_POSITIONAL: 2,
        Parameter.KEYWORD_ONLY: 3,
        Parameter.VAR_KEYWORD: 4,
    }
    return kind_to_int.get(kind)
def _convert_to_parameter_kind(value):
    """Map a serialized int code back to its funcsigs ParameterKind.

    Returns None for an unrecognized code (mirrors the fall-through of the
    original if-chain).
    """
    int_to_kind = {
        0: Parameter.POSITIONAL_ONLY,
        1: Parameter.POSITIONAL_OR_KEYWORD,
        2: Parameter.VAR_POSITIONAL,
        3: Parameter.KEYWORD_ONLY,
        4: Parameter.VAR_KEYWORD,
    }
    return int_to_kind.get(value)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/state.py
|
Python
|
from collections import defaultdict
import json
import logging
import sys
import time
import ray
from ray.function_manager import FunctionDescriptor
from ray import (
gcs_utils,
services,
)
from ray.utils import (decode, binary_to_object_id, binary_to_hex,
hex_to_binary)
logger = logging.getLogger(__name__)
def _parse_client_table(redis_client):
    """Read the client table.

    Args:
        redis_client: A client to the primary Redis shard.

    Returns:
        A list of information about the nodes in the cluster.
    """
    NIL_CLIENT_ID = ray.ClientID.nil().binary()
    message = redis_client.execute_command(
        "RAY.TABLE_LOOKUP", gcs_utils.TablePrefix.Value("CLIENT"), "",
        NIL_CLIENT_ID)

    # No clients returned; this should only happen right after the cluster
    # is started.
    if message is None:
        return []

    node_info = {}
    ordered_node_ids = []
    # GCS entries are append-only, so a later entry for the same node
    # overrides earlier ones and only the latest state is kept.
    for entry in gcs_utils.GcsEntry.FromString(message).entries:
        item = gcs_utils.GcsNodeInfo.FromString(entry)
        node_id = ray.utils.binary_to_hex(item.node_id)
        if item.state == gcs_utils.GcsNodeInfo.GcsNodeState.Value("ALIVE"):
            ordered_node_ids.append(node_id)
            node_info[node_id] = {
                "NodeID": node_id,
                "Alive": True,
                "NodeManagerAddress": item.node_manager_address,
                "NodeManagerHostname": item.node_manager_hostname,
                "NodeManagerPort": item.node_manager_port,
                "ObjectManagerPort": item.object_manager_port,
                "ObjectStoreSocketName": item.object_store_socket_name,
                "RayletSocketName": item.raylet_socket_name
            }
        else:
            # A removal must refer to a node that was previously inserted
            # and has not already been removed.
            assert node_id in node_info, "node not found!"
            assert node_info[node_id]["Alive"], (
                "Unexpected duplicate removal of node.")
            node_info[node_id]["Alive"] = False

    # Attach resource info; dead nodes get an empty resource dict.
    for node_id in ordered_node_ids:
        if node_info[node_id]["Alive"]:
            node_info[node_id]["Resources"] = _parse_resource_table(
                redis_client, node_id)
        else:
            node_info[node_id]["Resources"] = {}

    # Return nodes in the order they joined the cluster. Nodes that died
    # and rejoined appear multiple times in the GCS log, so iterate the
    # ordered ID list rather than the dict.
    return [node_info[node_id] for node_id in ordered_node_ids]
def _parse_resource_table(redis_client, client_id):
    """Read the resource table with given client id.

    Args:
        redis_client: A client to the primary Redis shard.
        client_id: The client ID of the node in hex.

    Returns:
        A dict of resources about this node.
    """
    message = redis_client.execute_command(
        "RAY.TABLE_LOOKUP", gcs_utils.TablePrefix.Value("NODE_RESOURCE"), "",
        ray.utils.hex_to_binary(client_id))
    if message is None:
        return {}

    gcs_entry = gcs_utils.GcsEntry.FromString(message)
    num_entries = len(gcs_entry.entries)
    # Entries alternate between resource name and resource data, so an odd
    # count means the table is malformed.
    if num_entries % 2 != 0:
        raise Exception("Invalid entry size for resource lookup: " +
                        str(num_entries))

    resources = {}
    for i in range(0, num_entries, 2):
        resource_name = decode(gcs_entry.entries[i])
        resource_data = gcs_utils.ResourceTableData.FromString(
            gcs_entry.entries[i + 1])
        resources[resource_name] = resource_data.resource_capacity
    return resources
class GlobalState:
"""A class used to interface with the Ray control state.
# TODO(zongheng): In the future move this to use Ray's redis module in the
# backend to cut down on # of request RPCs.
Attributes:
redis_client: The Redis client used to query the primary redis server.
redis_clients: Redis clients for each of the Redis shards.
"""
def __init__(self):
"""Create a GlobalState object."""
# The redis server storing metadata, such as function table, client
# table, log files, event logs, workers/actions info.
self.redis_client = None
# Clients for the redis shards, storing the object table & task table.
self.redis_clients = None
def _check_connected(self):
"""Check that the object has been initialized before it is used.
Raises:
Exception: An exception is raised if ray.init() has not been called
yet.
"""
if self.redis_client is None:
raise Exception("The ray global state API cannot be used before "
"ray.init has been called.")
if self.redis_clients is None:
raise Exception("The ray global state API cannot be used before "
"ray.init has been called.")
def disconnect(self):
"""Disconnect global state from GCS."""
self.redis_client = None
self.redis_clients = None
    def _initialize_global_state(self,
                                 redis_address,
                                 redis_password=None,
                                 timeout=20):
        """Initialize the GlobalState object by connecting to Redis.

        It's possible that certain keys in Redis may not have been fully
        populated yet. In this case, we will retry this method until they
        have been populated or we exceed a timeout.

        Args:
            redis_address: The Redis address to connect.
            redis_password: The password of the redis server.
            timeout: Seconds to keep retrying before giving up.
        """
        self.redis_client = services.create_redis_client(
            redis_address, redis_password)
        start_time = time.time()
        num_redis_shards = None
        redis_shard_addresses = []
        # Poll the primary shard until both the shard count and the shard
        # address list have been written, sleeping 1s between attempts.
        while time.time() - start_time < timeout:
            # Attempt to get the number of Redis shards.
            num_redis_shards = self.redis_client.get("NumRedisShards")
            if num_redis_shards is None:
                print("Waiting longer for NumRedisShards to be populated.")
                time.sleep(1)
                continue
            num_redis_shards = int(num_redis_shards)
            if num_redis_shards < 1:
                raise Exception("Expected at least one Redis shard, found "
                                "{}.".format(num_redis_shards))
            # Attempt to get all of the Redis shards.
            redis_shard_addresses = self.redis_client.lrange(
                "RedisShards", start=0, end=-1)
            if len(redis_shard_addresses) != num_redis_shards:
                print("Waiting longer for RedisShards to be populated.")
                time.sleep(1)
                continue
            # If we got here then we successfully got all of the information.
            break
        # Check to see if we timed out.
        if time.time() - start_time >= timeout:
            raise Exception("Timed out while attempting to initialize the "
                            "global state. num_redis_shards = {}, "
                            "redis_shard_addresses = {}".format(
                                num_redis_shards, redis_shard_addresses))
        # Get the rest of the information.
        # Open one client per shard address discovered above.
        self.redis_clients = []
        for shard_address in redis_shard_addresses:
            self.redis_clients.append(
                services.create_redis_client(shard_address.decode(),
                                             redis_password))
def _execute_command(self, key, *args):
"""Execute a Redis command on the appropriate Redis shard based on key.
Args:
key: The object ID or the task ID that the query is about.
args: The command to run.
Returns:
The value returned by the Redis command.
"""
client = self.redis_clients[key.redis_shard_hash() % len(
self.redis_clients)]
return client.execute_command(*args)
def _keys(self, pattern):
"""Execute the KEYS command on all Redis shards.
Args:
pattern: The KEYS pattern to query.
Returns:
The concatenated list of results from all shards.
"""
result = []
for client in self.redis_clients:
result.extend(list(client.scan_iter(match=pattern)))
return result
def _object_table(self, object_id):
"""Fetch and parse the object table information for a single object ID.
Args:
object_id: An object ID to get information about.
Returns:
A dictionary with information about the object ID in question.
"""
# Allow the argument to be either an ObjectID or a hex string.
if not isinstance(object_id, ray.ObjectID):
object_id = ray.ObjectID(hex_to_binary(object_id))
# Return information about a single object ID.
message = self._execute_command(object_id, "RAY.TABLE_LOOKUP",
gcs_utils.TablePrefix.Value("OBJECT"),
"", object_id.binary())
if message is None:
return {}
gcs_entry = gcs_utils.GcsEntry.FromString(message)
assert len(gcs_entry.entries) > 0
entry = gcs_utils.ObjectTableData.FromString(gcs_entry.entries[0])
object_info = {
"DataSize": entry.object_size,
"Manager": entry.manager,
}
return object_info
def object_table(self, object_id=None):
"""Fetch and parse the object table info for one or more object IDs.
Args:
object_id: An object ID to fetch information about. If this is
None, then the entire object table is fetched.
Returns:
Information from the object table.
"""
self._check_connected()
if object_id is not None:
# Return information about a single object ID.
return self._object_table(object_id)
else:
# Return the entire object table.
object_keys = self._keys(gcs_utils.TablePrefix_OBJECT_string + "*")
object_ids_binary = {
key[len(gcs_utils.TablePrefix_OBJECT_string):]
for key in object_keys
}
results = {}
for object_id_binary in object_ids_binary:
results[binary_to_object_id(object_id_binary)] = (
self._object_table(binary_to_object_id(object_id_binary)))
return results
def _actor_table(self, actor_id):
"""Fetch and parse the actor table information for a single actor ID.
Args:
actor_id: A actor ID to get information about.
Returns:
A dictionary with information about the actor ID in question.
"""
assert isinstance(actor_id, ray.ActorID)
message = self.redis_client.execute_command(
"RAY.TABLE_LOOKUP", gcs_utils.TablePrefix.Value("ACTOR"), "",
actor_id.binary())
if message is None:
return {}
gcs_entries = gcs_utils.GcsEntry.FromString(message)
assert len(gcs_entries.entries) == 1
actor_table_data = gcs_utils.ActorTableData.FromString(
gcs_entries.entries[0])
actor_info = {
"ActorID": binary_to_hex(actor_table_data.actor_id),
"JobID": binary_to_hex(actor_table_data.job_id),
"Address": {
"IPAddress": actor_table_data.address.ip_address,
"Port": actor_table_data.address.port
},
"OwnerAddress": {
"IPAddress": actor_table_data.owner_address.ip_address,
"Port": actor_table_data.owner_address.port
},
"IsDirectCall": actor_table_data.is_direct_call,
"State": actor_table_data.state,
"Timestamp": actor_table_data.timestamp,
}
return actor_info
def actor_table(self, actor_id=None):
"""Fetch and parse the actor table information for one or more actor IDs.
Args:
actor_id: A hex string of the actor ID to fetch information about.
If this is None, then the actor table is fetched.
Returns:
Information from the actor table.
"""
self._check_connected()
if actor_id is not None:
actor_id = ray.ActorID(hex_to_binary(actor_id))
return self._actor_table(actor_id)
else:
actor_table_keys = list(
self.redis_client.scan_iter(
match=gcs_utils.TablePrefix_ACTOR_string + "*"))
actor_ids_binary = [
key[len(gcs_utils.TablePrefix_ACTOR_string):]
for key in actor_table_keys
]
results = {}
for actor_id_binary in actor_ids_binary:
results[binary_to_hex(actor_id_binary)] = self._actor_table(
ray.ActorID(actor_id_binary))
return results
def _task_table(self, task_id):
    """Fetch and parse the task table information for a single task ID.

    Args:
        task_id: A task ID to get information about.

    Returns:
        A dictionary with information about the task ID in question.
    """
    assert isinstance(task_id, ray.TaskID)
    message = self._execute_command(
        task_id, "RAY.TABLE_LOOKUP",
        gcs_utils.TablePrefix.Value("RAYLET_TASK"), "", task_id.binary())
    # Unknown task ID: there is no GCS entry for it.
    if message is None:
        return {}
    gcs_entries = gcs_utils.GcsEntry.FromString(message)
    # The task table holds exactly one entry per task ID.
    assert len(gcs_entries.entries) == 1
    task_table_data = gcs_utils.TaskTableData.FromString(
        gcs_entries.entries[0])
    # Re-serialize the embedded spec so the Cython TaskSpec accessors
    # can be used to pull the individual fields out.
    task = ray._raylet.TaskSpec.from_string(
        task_table_data.task.task_spec.SerializeToString())
    function_descriptor_list = task.function_descriptor_list()
    function_descriptor = FunctionDescriptor.from_bytes_list(
        function_descriptor_list)
    task_spec_info = {
        "JobID": task.job_id().hex(),
        "TaskID": task.task_id().hex(),
        "ParentTaskID": task.parent_task_id().hex(),
        "ParentCounter": task.parent_counter(),
        "ActorID": (task.actor_id().hex()),
        "ActorCreationID": task.actor_creation_id().hex(),
        "ActorCreationDummyObjectID": (
            task.actor_creation_dummy_object_id().hex()),
        "PreviousActorTaskDummyObjectID": (
            task.previous_actor_task_dummy_object_id().hex()),
        "ActorCounter": task.actor_counter(),
        "Args": task.arguments(),
        "ReturnObjectIDs": task.returns(),
        "RequiredResources": task.required_resources(),
        "FunctionID": function_descriptor.function_id.hex(),
        "FunctionHash": binary_to_hex(function_descriptor.function_hash),
        "ModuleName": function_descriptor.module_name,
        "ClassName": function_descriptor.class_name,
        "FunctionName": function_descriptor.function_name,
    }
    # The execution spec is parsed separately from the task spec.
    execution_spec = ray._raylet.TaskExecutionSpec.from_string(
        task_table_data.task.task_execution_spec.SerializeToString())
    return {
        "ExecutionSpec": {
            "NumForwards": execution_spec.num_forwards(),
        },
        "TaskSpec": task_spec_info
    }
def task_table(self, task_id=None):
    """Fetch and parse task table info for one task or for all tasks.

    Args:
        task_id: A hex string of the task ID to fetch information about.
            If this is None, then the task object table is fetched.

    Returns:
        Information from the task table.
    """
    self._check_connected()
    if task_id is not None:
        # Single lookup.
        return self._task_table(ray.TaskID(hex_to_binary(task_id)))
    # Full-table scan: recover each binary task ID by stripping the
    # table prefix from the Redis key, then look each one up.
    prefix = gcs_utils.TablePrefix_RAYLET_TASK_string
    results = {}
    for key in self._keys(prefix + "*"):
        task_id_binary = key[len(prefix):]
        results[binary_to_hex(task_id_binary)] = self._task_table(
            ray.TaskID(task_id_binary))
    return results
def client_table(self):
    """Fetch and parse the Redis DB client table.

    Returns:
        Information about the Ray clients in the cluster.
    """
    self._check_connected()
    entries = _parse_client_table(self.redis_client)
    # Mirror "Alive" under a lowercase "alive" key as well; the two are
    # equivalent and the lowercase form is friendlier for applications.
    for entry in entries:
        entry["alive"] = entry["Alive"]
    return entries
def _job_table(self, job_id):
    """Fetch and parse the job table information for a single job ID.

    Args:
        job_id: A job ID or hex string to get information about.

    Returns:
        A dictionary with information about the job ID in question.
    """
    # Allow the argument to be either a JobID or a hex string.
    if not isinstance(job_id, ray.JobID):
        assert isinstance(job_id, str)
        job_id = ray.JobID(hex_to_binary(job_id))
    # Return information about a single job ID.
    message = self.redis_client.execute_command(
        "RAY.TABLE_LOOKUP", gcs_utils.TablePrefix.Value("JOB"), "",
        job_id.binary())
    # Unknown job ID: there is no GCS entry for it.
    if message is None:
        return {}
    gcs_entry = gcs_utils.GcsEntry.FromString(message)
    assert len(gcs_entry.entries) > 0
    job_info = {}
    # A job can have multiple entries (e.g. a start entry and a stop
    # entry); fold them into one dict.  Later entries overwrite earlier
    # fields, while StartTime/StopTime accumulate from live/dead entries.
    for i in range(len(gcs_entry.entries)):
        entry = gcs_utils.JobTableData.FromString(gcs_entry.entries[i])
        assert entry.job_id == job_id.binary()
        job_info["JobID"] = job_id.hex()
        job_info["NodeManagerAddress"] = entry.node_manager_address
        job_info["DriverPid"] = entry.driver_pid
        if entry.is_dead:
            job_info["StopTime"] = entry.timestamp
        else:
            job_info["StartTime"] = entry.timestamp
    return job_info
def job_table(self):
    """Fetch and parse the Redis job table.

    Returns:
        Information about the Ray jobs in the cluster,
        namely a list of dicts with keys:
        - "JobID" (identifier for the job),
        - "NodeManagerAddress" (IP address of the driver for this job),
        - "DriverPid" (process ID of the driver for this job),
        - "StartTime" (UNIX timestamp of the start time of this job),
        - "StopTime" (UNIX timestamp of the stop time of this job, if any)
    """
    self._check_connected()
    prefix = gcs_utils.TablePrefix_JOB_string
    # Deduplicate binary job IDs with a set, then look each one up.
    job_ids_binary = {
        key[len(prefix):]
        for key in self.redis_client.keys(prefix + "*")
    }
    return [
        self._job_table(binary_to_hex(job_id_binary))
        for job_id_binary in job_ids_binary
    ]
def _profile_table(self, batch_id):
    """Get the profile events for a given batch of profile events.

    Args:
        batch_id: An identifier for a batch of profile events.

    Returns:
        A list of the profile events for the specified batch.
    """
    # TODO(rkn): This method should support limiting the number of log
    # events and should also support returning a window of events.
    message = self._execute_command(batch_id, "RAY.TABLE_LOOKUP",
                                    gcs_utils.TablePrefix.Value("PROFILE"),
                                    "", batch_id.binary())
    # Unknown batch: there is no GCS entry (possibly already evicted).
    if message is None:
        return []
    gcs_entries = gcs_utils.GcsEntry.FromString(message)
    profile_events = []
    for entry in gcs_entries.entries:
        profile_table_message = gcs_utils.ProfileTableData.FromString(
            entry)
        # These fields apply to every event in the batch.
        component_type = profile_table_message.component_type
        component_id = binary_to_hex(profile_table_message.component_id)
        node_ip_address = profile_table_message.node_ip_address
        for profile_event_message in profile_table_message.profile_events:
            # extra_data is user-supplied JSON; tolerate malformed data.
            try:
                extra_data = json.loads(profile_event_message.extra_data)
            except ValueError:
                extra_data = {}
            profile_event = {
                "event_type": profile_event_message.event_type,
                "component_id": component_id,
                "node_ip_address": node_ip_address,
                "component_type": component_type,
                "start_time": profile_event_message.start_time,
                "end_time": profile_event_message.end_time,
                "extra_data": extra_data
            }
            profile_events.append(profile_event)
    return profile_events
def profile_table(self):
    """Fetch all profile events, grouped by component ID.

    Returns:
        A dict mapping component ID (hex string) to the list of profile
        events recorded for that component.
    """
    self._check_connected()
    prefix = gcs_utils.TablePrefix_PROFILE_string
    grouped = defaultdict(list)
    for key in self._keys(prefix + "*"):
        batch_id_binary = key[len(prefix):]
        profile_data = self._profile_table(
            binary_to_object_id(batch_id_binary))
        # Note that if keys are being evicted from Redis, then it is
        # possible that the batch will be evicted before we get it.
        if profile_data:
            grouped[profile_data[0]["component_id"]].extend(profile_data)
    return dict(grouped)
def _seconds_to_microseconds(self, time_in_seconds):
    """Convert a duration in seconds to microseconds."""
    return time_in_seconds * 10**6
# Colors are specified at
# https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html. # noqa: E501
# Maps a profile event type to a Chrome-tracing color name; any event
# type without an explicit entry falls back to "generic_work".
_default_color_mapping = defaultdict(
    lambda: "generic_work", {
        "worker_idle": "cq_build_abandoned",
        "task": "rail_response",
        "task:deserialize_arguments": "rail_load",
        "task:execute": "rail_animation",
        "task:store_outputs": "rail_idle",
        "wait_for_function": "detailed_memory_dump",
        "ray.get": "good",
        "ray.put": "terrible",
        "ray.wait": "vsync_highlight_color",
        "submit_task": "background_memory_dump",
        "fetch_and_run_function": "detailed_memory_dump",
        "register_remote_function": "detailed_memory_dump",
    })
# These colors are for use in Chrome tracing.  The object-transfer dump
# indexes into this list to pick a stable color per object ID.
_chrome_tracing_colors = [
    "thread_state_uninterruptible",
    "thread_state_iowait",
    "thread_state_running",
    "thread_state_runnable",
    "thread_state_sleeping",
    "thread_state_unknown",
    "background_memory_dump",
    "light_memory_dump",
    "detailed_memory_dump",
    "vsync_highlight_color",
    "generic_work",
    "good",
    "bad",
    "terrible",
    # "black",
    # "grey",
    # "white",
    "yellow",
    "olive",
    "rail_response",
    "rail_animation",
    "rail_idle",
    "rail_load",
    "startup",
    "heap_dump_stack_frame",
    "heap_dump_object_type",
    "heap_dump_child_node_arrow",
    "cq_build_running",
    "cq_build_passed",
    "cq_build_failed",
    "cq_build_abandoned",
    "cq_build_attempt_runnig",
    "cq_build_attempt_passed",
    "cq_build_attempt_failed",
]
def chrome_tracing_dump(self, filename=None):
    """Return a list of profiling events that can viewed as a timeline.

    To view this information as a timeline, simply dump it as a json file
    by passing in "filename" or using using json.dump, and then load go to
    chrome://tracing in the Chrome web browser and load the dumped file.
    Make sure to enable "Flow events" in the "View Options" menu.

    Args:
        filename: If a filename is provided, the timeline is dumped to that
            file.

    Returns:
        If filename is not provided, this returns a list of profiling
            events. Each profile event is a dictionary.
    """
    # TODO(rkn): Support including the task specification data in the
    # timeline.
    # TODO(rkn): This should support viewing just a window of time or a
    # limited number of events.
    self._check_connected()
    profile_table = self.profile_table()
    all_events = []
    for component_id_hex, component_events in profile_table.items():
        # Only consider workers and drivers.
        component_type = component_events[0]["component_type"]
        if component_type not in ["worker", "driver"]:
            continue
        for event in component_events:
            new_event = {
                # The category of the event.
                "cat": event["event_type"],
                # The string displayed on the event.
                "name": event["event_type"],
                # The identifier for the group of rows that the event
                # appears in.
                "pid": event["node_ip_address"],
                # The identifier for the row that the event appears in.
                "tid": event["component_type"] + ":" +
                event["component_id"],
                # The start time in microseconds.
                "ts": self._seconds_to_microseconds(event["start_time"]),
                # The duration in microseconds.
                "dur": self._seconds_to_microseconds(event["end_time"] -
                                                     event["start_time"]),
                # "X" marks a Chrome-tracing "complete" event, which
                # spans [ts, ts + dur].
                "ph": "X",
                # This is the name of the color to display the box in.
                "cname": self._default_color_mapping[event["event_type"]],
                # The extra user-defined data.
                "args": event["extra_data"],
            }
            # Modify the json with the additional user-defined extra data.
            # This can be used to add fields or override existing fields.
            if "cname" in event["extra_data"]:
                new_event["cname"] = event["extra_data"]["cname"]
            if "name" in event["extra_data"]:
                new_event["name"] = event["extra_data"]["name"]
            all_events.append(new_event)
    if filename is not None:
        with open(filename, "w") as outfile:
            json.dump(all_events, outfile)
    else:
        return all_events
def chrome_tracing_object_transfer_dump(self, filename=None):
    """Return a list of transfer events that can viewed as a timeline.

    To view this information as a timeline, simply dump it as a json file
    by passing in "filename" or using using json.dump, and then load go to
    chrome://tracing in the Chrome web browser and load the dumped file.
    Make sure to enable "Flow events" in the "View Options" menu.

    Args:
        filename: If a filename is provided, the timeline is dumped to that
            file.

    Returns:
        If filename is not provided, this returns a list of profiling
            events. Each profile event is a dictionary.
    """
    self._check_connected()
    # Map node IDs to "<ip>:<object manager port>" so events can be
    # grouped per node in the trace view.
    node_id_to_address = {}
    for node_info in self.client_table():
        node_id_to_address[node_info["NodeID"]] = "{}:{}".format(
            node_info["NodeManagerAddress"],
            node_info["ObjectManagerPort"])
    all_events = []
    for key, items in self.profile_table().items():
        # Only consider object manager events.
        if items[0]["component_type"] != "object_manager":
            continue
        for event in items:
            # extra_data layout depends on the event type.
            if event["event_type"] == "transfer_send":
                object_id, remote_node_id, _, _ = event["extra_data"]
            elif event["event_type"] == "transfer_receive":
                object_id, remote_node_id, _, _ = event["extra_data"]
            elif event["event_type"] == "receive_pull_request":
                object_id, remote_node_id = event["extra_data"]
            else:
                assert False, "This should be unreachable."
            # Choose a color by reading the first couple of hex digits of
            # the object ID as an integer and turning that into a color.
            object_id_int = int(object_id[:2], 16)
            color = self._chrome_tracing_colors[object_id_int % len(
                self._chrome_tracing_colors)]
            new_event = {
                # The category of the event.
                "cat": event["event_type"],
                # The string displayed on the event.
                "name": event["event_type"],
                # The identifier for the group of rows that the event
                # appears in.
                "pid": node_id_to_address[key],
                # The identifier for the row that the event appears in.
                "tid": node_id_to_address[remote_node_id],
                # The start time in microseconds.
                "ts": self._seconds_to_microseconds(event["start_time"]),
                # The duration in microseconds.
                "dur": self._seconds_to_microseconds(event["end_time"] -
                                                     event["start_time"]),
                # "X" marks a Chrome-tracing "complete" event, which
                # spans [ts, ts + dur].
                "ph": "X",
                # This is the name of the color to display the box in.
                "cname": color,
                # The extra user-defined data.
                "args": event["extra_data"],
            }
            all_events.append(new_event)
            # Add another box with a color indicating whether it was a send
            # or a receive event.
            if event["event_type"] == "transfer_send":
                additional_event = new_event.copy()
                additional_event["cname"] = "black"
                all_events.append(additional_event)
            elif event["event_type"] == "transfer_receive":
                additional_event = new_event.copy()
                additional_event["cname"] = "grey"
            all_events.append(additional_event)
            else:
                pass
    if filename is not None:
        with open(filename, "w") as outfile:
            json.dump(all_events, outfile)
    else:
        return all_events
def workers(self):
    """Get a dictionary mapping worker ID to worker information."""
    self._check_connected()
    worker_keys = self.redis_client.keys("Worker*")
    workers_data = {}
    for worker_key in worker_keys:
        worker_info = self.redis_client.hgetall(worker_key)
        # Strip the "Workers:" prefix off the key to get the worker ID.
        worker_id = binary_to_hex(worker_key[len("Workers:"):])
        workers_data[worker_id] = {
            "node_ip_address": decode(worker_info[b"node_ip_address"]),
            "plasma_store_socket": decode(
                worker_info[b"plasma_store_socket"])
        }
        # stderr/stdout redirection files are only present when the
        # worker's output is being redirected.
        if b"stderr_file" in worker_info:
            workers_data[worker_id]["stderr_file"] = decode(
                worker_info[b"stderr_file"])
        if b"stdout_file" in worker_info:
            workers_data[worker_id]["stdout_file"] = decode(
                worker_info[b"stdout_file"])
    return workers_data
def _job_length(self):
    """Summarize the logged event timeline.

    Scans every "event_log*" sorted set in Redis and computes the
    earliest event timestamp, the latest event timestamp, and the total
    number of tasks logged so far.

    Returns:
        A tuple (smallest timestamp, largest timestamp, num tasks);
        (0, 0, 0) when no tasks have been logged.
    """
    event_log_sets = self.redis_client.keys("event_log*")
    overall_smallest = sys.maxsize
    overall_largest = 0
    num_tasks = 0
    for event_log_set in event_log_sets:
        fwd_range = self.redis_client.zrange(
            event_log_set, start=0, end=0, withscores=True)
        # A set can be empty (or evicted) between keys() and zrange();
        # guard against indexing an empty result.
        if fwd_range:
            overall_smallest = min(overall_smallest, fwd_range[0][1])
        rev_range = self.redis_client.zrevrange(
            event_log_set, start=0, end=0, withscores=True)
        if rev_range:
            overall_largest = max(overall_largest, rev_range[0][1])
        num_tasks += self.redis_client.zcount(
            event_log_set, min=0, max=time.time())
    if num_tasks == 0:
        return 0, 0, 0
    return overall_smallest, overall_largest, num_tasks
def cluster_resources(self):
    """Get the current total cluster resources.

    Note that this information can grow stale as nodes are added to or
    removed from the cluster.

    Returns:
        A dictionary mapping resource name to the total quantity of that
            resource in the cluster.
    """
    self._check_connected()
    totals = defaultdict(int)
    for client in self.client_table():
        # Only count resources from latest entries of live clients.
        if not client["Alive"]:
            continue
        for resource_name, quantity in client["Resources"].items():
            totals[resource_name] += quantity
    return dict(totals)
def _live_client_ids(self):
    """Returns a set of client IDs corresponding to clients still alive."""
    live = set()
    for client in self.client_table():
        if client["Alive"]:
            live.add(client["NodeID"])
    return live
def available_resources(self):
    """Get the current available cluster resources.

    This is different from `cluster_resources` in that this will return
    idle (available) resources rather than total resources.

    Note that this information can grow stale as tasks start and finish.

    Returns:
        A dictionary mapping resource name to the total quantity of that
            resource in the cluster.
    """
    self._check_connected()
    available_resources_by_id = {}
    subscribe_clients = [
        redis_client.pubsub(ignore_subscribe_messages=True)
        for redis_client in self.redis_clients
    ]
    for subscribe_client in subscribe_clients:
        subscribe_client.subscribe(gcs_utils.XRAY_HEARTBEAT_CHANNEL)
    client_ids = self._live_client_ids()
    # Block until we have seen a heartbeat from every live client.
    while set(available_resources_by_id.keys()) != client_ids:
        for subscribe_client in subscribe_clients:
            # Parse client message
            raw_message = subscribe_client.get_message()
            if (raw_message is None or raw_message["channel"] !=
                    gcs_utils.XRAY_HEARTBEAT_CHANNEL):
                continue
            data = raw_message["data"]
            gcs_entries = gcs_utils.GcsEntry.FromString(data)
            heartbeat_data = gcs_entries.entries[0]
            message = gcs_utils.HeartbeatTableData.FromString(
                heartbeat_data)
            # Calculate available resources for this client
            num_resources = len(message.resources_available_label)
            dynamic_resources = {}
            for i in range(num_resources):
                resource_id = message.resources_available_label[i]
                dynamic_resources[resource_id] = (
                    message.resources_available_capacity[i])
            # Update available resources for this client
            client_id = ray.utils.binary_to_hex(message.client_id)
            available_resources_by_id[client_id] = dynamic_resources
        # Update clients in cluster
        client_ids = self._live_client_ids()
        # Remove disconnected clients.  Iterate over a snapshot of the
        # keys: deleting from a dict while iterating its live key view
        # raises RuntimeError on Python 3.
        for client_id in list(available_resources_by_id.keys()):
            if client_id not in client_ids:
                del available_resources_by_id[client_id]
    # Calculate total available resources
    total_available_resources = defaultdict(int)
    for available_resources in available_resources_by_id.values():
        for resource_id, num_available in available_resources.items():
            total_available_resources[resource_id] += num_available
    # Close the pubsub clients to avoid leaking file descriptors.
    for subscribe_client in subscribe_clients:
        subscribe_client.close()
    return dict(total_available_resources)
def _error_messages(self, job_id):
    """Get the error messages for a specific driver.

    Args:
        job_id: The ID of the job to get the errors for.

    Returns:
        A list of the error messages for this driver.
    """
    assert isinstance(job_id, ray.JobID)
    message = self.redis_client.execute_command(
        "RAY.TABLE_LOOKUP", gcs_utils.TablePrefix.Value("ERROR_INFO"), "",
        job_id.binary())
    # If there are no errors, return early.
    if message is None:
        return []
    gcs_entries = gcs_utils.GcsEntry.FromString(message)
    error_messages = []
    for entry in gcs_entries.entries:
        error_data = gcs_utils.ErrorTableData.FromString(entry)
        # Every entry under this key should belong to the queried job.
        assert job_id.binary() == error_data.job_id
        error_message = {
            "type": error_data.type,
            "message": error_data.error_message,
            "timestamp": error_data.timestamp,
        }
        error_messages.append(error_message)
    return error_messages
def error_messages(self, job_id=None):
    """Get the error messages for all drivers or a specific driver.

    Args:
        job_id: The specific job to get the errors for. If this is
            None, then this method retrieves the errors for all jobs.

    Returns:
        A list of the error messages for the specified driver if one was
            given, or a dictionary mapping from job ID to a list of error
            messages for that driver otherwise.
    """
    self._check_connected()
    if job_id is not None:
        assert isinstance(job_id, ray.JobID)
        return self._error_messages(job_id)
    # All jobs: recover each binary job ID from the Redis key names.
    prefix = gcs_utils.TablePrefix_ERROR_INFO_string
    results = {}
    for key in self.redis_client.keys(prefix + "*"):
        job_id_binary = key[len(prefix):]
        results[binary_to_hex(job_id_binary)] = self._error_messages(
            ray.JobID(job_id_binary))
    return results
def actor_checkpoint_info(self, actor_id):
    """Get checkpoint info for the given actor id.

    Args:
        actor_id: Actor's ID.

    Returns:
        A dictionary with information about the actor's checkpoint IDs and
        their timestamps, or None when the actor has no checkpoints.
    """
    self._check_connected()
    message = self._execute_command(
        actor_id,
        "RAY.TABLE_LOOKUP",
        gcs_utils.TablePrefix.Value("ACTOR_CHECKPOINT_ID"),
        "",
        actor_id.binary(),
    )
    # No checkpoints recorded for this actor.
    if message is None:
        return None
    gcs_entry = gcs_utils.GcsEntry.FromString(message)
    entry = gcs_utils.ActorCheckpointIdData.FromString(
        gcs_entry.entries[0])
    checkpoint_ids = [
        ray.ActorCheckpointID(checkpoint_id)
        for checkpoint_id in entry.checkpoint_ids
    ]
    return {
        "ActorID": ray.utils.binary_to_hex(entry.actor_id),
        "CheckpointIds": checkpoint_ids,
        "Timestamps": list(entry.timestamps),
    }
# Module-level singleton backing the convenience accessors below
# (jobs(), nodes(), tasks(), ...).
state = GlobalState()
"""A global object used to access the cluster's global state."""
def jobs():
    """Get a list of the jobs in the cluster.

    Returns:
        Information from the job table: one dict per job with keys
        "JobID", "NodeManagerAddress", "DriverPid", "StartTime" and,
        for finished jobs, "StopTime" (UNIX timestamps).
    """
    job_info = state.job_table()
    return job_info
def nodes():
    """Get a list of the nodes in the cluster.

    Returns:
        Information about the Ray clients in the cluster.
    """
    node_info = state.client_table()
    return node_info
def current_node_id():
    """Return the node id of the current node.

    For example, "node:172.10.5.34". This can be used as a custom resource,
    e.g., {node_id: 1} to reserve the whole node, or {node_id: 0.001} to
    just force placement on the node.

    Returns:
        Id of the current node.
    """
    ip_address = ray.services.get_node_ip_address()
    return ray.resource_spec.NODE_ID_PREFIX + ip_address
def node_ids():
    """Get a list of the node ids in the cluster.

    For example, ["node:172.10.5.34", "node:172.42.3.77"]. These can be used
    as custom resources, e.g., {node_id: 1} to reserve the whole node, or
    {node_id: 0.001} to just force placement on the node.

    Returns:
        List of the node resource ids.
    """
    # Iterate the resource names directly; the resource quantities are
    # irrelevant here, so don't pull .items().
    return [
        resource_name for node in nodes()
        for resource_name in node["Resources"]
        if resource_name.startswith(ray.resource_spec.NODE_ID_PREFIX)
    ]
def actors(actor_id=None):
    """Fetch and parse the actor info for one or more actor IDs.

    Args:
        actor_id: A hex string of the actor ID to fetch information about. If
            this is None, then all actor information is fetched.

    Returns:
        Information about the actors.
    """
    actor_info = state.actor_table(actor_id=actor_id)
    return actor_info
def tasks(task_id=None):
    """Fetch and parse the task table information for one or more task IDs.

    Args:
        task_id: A hex string of the task ID to fetch information about. If
            this is None, then the task object table is fetched.

    Returns:
        Information from the task table.
    """
    task_info = state.task_table(task_id=task_id)
    return task_info
def objects(object_id=None):
    """Fetch and parse the object table info for one or more object IDs.

    Args:
        object_id: An object ID to fetch information about. If this is None,
            then the entire object table is fetched.

    Returns:
        Information from the object table.
    """
    object_info = state.object_table(object_id=object_id)
    return object_info
def timeline(filename=None):
    """Return a list of profiling events that can viewed as a timeline.

    To view this information as a timeline, simply dump it as a json file by
    passing in "filename" or using using json.dump, and then load go to
    chrome://tracing in the Chrome web browser and load the dumped file.

    Args:
        filename: If a filename is provided, the timeline is dumped to that
            file.

    Returns:
        If filename is not provided, this returns a list of profiling events.
            Each profile event is a dictionary.
    """
    dump = state.chrome_tracing_dump(filename=filename)
    return dump
def object_transfer_timeline(filename=None):
    """Return a list of transfer events that can viewed as a timeline.

    To view this information as a timeline, simply dump it as a json file by
    passing in "filename" or using using json.dump, and then load go to
    chrome://tracing in the Chrome web browser and load the dumped file. Make
    sure to enable "Flow events" in the "View Options" menu.

    Args:
        filename: If a filename is provided, the timeline is dumped to that
            file.

    Returns:
        If filename is not provided, this returns a list of profiling events.
            Each profile event is a dictionary.
    """
    dump = state.chrome_tracing_object_transfer_dump(filename=filename)
    return dump
def cluster_resources():
    """Get the current total cluster resources.

    Note that this information can grow stale as nodes are added to or removed
    from the cluster.

    Returns:
        A dictionary mapping resource name to the total quantity of that
            resource in the cluster.
    """
    totals = state.cluster_resources()
    return totals
def available_resources():
    """Get the current available cluster resources.

    This is different from `cluster_resources` in that this will return idle
    (available) resources rather than total resources.

    Note that this information can grow stale as tasks start and finish.

    Returns:
        A dictionary mapping resource name to the total quantity of that
            resource in the cluster.
    """
    idle = state.available_resources()
    return idle
def errors(all_jobs=False):
    """Get error messages from the cluster.

    Args:
        all_jobs: False if we should only include error messages for this
            specific job, or True if we should include error messages for all
            jobs.

    Returns:
        Error messages pushed from the cluster. This will be a single list if
            all_jobs is False, or a dictionary mapping from job ID to a list of
            error messages for that job if all_jobs is True.
    """
    if all_jobs:
        return state.error_messages(job_id=None)
    return state.error_messages(
        job_id=ray.worker.global_worker.current_job_id)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/test_utils.py
|
Python
|
import json
import fnmatch
import os
import subprocess
import sys
import tempfile
import time
import psutil
import ray
class RayTestTimeoutException(Exception):
    """Exception used to identify timeouts from test utilities."""
def _pid_alive(pid):
    """Check if the process with this PID is alive or not.

    Args:
        pid: The pid to check.

    Returns:
        This returns false if the process is dead. Otherwise, it returns true.
    """
    # Signal 0 does not touch the process; it only probes for existence.
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    return True
def wait_for_pid_to_exit(pid, timeout=20):
    """Poll until the process is gone; raise on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not _pid_alive(pid):
            return
        time.sleep(0.1)
    raise RayTestTimeoutException(
        "Timed out while waiting for process {} to exit.".format(pid))
def wait_for_children_of_pid(pid, num_children=1, timeout=20):
    """Wait until process `pid` has at least `num_children` direct children.

    Raises:
        RayTestTimeoutException: if the children do not appear in time.
    """
    p = psutil.Process(pid)
    start_time = time.time()
    # Initialize before the loop so the error message below cannot hit an
    # unbound local when timeout <= 0 (loop body never runs).
    num_alive = 0
    while time.time() - start_time < timeout:
        num_alive = len(p.children(recursive=False))
        if num_alive >= num_children:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException(
        "Timed out while waiting for process {} children to start "
        "({}/{} started).".format(pid, num_alive, num_children))
def wait_for_children_of_pid_to_exit(pid, timeout=20):
    """Block until every direct child of `pid` has exited.

    Raises RayTestTimeoutException, listing the surviving children by
    name, if any are still alive after `timeout` seconds.
    """
    children = psutil.Process(pid).children()
    if len(children) == 0:
        return
    _, alive = psutil.wait_procs(children, timeout=timeout)
    if len(alive) > 0:
        raise RayTestTimeoutException(
            "Timed out while waiting for process children to exit."
            " Children still alive: {}.".format([p.name() for p in alive]))
def kill_process_by_name(name, SIGKILL=False):
    """Terminate (or SIGKILL) every process whose name matches `name`."""
    for proc in psutil.process_iter(attrs=["name"]):
        if proc.info["name"] != name:
            continue
        if SIGKILL:
            proc.kill()
        else:
            proc.terminate()
def run_string_as_driver(driver_script):
    """Run a driver as a separate process.

    Args:
        driver_script: A string to run as a Python script.

    Returns:
        The script's output.
    """
    # Save the driver script as a file so we can call it using subprocess.
    # NOTE(review): reopening a NamedTemporaryFile by name while it is
    # still open is not supported on Windows — this assumes POSIX; confirm.
    with tempfile.NamedTemporaryFile() as f:
        f.write(driver_script.encode("ascii"))
        f.flush()
        out = ray.utils.decode(
            subprocess.check_output(
                [sys.executable, f.name], stderr=subprocess.STDOUT))
    return out
def run_string_as_driver_nonblocking(driver_script):
    """Start a driver as a separate process and return immediately.

    Args:
        driver_script: A string to run as a Python script.

    Returns:
        A handle to the driver process.
    """
    # Save the driver script as a file so we can call it using subprocess. We
    # do not delete this file because if we do then it may get removed before
    # the Python process tries to run it.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(driver_script.encode("ascii"))
        f.flush()
        # stdout is piped so callers can inspect the driver's output.
        return subprocess.Popen(
            [sys.executable, f.name], stdout=subprocess.PIPE)
def flat_errors():
    """Flatten ray.errors(all_jobs=True) into a single list of messages."""
    flattened = []
    for job_errors in ray.errors(all_jobs=True).values():
        flattened += job_errors
    return flattened
def relevant_errors(error_type):
    """Return only the cluster errors whose type matches `error_type`."""
    return list(
        filter(lambda error: error["type"] == error_type, flat_errors()))
def wait_for_errors(error_type, num_errors, timeout=20):
    """Poll until at least `num_errors` errors of `error_type` are seen."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(relevant_errors(error_type)) >= num_errors:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException("Timed out waiting for {} {} errors.".format(
        num_errors, error_type))
def wait_for_condition(condition_predictor,
                       timeout_ms=1000,
                       retry_interval_ms=100):
    """A helper function that waits until a condition is met.

    Args:
        condition_predictor: A function that predicts the condition.
        timeout_ms: Maximum timeout in milliseconds.
        retry_interval_ms: Retry interval in milliseconds.

    Return:
        Whether the condition is met within the timeout.
    """
    elapsed_ms = 0
    while elapsed_ms <= timeout_ms:
        if condition_predictor():
            return True
        time.sleep(retry_interval_ms / 1000.0)
        elapsed_ms += retry_interval_ms
    return False
def recursive_fnmatch(dirpath, pattern):
    """Looks at a file directory subtree for a filename pattern.

    Similar to glob.glob(..., recursive=True) but also supports 2.7
    """
    hits = []
    for root, _dirnames, filenames in os.walk(dirpath):
        hits.extend(
            os.path.join(root, name)
            for name in fnmatch.filter(filenames, pattern))
    return hits
def generate_internal_config_map(**kwargs):
    """Pack keyword args into the `_internal_config` kwarg for ray.init."""
    return {"_internal_config": json.dumps(kwargs)}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/conftest.py
|
Python
|
"""
This file defines the common pytest fixtures used in current directory.
"""
from contextlib import contextmanager
import json
import pytest
import subprocess
import ray
from ray.cluster_utils import Cluster
@pytest.fixture
def shutdown_only():
    """Fixture that only guarantees ray.shutdown() runs after the test."""
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
def get_default_fixure_internal_config():
    """Return the default `_internal_config` JSON used by the fixtures."""
    config = {
        "initial_reconstruction_timeout_milliseconds": 200,
        "num_heartbeats_timeout": 10,
    }
    return json.dumps(config)
def get_default_fixture_ray_kwargs():
    """Default ray.init kwargs shared by the fixtures in this module."""
    return {
        "num_cpus": 1,
        "object_store_memory": 150 * 1024 * 1024,
        "_internal_config": get_default_fixure_internal_config(),
    }
@contextmanager
def _ray_start(**kwargs):
    """Start a single-node Ray instance; shut it down on context exit."""
    init_kwargs = get_default_fixture_ray_kwargs()
    # Caller-supplied kwargs override the defaults.
    init_kwargs.update(kwargs)
    # Start the Ray processes.
    address_info = ray.init(**init_kwargs)
    yield address_info
    # The code after the yield will run as teardown code.
    ray.shutdown()
# The following fixture will start ray with 0 cpu.
@pytest.fixture
def ray_start_no_cpu(request):
    """Ray with num_cpus=0; extra init kwargs via request.param."""
    param = getattr(request, "param", {})
    with _ray_start(num_cpus=0, **param) as res:
        yield res
# The following fixture will start ray with 1 cpu.
@pytest.fixture
def ray_start_regular(request):
    """Ray with the default (1-CPU) configuration, fresh per test."""
    param = getattr(request, "param", {})
    with _ray_start(**param) as res:
        yield res
@pytest.fixture(scope="session")
def ray_start_regular_shared(request):
    """Session-scoped variant of ray_start_regular (shared across tests)."""
    param = getattr(request, "param", {})
    with _ray_start(**param) as res:
        yield res
@pytest.fixture
def ray_start_2_cpus(request):
    """Ray with num_cpus=2."""
    param = getattr(request, "param", {})
    with _ray_start(num_cpus=2, **param) as res:
        yield res
@pytest.fixture
def ray_start_10_cpus(request):
    """Ray with num_cpus=10."""
    param = getattr(request, "param", {})
    with _ray_start(num_cpus=10, **param) as res:
        yield res
@contextmanager
def _ray_start_cluster(**kwargs):
    """Start a multi-node Cluster; tear it down on context exit.

    Special kwargs (consumed here, not forwarded to the nodes):
        num_nodes: how many nodes to add to the cluster (default 0).
        do_init: whether to ray.init against the cluster; when omitted it
            defaults to True iff num_nodes > 0.
    """
    init_kwargs = get_default_fixture_ray_kwargs()
    num_nodes = kwargs.pop("num_nodes", 0)
    if "do_init" in kwargs:
        do_init = kwargs.pop("do_init")
    else:
        do_init = num_nodes > 0
    init_kwargs.update(kwargs)
    cluster = Cluster()
    for _ in range(num_nodes):
        cluster.add_node(**init_kwargs)
    if do_init:
        ray.init(address=cluster.address)
    yield cluster
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
# This fixture will start a cluster with empty nodes.
@pytest.fixture
def ray_start_cluster(request):
    """Cluster fixture with no nodes added and no ray.init by default."""
    param = getattr(request, "param", {})
    with _ray_start_cluster(**param) as res:
        yield res
@pytest.fixture
def ray_start_cluster_head(request):
    """Cluster fixture with one (head) node and the driver connected."""
    param = getattr(request, "param", {})
    with _ray_start_cluster(do_init=True, num_nodes=1, **param) as res:
        yield res
@pytest.fixture
def ray_start_cluster_2_nodes(request):
    """Cluster fixture with two nodes and the driver connected."""
    param = getattr(request, "param", {})
    with _ray_start_cluster(do_init=True, num_nodes=2, **param) as res:
        yield res
@pytest.fixture
def ray_start_object_store_memory(request):
    """Start Ray with an object store sized by the (indirect) fixture param; yields the size."""
    # Start the Ray processes.
    store_size = request.param
    internal_config = get_default_fixure_internal_config()
    init_kwargs = {
        "num_cpus": 1,
        "_internal_config": internal_config,
        "object_store_memory": store_size,
    }
    ray.init(**init_kwargs)
    yield store_size
    # The code after the yield will run as teardown code.
    ray.shutdown()
@pytest.fixture
def call_ray_start(request):
    """Launch a Ray cluster via the `ray start` CLI; yield its redis address.

    The (indirect) fixture param overrides the default start command string.
    """
    parameter = getattr(request, "param", "ray start --head --num-cpus=1")
    command_args = parameter.split(" ")
    out = ray.utils.decode(
        subprocess.check_output(command_args, stderr=subprocess.STDOUT))
    # Get the redis address from the output.
    redis_substring_prefix = "--address='"
    # NOTE(review): if the prefix is absent, find() returns -1 and the slice
    # below silently yields a bogus address — assumes the CLI output format
    # is stable; verify if the `ray start` banner ever changes.
    address_location = (
        out.find(redis_substring_prefix) + len(redis_substring_prefix))
    address = out[address_location:]
    address = address.split("'")[0]
    yield address
    # Disconnect from the Ray cluster.
    ray.shutdown()
    # Kill the Ray cluster.
    subprocess.check_output(["ray", "stop"])
@pytest.fixture
def call_ray_stop_only():
    """No-op setup; runs `ray stop` as teardown after the test finishes."""
    yield
    subprocess.check_output(["ray", "stop"])
@pytest.fixture()
def two_node_cluster():
    """Start a cluster with two worker nodes; yield (cluster, last node added)."""
    internal_config = json.dumps({
        "initial_reconstruction_timeout_milliseconds": 200,
        "num_heartbeats_timeout": 10,
    })
    cluster = ray.cluster_utils.Cluster(
        head_node_args={"_internal_config": internal_config})
    for _ in range(2):
        # Only the node added last is kept in remote_node and yielded.
        remote_node = cluster.add_node(
            num_cpus=1, _internal_config=internal_config)
    ray.init(address=cluster.address)
    yield cluster, remote_node
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/perf_integration_tests/test_perf_integration.py
|
Python
|
import numpy as np
import pytest
import ray
from ray.tests.conftest import _ray_start_cluster
# Batch sizes to benchmark (1 through 100k tasks) and matching pytest ids.
num_tasks_submitted = [10**exponent for exponent in range(6)]
num_tasks_ids = ["{}_tasks".format(count) for count in num_tasks_submitted]
@ray.remote
def dummy_task(val):
    """Identity remote task used to measure pure submission/return overhead."""
    return val
def benchmark_task_submission(num_tasks):
    """Submit and wait on 100k dummy tasks in batches of num_tasks."""
    total_tasks = 100000
    for _ in range(total_tasks // num_tasks):
        ray.get([dummy_task.remote(i) for i in range(num_tasks)])
def warmup():
    """Prime the plasma store and worker pool before benchmarking."""
    x = np.zeros(10**6, dtype=np.uint8)
    for _ in range(5):
        for _ in range(5):
            ray.put(x)
        for _ in range(5):
            ray.get([dummy_task.remote(0) for _ in range(1000)])
@pytest.mark.benchmark
@pytest.mark.parametrize("num_tasks", num_tasks_submitted, ids=num_tasks_ids)
def test_task_submission(benchmark, num_tasks):
    """Benchmark raw task submission throughput at various batch sizes."""
    num_cpus = 16
    ray.init(
        num_cpus=num_cpus,
        object_store_memory=150 * 1024 * 1024,
        ignore_reinit_error=True)
    # warm up the plasma store
    warmup()
    benchmark(benchmark_task_submission, num_tasks)
    ray.shutdown()
def benchmark_task_forward(f, num_tasks):
    """Submit num_tasks copies of remote task f and wait for all results."""
    ray.get([f.remote() for _ in range(num_tasks)])
@pytest.mark.benchmark
@pytest.mark.parametrize(
    "num_tasks", [10**3, 10**4],
    ids=[str(num) + "_tasks" for num in [10**3, 10**4]])
def test_task_forward(benchmark, num_tasks):
    """Benchmark forwarding tasks to a second node via a custom resource."""
    with _ray_start_cluster(
            do_init=True,
            num_nodes=1,
            num_cpus=16,
            object_store_memory=150 * 1024 * 1024,
    ) as cluster:
        # The custom resource forces tasks onto the second node.
        cluster.add_node(
            num_cpus=16,
            object_store_memory=150 * 1024 * 1024,
            resources={"my_resource": 100},
        )
        @ray.remote(resources={"my_resource": 0.001})
        def f():
            return 1
        # Warm up
        ray.get([f.remote() for _ in range(100)])
        benchmark(benchmark_task_forward, f, num_tasks)
def benchmark_transfer_object(actor, object_ids):
    """Have the remote actor fetch object_ids, forcing an object transfer."""
    ray.get(actor.f.remote(object_ids))
@pytest.mark.benchmark
@pytest.mark.parametrize("object_number, data_size",
                         [(10000, 500), (10000, 5000), (1000, 500),
                          (1000, 5000)])
def test_transfer_performance(benchmark, ray_start_cluster_head, object_number,
                              data_size):
    """Benchmark cross-node object transfer for varying counts and sizes."""
    cluster = ray_start_cluster_head
    cluster.add_node(resources={"my_resource": 1}, object_store_memory=10**9)
    @ray.remote(resources={"my_resource": 1})
    class ObjectActor:
        def f(self, object_ids):
            ray.get(object_ids)
    # setup remote actor
    actor = ObjectActor.remote()
    actor.f.remote([])
    data = bytes(1) * data_size
    object_ids = [ray.put(data) for _ in range(object_number)]
    benchmark(benchmark_transfer_object, actor, object_ids)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/py3_test.py
|
Python
|
# coding: utf-8
import asyncio
import threading
import pytest
import sys
import ray
import ray.cluster_utils
import ray.test_utils
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "local_mode": True
    }, {
        "local_mode": False
    }],
    indirect=True)
def test_args_force_positional(ray_start_regular):
    """Keyword-only args and **kwargs behave identically locally and remotely."""
    def force_positional(*, a="hello", b="helxo", **kwargs):
        return a, b, kwargs
    class TestActor():
        def force_positional(self, a="hello", b="heo", *args, **kwargs):
            return a, b, args, kwargs
    def test_function(fn, remote_fn):
        assert fn(a=1, b=3, c=5) == ray.get(remote_fn.remote(a=1, b=3, c=5))
        assert fn(a=1) == ray.get(remote_fn.remote(a=1))
        # NOTE(review): duplicate of the previous assertion — possibly
        # intended to exercise b=1 instead.
        assert fn(a=1) == ray.get(remote_fn.remote(a=1))
    remote_test_function = ray.remote(test_function)
    remote_force_positional = ray.remote(force_positional)
    test_function(force_positional, remote_force_positional)
    ray.get(
        remote_test_function.remote(force_positional, remote_force_positional))
    remote_actor_class = ray.remote(TestActor)
    remote_actor = remote_actor_class.remote()
    actor_method = remote_actor.force_positional
    local_actor = TestActor()
    local_method = local_actor.force_positional
    test_function(local_method, actor_method)
    ray.get(remote_test_function.remote(local_method, actor_method))
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "local_mode": False
    }, {
        "local_mode": True
    }],
    indirect=True)
def test_args_intertwined(ray_start_regular):
    """Mixed positional/*args/keyword-only/**kwargs signatures round-trip remotely."""
    def args_intertwined(a, *args, x="hello", **kwargs):
        return a, args, x, kwargs
    class TestActor():
        def args_intertwined(self, a, *args, x="hello", **kwargs):
            return a, args, x, kwargs
        @classmethod
        def cls_args_intertwined(cls, a, *args, x="hello", **kwargs):
            return a, args, x, kwargs
    def test_function(fn, remote_fn):
        assert fn(
            1, 2, 3, x="hi", y="hello") == ray.get(
                remote_fn.remote(1, 2, 3, x="hi", y="hello"))
        assert fn(
            1, 2, 3, y="1hello") == ray.get(
                remote_fn.remote(1, 2, 3, y="1hello"))
        assert fn(1, y="1hello") == ray.get(remote_fn.remote(1, y="1hello"))
    remote_test_function = ray.remote(test_function)
    remote_args_intertwined = ray.remote(args_intertwined)
    test_function(args_intertwined, remote_args_intertwined)
    ray.get(
        remote_test_function.remote(args_intertwined, remote_args_intertwined))
    remote_actor_class = ray.remote(TestActor)
    remote_actor = remote_actor_class.remote()
    actor_method = remote_actor.args_intertwined
    local_actor = TestActor()
    local_method = local_actor.args_intertwined
    test_function(local_method, actor_method)
    ray.get(remote_test_function.remote(local_method, actor_method))
    actor_method = remote_actor.cls_args_intertwined
    local_actor = TestActor()
    local_method = local_actor.cls_args_intertwined
    test_function(local_method, actor_method)
    ray.get(remote_test_function.remote(local_method, actor_method))
def test_asyncio_actor(ray_start_regular_shared):
    """An asyncio actor can await across calls: three adds block until the batch fills."""
    @ray.remote
    class AsyncBatcher:
        def __init__(self):
            self.batch = []
            self.event = asyncio.Event()
        async def add(self, x):
            self.batch.append(x)
            if len(self.batch) >= 3:
                self.event.set()
            else:
                # Block this call until the third add() releases everyone.
                await self.event.wait()
            return sorted(self.batch)
    a = AsyncBatcher.options(is_direct_call=True, is_asyncio=True).remote()
    x1 = a.add.remote(1)
    x2 = a.add.remote(2)
    x3 = a.add.remote(3)
    r1 = ray.get(x1)
    r2 = ray.get(x2)
    r3 = ray.get(x3)
    assert r1 == [1, 2, 3]
    assert r1 == r2 == r3
def test_asyncio_actor_same_thread(ray_start_regular_shared):
    """Sync and async methods of an asyncio actor run on the same thread."""
    @ray.remote
    class Actor:
        def sync_thread_id(self):
            return threading.current_thread().ident
        async def async_thread_id(self):
            return threading.current_thread().ident
    a = Actor.options(is_direct_call=True, is_asyncio=True).remote()
    sync_id, async_id = ray.get(
        [a.sync_thread_id.remote(),
         a.async_thread_id.remote()])
    assert sync_id == async_id
def test_asyncio_actor_concurrency(ray_start_regular_shared):
    """With max_concurrency=1, asyncio actor calls run strictly one at a time.

    Each do_work() yields to the event loop mid-call; if calls interleaved,
    the history would contain two consecutive STARTEDs.
    """
    @ray.remote
    class RecordOrder:
        def __init__(self):
            self.history = []
        async def do_work(self):
            self.history.append("STARTED")
            # Force a context switch
            await asyncio.sleep(0)
            self.history.append("ENDED")
        def get_history(self):
            return self.history
    num_calls = 10
    a = RecordOrder.options(
        is_direct_call=True, max_concurrency=1, is_asyncio=True).remote()
    ray.get([a.do_work.remote() for _ in range(num_calls)])
    history = ray.get(a.get_history.remote())
    # We only care about ordered start-end-start-end sequence because
    # coroutines may be executed out of enqueued order.
    answer = ["STARTED", "ENDED"] * num_calls
    assert history == answer
def test_asyncio_actor_high_concurrency(ray_start_regular_shared):
    # This tests actor can handle concurrency above recursionlimit.
    @ray.remote
    class AsyncConcurrencyBatcher:
        def __init__(self, batch_size):
            self.batch = []
            self.event = asyncio.Event()
            self.batch_size = batch_size
        async def add(self, x):
            self.batch.append(x)
            if len(self.batch) >= self.batch_size:
                self.event.set()
            else:
                # Every earlier call parks here until the batch fills.
                await self.event.wait()
            return sorted(self.batch)
    # 4x the recursion limit ensures the pending-call count exceeds it.
    batch_size = sys.getrecursionlimit() * 4
    actor = AsyncConcurrencyBatcher.options(
        is_asyncio=True, max_concurrency=batch_size * 2,
        is_direct_call=True).remote(batch_size)
    result = ray.get([actor.add.remote(i) for i in range(batch_size)])
    assert result[0] == list(range(batch_size))
    assert result[-1] == list(range(batch_size))
@pytest.mark.asyncio
async def test_asyncio_get(ray_start_regular_shared, event_loop):
    """get_async works for plasma objects, errors, and direct actor calls."""
    loop = event_loop
    asyncio.set_event_loop(loop)
    loop.set_debug(True)
    # This is needed for async plasma
    from ray.experimental.async_api import _async_init
    await _async_init()
    # Test Async Plasma
    @ray.remote
    def task():
        return 1
    assert await ray.async_compat.get_async(task.remote()) == 1
    @ray.remote
    def task_throws():
        1 / 0
    with pytest.raises(ray.exceptions.RayTaskError):
        await ray.async_compat.get_async(task_throws.remote())
    # Test Direct Actor Call
    str_len = 200 * 1024
    @ray.remote
    class DirectActor:
        def echo(self, i):
            return i
        def big_object(self):
            # 100Kb is the limit for direct call
            return "a" * (str_len)
        def throw_error(self):
            1 / 0
    direct = DirectActor.options(is_direct_call=True).remote()
    direct_actor_call_future = ray.async_compat.get_async(
        direct.echo.remote(2))
    assert await direct_actor_call_future == 2
    # Returns above the direct-call size limit get promoted to plasma.
    promoted_to_plasma_future = ray.async_compat.get_async(
        direct.big_object.remote())
    assert await promoted_to_plasma_future == "a" * str_len
    with pytest.raises(ray.exceptions.RayTaskError):
        await ray.async_compat.get_async(direct.throw_error.remote())
def test_asyncio_actor_async_get(ray_start_regular_shared):
    """An asyncio actor method can directly await a remote task's ObjectID."""
    @ray.remote
    def remote_task():
        return 1
    @ray.remote
    class AsyncGetter:
        async def get(self):
            return await remote_task.remote()
    getter = AsyncGetter.options(is_asyncio=True).remote()
    assert ray.get(getter.get.remote()) == 1
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_actor.py
|
Python
|
import random
import numpy as np
import os
import pytest
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import sys
import time
import ray
import ray.test_utils
import ray.cluster_utils
from ray.test_utils import run_string_as_driver
from ray.experimental.internal_kv import _internal_kv_get, _internal_kv_put
def test_actor_init_error_propagated(ray_start_regular):
    """An exception raised in __init__ surfaces when a method result is fetched."""
    @ray.remote
    class Actor:
        def __init__(self, error=False):
            if error:
                raise Exception("oops")
        def foo(self):
            return "OK"
    actor = Actor.remote(error=False)
    ray.get(actor.foo.remote())
    actor = Actor.remote(error=True)
    with pytest.raises(Exception, match=".*oops.*"):
        ray.get(actor.foo.remote())
def test_keyword_args(ray_start_regular):
    """Keyword/default arguments work for actor constructors and methods, and bad calls raise."""
    @ray.remote
    class Actor:
        def __init__(self, arg0, arg1=1, arg2="a"):
            self.arg0 = arg0
            self.arg1 = arg1
            self.arg2 = arg2
        def get_values(self, arg0, arg1=2, arg2="b"):
            return self.arg0 + arg0, self.arg1 + arg1, self.arg2 + arg2
    actor = Actor.remote(0)
    assert ray.get(actor.get_values.remote(1)) == (1, 3, "ab")
    actor = Actor.remote(1, 2)
    assert ray.get(actor.get_values.remote(2, 3)) == (3, 5, "ab")
    actor = Actor.remote(1, 2, "c")
    assert ray.get(actor.get_values.remote(2, 3, "d")) == (3, 5, "cd")
    actor = Actor.remote(1, arg2="c")
    assert ray.get(actor.get_values.remote(0, arg2="d")) == (1, 3, "cd")
    assert ray.get(actor.get_values.remote(0, arg2="d", arg1=0)) == (1, 1,
                                                                     "cd")
    actor = Actor.remote(1, arg2="c", arg1=2)
    assert ray.get(actor.get_values.remote(0, arg2="d")) == (1, 4, "cd")
    assert ray.get(actor.get_values.remote(0, arg2="d", arg1=0)) == (1, 2,
                                                                     "cd")
    assert ray.get(actor.get_values.remote(arg2="d", arg1=0, arg0=2)) == (3, 2,
                                                                          "cd")
    # Make sure we get an exception if the constructor is called
    # incorrectly.
    with pytest.raises(Exception):
        actor = Actor.remote()
    with pytest.raises(Exception):
        actor = Actor.remote(0, 1, 2, arg3=3)
    with pytest.raises(Exception):
        actor = Actor.remote(0, arg0=1)
    # Make sure we get an exception if the method is called incorrectly.
    actor = Actor.remote(1)
    with pytest.raises(Exception):
        ray.get(actor.get_values.remote())
def test_variable_number_of_args(ray_start_regular):
    """*args are forwarded correctly to actor constructors and methods."""
    @ray.remote
    class Actor:
        def __init__(self, arg0, arg1=1, *args):
            self.arg0 = arg0
            self.arg1 = arg1
            self.args = args
        def get_values(self, arg0, arg1=2, *args):
            return self.arg0 + arg0, self.arg1 + arg1, self.args, args
    actor = Actor.remote(0)
    assert ray.get(actor.get_values.remote(1)) == (1, 3, (), ())
    actor = Actor.remote(1, 2)
    assert ray.get(actor.get_values.remote(2, 3)) == (3, 5, (), ())
    actor = Actor.remote(1, 2, "c")
    assert ray.get(actor.get_values.remote(2, 3, "d")) == (3, 5, ("c", ),
                                                           ("d", ))
    actor = Actor.remote(1, 2, "a", "b", "c", "d")
    assert ray.get(actor.get_values.remote(
        2, 3, 1, 2, 3, 4)) == (3, 5, ("a", "b", "c", "d"), (1, 2, 3, 4))
    @ray.remote
    class Actor:
        def __init__(self, *args):
            self.args = args
        def get_values(self, *args):
            return self.args, args
    a = Actor.remote()
    assert ray.get(a.get_values.remote()) == ((), ())
    a = Actor.remote(1)
    assert ray.get(a.get_values.remote(2)) == ((1, ), (2, ))
    a = Actor.remote(1, 2)
    assert ray.get(a.get_values.remote(3, 4)) == ((1, 2), (3, 4))
def test_no_args(ray_start_regular):
    """Actors with no-argument constructors and methods work."""
    @ray.remote
    class Actor:
        def __init__(self):
            pass
        def get_values(self):
            pass
    actor = Actor.remote()
    assert ray.get(actor.get_values.remote()) is None
def test_no_constructor(ray_start_regular):
    # If no __init__ method is provided, that should not be a problem.
    @ray.remote
    class Actor:
        def get_values(self):
            pass
    actor = Actor.remote()
    assert ray.get(actor.get_values.remote()) is None
def test_custom_classes(ray_start_regular):
    """User-defined class instances serialize through actor args and returns."""
    class Foo:
        def __init__(self, x):
            self.x = x
    @ray.remote
    class Actor:
        def __init__(self, f2):
            self.f1 = Foo(1)
            self.f2 = f2
        def get_values1(self):
            return self.f1, self.f2
        def get_values2(self, f3):
            return self.f1, self.f2, f3
    actor = Actor.remote(Foo(2))
    results1 = ray.get(actor.get_values1.remote())
    assert results1[0].x == 1
    assert results1[1].x == 2
    results2 = ray.get(actor.get_values2.remote(Foo(3)))
    assert results2[0].x == 1
    assert results2[1].x == 2
    assert results2[2].x == 3
def test_actor_class_attributes(ray_start_regular):
    """Class attributes inherited from (grand)parent classes are visible inside the actor."""
    class Grandparent:
        GRANDPARENT = 2
    class Parent1(Grandparent):
        PARENT1 = 6
    class Parent2:
        PARENT2 = 7
    @ray.remote
    class TestActor(Parent1, Parent2):
        X = 3
        @classmethod
        def f(cls):
            assert TestActor.GRANDPARENT == 2
            assert TestActor.PARENT1 == 6
            assert TestActor.PARENT2 == 7
            assert TestActor.X == 3
            return 4
        def g(self):
            assert TestActor.GRANDPARENT == 2
            assert TestActor.PARENT1 == 6
            assert TestActor.PARENT2 == 7
            assert TestActor.f() == 4
            return TestActor.X
    t = TestActor.remote()
    assert ray.get(t.g.remote()) == 3
def test_actor_static_attributes(ray_start_regular):
    """Static methods and inherited attributes resolve inside the actor."""
    class Grandparent:
        GRANDPARENT = 2
        @staticmethod
        def grandparent_static():
            assert Grandparent.GRANDPARENT == 2
            return 1
    class Parent1(Grandparent):
        PARENT1 = 6
        @staticmethod
        def parent1_static():
            assert Parent1.PARENT1 == 6
            return 2
        def parent1(self):
            assert Parent1.PARENT1 == 6
    class Parent2:
        PARENT2 = 7
        def parent2(self):
            assert Parent2.PARENT2 == 7
    @ray.remote
    class TestActor(Parent1, Parent2):
        X = 3
        @staticmethod
        def f():
            assert TestActor.GRANDPARENT == 2
            assert TestActor.PARENT1 == 6
            assert TestActor.PARENT2 == 7
            assert TestActor.X == 3
            return 4
        def g(self):
            assert TestActor.GRANDPARENT == 2
            assert TestActor.PARENT1 == 6
            assert TestActor.PARENT2 == 7
            assert TestActor.f() == 4
            return TestActor.X
    t = TestActor.remote()
    assert ray.get(t.g.remote()) == 3
def test_caching_actors(shutdown_only):
    # Test defining actors before ray.init() has been called.
    @ray.remote
    class Foo:
        def __init__(self):
            pass
        def get_val(self):
            return 3
    # Check that we can't actually create actors before ray.init() has been
    # called.
    with pytest.raises(Exception):
        f = Foo.remote()
    ray.init(num_cpus=1)
    f = Foo.remote()
    assert ray.get(f.get_val.remote()) == 3
def test_decorator_args(ray_start_regular):
    """@ray.remote rejects empty-call and unknown kwargs but accepts resource kwargs."""
    # This is an invalid way of using the actor decorator.
    with pytest.raises(Exception):
        @ray.remote()
        class Actor:
            def __init__(self):
                pass
    # This is an invalid way of using the actor decorator.
    with pytest.raises(Exception):
        @ray.remote(invalid_kwarg=0)  # noqa: F811
        class Actor:
            def __init__(self):
                pass
    # This is an invalid way of using the actor decorator.
    with pytest.raises(Exception):
        @ray.remote(num_cpus=0, invalid_kwarg=0)  # noqa: F811
        class Actor:
            def __init__(self):
                pass
    # This is a valid way of using the decorator.
    @ray.remote(num_cpus=1)  # noqa: F811
    class Actor:
        def __init__(self):
            pass
    # This is a valid way of using the decorator.
    @ray.remote(num_gpus=1)  # noqa: F811
    class Actor:
        def __init__(self):
            pass
    # This is a valid way of using the decorator.
    @ray.remote(num_cpus=1, num_gpus=1)  # noqa: F811
    class Actor:
        def __init__(self):
            pass
def test_random_id_generation(ray_start_regular):
    """Actor IDs stay unique even when the RNGs are seeded identically."""
    @ray.remote
    class Foo:
        def __init__(self):
            pass
    # Make sure that seeding numpy does not interfere with the generation
    # of actor IDs.
    np.random.seed(1234)
    random.seed(1234)
    f1 = Foo.remote()
    np.random.seed(1234)
    random.seed(1234)
    f2 = Foo.remote()
    assert f1._actor_id != f2._actor_id
def test_actor_class_name(ray_start_regular):
    """The exported actor class metadata in Redis records class name and module."""
    @ray.remote
    class Foo:
        def __init__(self):
            pass
    Foo.remote()
    r = ray.worker.global_worker.redis_client
    actor_keys = r.keys("ActorClass*")
    assert len(actor_keys) == 1
    actor_class_info = r.hgetall(actor_keys[0])
    assert actor_class_info[b"class_name"] == b"Foo"
    assert b"test_actor" in actor_class_info[b"module"]
def test_actor_inheritance(ray_start_regular):
    """Actors may inherit from plain classes but cannot be instantiated directly or subclassed."""
    class NonActorBase:
        def __init__(self):
            pass
    # Test that an actor class can inherit from a non-actor class.
    @ray.remote
    class ActorBase(NonActorBase):
        def __init__(self):
            pass
    # Test that you can't instantiate an actor class directly.
    with pytest.raises(
            Exception, match="Actors cannot be instantiated directly."):
        ActorBase()
    # Test that you can't inherit from an actor class.
    with pytest.raises(
            TypeError,
            match="Inheriting from actor classes is not "
            "currently supported."):
        class Derived(ActorBase):
            def __init__(self):
                pass
def test_multiple_return_values(ray_start_regular):
    """@ray.method(num_return_vals=N) yields N separate ObjectIDs."""
    @ray.remote
    class Foo:
        def method0(self):
            return 1
        @ray.method(num_return_vals=1)
        def method1(self):
            return 1
        @ray.method(num_return_vals=2)
        def method2(self):
            return 1, 2
        @ray.method(num_return_vals=3)
        def method3(self):
            return 1, 2, 3
    f = Foo.remote()
    id0 = f.method0.remote()
    assert ray.get(id0) == 1
    id1 = f.method1.remote()
    assert ray.get(id1) == 1
    id2a, id2b = f.method2.remote()
    assert ray.get([id2a, id2b]) == [1, 2]
    id3a, id3b, id3c = f.method3.remote()
    assert ray.get([id3a, id3b, id3c]) == [1, 2, 3]
def test_define_actor(ray_start_regular):
    """Basic actor definition/usage; direct (non-.remote) method calls raise."""
    @ray.remote
    class Test:
        def __init__(self, x):
            self.x = x
        def f(self, y):
            return self.x + y
    t = Test.remote(2)
    assert ray.get(t.f.remote(1)) == 3
    # Make sure that calling an actor method directly raises an exception.
    with pytest.raises(Exception):
        t.f(1)
def test_actor_deletion(ray_start_regular):
    # Make sure that when an actor handles goes out of scope, the actor
    # destructor is called.
    @ray.remote
    class Actor:
        def getpid(self):
            return os.getpid()
    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    # Dropping the last handle should terminate the actor's worker process.
    a = None
    ray.test_utils.wait_for_pid_to_exit(pid)
    actors = [Actor.remote() for _ in range(10)]
    pids = ray.get([a.getpid.remote() for a in actors])
    a = None
    actors = None
    [ray.test_utils.wait_for_pid_to_exit(pid) for pid in pids]
def test_actor_method_deletion(ray_start_regular):
    """A method call on a temporary actor handle completes before the actor is reaped."""
    @ray.remote
    class Actor:
        def method(self):
            return 1
    # TODO(ekl) this doesn't work in Python 2 after the weak ref method change.
    # Make sure that if we create an actor and call a method on it
    # immediately, the actor doesn't get killed before the method is
    # called.
    assert ray.get(Actor.remote().method.remote()) == 1
def test_multiple_actors(ray_start_regular):
    """Per-actor state is isolated under both sequential and interleaved calls."""
    @ray.remote
    class Counter:
        def __init__(self, value):
            self.value = value
        def increase(self):
            self.value += 1
            return self.value
        def reset(self):
            self.value = 0
    num_actors = 5
    num_increases = 50
    # Create multiple actors.
    actors = [Counter.remote(i) for i in range(num_actors)]
    results = []
    # Call each actor's method a bunch of times.
    for i in range(num_actors):
        results += [actors[i].increase.remote() for _ in range(num_increases)]
    result_values = ray.get(results)
    for i in range(num_actors):
        v = result_values[(num_increases * i):(num_increases * (i + 1))]
        assert v == list(range(i + 1, num_increases + i + 1))
    # Reset the actor values.
    [actor.reset.remote() for actor in actors]
    # Interweave the method calls on the different actors.
    results = []
    for j in range(num_increases):
        results += [actor.increase.remote() for actor in actors]
    result_values = ray.get(results)
    for j in range(num_increases):
        v = result_values[(num_actors * j):(num_actors * (j + 1))]
        assert v == num_actors * [j + 1]
def test_remote_function_within_actor(ray_start_10_cpus):
    # Make sure we can use remote funtions within actors.
    # Create some values to close over.
    val1 = 1
    val2 = 2
    @ray.remote
    def f(x):
        return val1 + x
    @ray.remote
    def g(x):
        return ray.get(f.remote(x))
    @ray.remote
    class Actor:
        def __init__(self, x):
            self.x = x
            self.y = val2
            self.object_ids = [f.remote(i) for i in range(5)]
            self.values2 = ray.get([f.remote(i) for i in range(5)])
        def get_values(self):
            return self.x, self.y, self.object_ids, self.values2
        def f(self):
            return [f.remote(i) for i in range(5)]
        def g(self):
            return ray.get([g.remote(i) for i in range(5)])
        def h(self, object_ids):
            return ray.get(object_ids)
    actor = Actor.remote(1)
    values = ray.get(actor.get_values.remote())
    assert values[0] == 1
    assert values[1] == val2
    assert ray.get(values[2]) == list(range(1, 6))
    assert values[3] == list(range(1, 6))
    assert ray.get(ray.get(actor.f.remote())) == list(range(1, 6))
    assert ray.get(actor.g.remote()) == list(range(1, 6))
    assert ray.get(actor.h.remote([f.remote(i) for i in range(5)])) == list(
        range(1, 6))
def test_define_actor_within_actor(ray_start_10_cpus):
    # Make sure we can use remote funtions within actors.
    @ray.remote
    class Actor1:
        def __init__(self, x):
            self.x = x
        def new_actor(self, z):
            # Define and create a nested actor class inside a method.
            @ray.remote
            class Actor2:
                def __init__(self, x):
                    self.x = x
                def get_value(self):
                    return self.x
            self.actor2 = Actor2.remote(z)
        def get_values(self, z):
            self.new_actor(z)
            return self.x, ray.get(self.actor2.get_value.remote())
    actor1 = Actor1.remote(3)
    assert ray.get(actor1.get_values.remote(5)) == (3, 5)
def test_use_actor_within_actor(ray_start_10_cpus):
    # Make sure we can use actors within actors.
    @ray.remote
    class Actor1:
        def __init__(self, x):
            self.x = x
        def get_val(self):
            return self.x
    @ray.remote
    class Actor2:
        def __init__(self, x, y):
            self.x = x
            self.actor1 = Actor1.remote(y)
        def get_values(self, z):
            return self.x, ray.get(self.actor1.get_val.remote())
    actor2 = Actor2.remote(3, 4)
    assert ray.get(actor2.get_values.remote(5)) == (3, 4)
def test_define_actor_within_remote_function(ray_start_10_cpus):
    # Make sure we can define and actors within remote funtions.
    @ray.remote
    def f(x, n):
        @ray.remote
        class Actor1:
            def __init__(self, x):
                self.x = x
            def get_value(self):
                return self.x
        actor = Actor1.remote(x)
        return ray.get([actor.get_value.remote() for _ in range(n)])
    assert ray.get(f.remote(3, 1)) == [3]
    assert ray.get(
        [f.remote(i, 20) for i in range(10)]) == [20 * [i] for i in range(10)]
def test_use_actor_within_remote_function(ray_start_10_cpus):
    # Make sure we can create and use actors within remote funtions.
    @ray.remote
    class Actor1:
        def __init__(self, x):
            self.x = x
        def get_values(self):
            return self.x
    @ray.remote
    def f(x):
        actor = Actor1.remote(x)
        return ray.get(actor.get_values.remote())
    assert ray.get(f.remote(3)) == 3
def test_actor_import_counter(ray_start_10_cpus):
    # This is mostly a test of the export counters to make sure that when
    # an actor is imported, all of the necessary remote functions have been
    # imported.
    # Export a bunch of remote functions.
    num_remote_functions = 50
    for i in range(num_remote_functions):
        # Each redefinition of f replaces the previous export.
        @ray.remote
        def f():
            return i
    @ray.remote
    def g():
        @ray.remote
        class Actor:
            def __init__(self):
                # This should use the last version of f.
                self.x = ray.get(f.remote())
            def get_val(self):
                return self.x
        actor = Actor.remote()
        return ray.get(actor.get_val.remote())
    assert ray.get(g.remote()) == num_remote_functions - 1
def test_inherit_actor_from_class(ray_start_regular):
    # Make sure we can define an actor by inheriting from a regular class.
    # Note that actors cannot inherit from other actors.
    class Foo:
        def __init__(self, x):
            self.x = x
        def f(self):
            return self.x
        def g(self, y):
            return self.x + y
    @ray.remote
    class Actor(Foo):
        def __init__(self, x):
            Foo.__init__(self, x)
        def get_value(self):
            return self.f()
    actor = Actor.remote(1)
    assert ray.get(actor.get_value.remote()) == 1
    assert ray.get(actor.g.remote(5)) == 6
def test_remote_functions_not_scheduled_on_actors(ray_start_regular):
    # Make sure that regular remote functions are not scheduled on actors.
    @ray.remote
    class Actor:
        def __init__(self):
            pass
        def get_id(self):
            return ray.worker.global_worker.worker_id
    a = Actor.remote()
    actor_id = ray.get(a.get_id.remote())
    @ray.remote
    def f():
        return ray.worker.global_worker.worker_id
    # None of the 100 task workers should be the actor's dedicated worker.
    resulting_ids = ray.get([f.remote() for _ in range(100)])
    assert actor_id not in resulting_ids
def test_actors_on_nodes_with_no_cpus(ray_start_no_cpu):
    """With no CPUs available the actor cannot be placed, so its method never completes."""
    @ray.remote
    class Foo:
        def method(self):
            pass
    f = Foo.remote()
    ready_ids, _ = ray.wait([f.method.remote()], timeout=0.1)
    assert ready_ids == []
def test_actor_load_balancing(ray_start_cluster):
    """Actors should be spread across all nodes of a 3-node cluster."""
    cluster = ray_start_cluster
    num_nodes = 3
    for i in range(num_nodes):
        cluster.add_node(num_cpus=1)
    ray.init(address=cluster.address)
    @ray.remote
    class Actor1:
        def __init__(self):
            pass
        def get_location(self):
            return ray.worker.global_worker.node.unique_id
    # Create a bunch of actors.
    num_actors = 30
    num_attempts = 20
    minimum_count = 5
    # Make sure that actors are spread between the raylets.
    # Retry because placement is not deterministic on any single attempt.
    attempts = 0
    while attempts < num_attempts:
        actors = [Actor1.remote() for _ in range(num_actors)]
        locations = ray.get([actor.get_location.remote() for actor in actors])
        names = set(locations)
        counts = [locations.count(name) for name in names]
        print("Counts are {}.".format(counts))
        if (len(names) == num_nodes
                and all(count >= minimum_count for count in counts)):
            break
        attempts += 1
    assert attempts < num_attempts
    # Make sure we can get the results of a bunch of tasks.
    results = []
    for _ in range(1000):
        index = np.random.randint(num_actors)
        results.append(actors[index].get_location.remote())
    ray.get(results)
def test_actor_lifetime_load_balancing(ray_start_cluster):
    """One 1-CPU actor per worker node can be created and pinged."""
    cluster = ray_start_cluster
    # Head node contributes no CPUs, so actors must go to worker nodes.
    cluster.add_node(num_cpus=0)
    num_nodes = 3
    for i in range(num_nodes):
        cluster.add_node(num_cpus=1)
    ray.init(address=cluster.address)
    @ray.remote(num_cpus=1)
    class Actor:
        def __init__(self):
            pass
        def ping(self):
            return
    actors = [Actor.remote() for _ in range(num_nodes)]
    ray.get([actor.ping.remote() for actor in actors])
def test_exception_raised_when_actor_node_dies(ray_start_cluster_head):
    """Calls on an actor whose node died raise RayActorError (no reconstruction)."""
    cluster = ray_start_cluster_head
    remote_node = cluster.add_node()
    @ray.remote(max_reconstructions=0)
    class Counter:
        def __init__(self):
            self.x = 0
        def node_id(self):
            return ray.worker.global_worker.node.unique_id
        def inc(self):
            self.x += 1
            return self.x
    # Create an actor that is not on the raylet.
    actor = Counter.remote()
    while (ray.get(actor.node_id.remote()) != remote_node.unique_id):
        actor = Counter.remote()
    # Kill the second node.
    cluster.remove_node(remote_node)
    # Submit some new actor tasks both before and after the node failure is
    # detected. Make sure that getting the result raises an exception.
    for _ in range(10):
        # Submit some new actor tasks.
        x_ids = [actor.inc.remote() for _ in range(5)]
        for x_id in x_ids:
            with pytest.raises(ray.exceptions.RayActorError):
                # There is some small chance that ray.get will actually
                # succeed (if the object is transferred before the raylet
                # dies).
                ray.get(x_id)
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with new GCS API.")
def test_actor_init_fails(ray_start_cluster_head):
    """Actors whose node dies mid-creation are reconstructed (max_reconstructions=1)."""
    cluster = ray_start_cluster_head
    remote_node = cluster.add_node()
    @ray.remote(max_reconstructions=1)
    class Counter:
        def __init__(self):
            self.x = 0
        def inc(self):
            self.x += 1
            return self.x
    # Create many actors. It should take a while to finish initializing them.
    actors = [Counter.remote() for _ in range(15)]
    # Allow some time to forward the actor creation tasks to the other node.
    time.sleep(0.1)
    # Kill the second node.
    cluster.remove_node(remote_node)
    # Get all of the results.
    results = ray.get([actor.inc.remote() for actor in actors])
    assert results == [1 for actor in actors]
def test_reconstruction_suppression(ray_start_cluster_head):
    """Concurrent reconstruction attempts for one dead actor are deduplicated."""
    cluster = ray_start_cluster_head
    num_nodes = 5
    worker_nodes = [cluster.add_node() for _ in range(num_nodes)]
    @ray.remote(max_reconstructions=1)
    class Counter:
        def __init__(self):
            self.x = 0
        def inc(self):
            self.x += 1
            return self.x
    @ray.remote
    def inc(actor_handle):
        return ray.get(actor_handle.inc.remote())
    # Make sure all of the actors have started.
    actors = [Counter.remote() for _ in range(10)]
    ray.get([actor.inc.remote() for actor in actors])
    # Kill a node.
    cluster.remove_node(worker_nodes[0])
    # Submit several tasks per actor. These should be randomly scheduled to the
    # nodes, so that multiple nodes will detect and try to reconstruct the
    # actor that died, but only one should succeed.
    results = []
    for _ in range(10):
        results += [inc.remote(actor) for actor in actors]
    # Make sure that we can get the results from the reconstructed actor.
    results = ray.get(results)
def setup_counter_actor(test_checkpoint=False,
                        save_exception=False,
                        resume_exception=False):
    """Create a Counter actor on a non-local raylet plus 100 pending inc tasks.

    Args:
        test_checkpoint: enable checkpointing every 5 tasks.
        save_exception: make __ray_save__ raise.
        resume_exception: make __ray_restore__ raise.

    Returns:
        (actor_handle, list of 100 pending inc ObjectIDs).
    """
    # Only set the checkpoint interval if we're testing with checkpointing.
    checkpoint_interval = -1
    if test_checkpoint:
        checkpoint_interval = 5
    @ray.remote(checkpoint_interval=checkpoint_interval)
    class Counter:
        _resume_exception = resume_exception
        def __init__(self, save_exception):
            self.x = 0
            self.num_inc_calls = 0
            self.save_exception = save_exception
            self.restored = False
        def node_id(self):
            return ray.worker.global_worker.node.unique_id
        def inc(self, *xs):
            self.x += 1
            self.num_inc_calls += 1
            return self.x
        def get_num_inc_calls(self):
            return self.num_inc_calls
        def test_restore(self):
            # This method will only return True if __ray_restore__ has been
            # called.
            return self.restored
        def __ray_save__(self):
            if self.save_exception:
                raise Exception("Exception raised in checkpoint save")
            return self.x, self.save_exception
        def __ray_restore__(self, checkpoint):
            if self._resume_exception:
                raise Exception("Exception raised in checkpoint resume")
            self.x, self.save_exception = checkpoint
            self.num_inc_calls = 0
            self.restored = True
    node_id = ray.worker.global_worker.node.unique_id
    # Create an actor that is not on the raylet.
    actor = Counter.remote(save_exception)
    while ray.get(actor.node_id.remote()) == node_id:
        actor = Counter.remote(save_exception)
    args = [ray.put(0) for _ in range(100)]
    ids = [actor.inc.remote(*args[i:]) for i in range(100)]
    return actor, ids
@pytest.mark.skip("Fork/join consistency not yet implemented.")
def test_distributed_handle(ray_start_cluster_2_nodes):
    """Forked actor handles should stay consistent across a node failure,
    without checkpointing (the actor replays its log instead)."""
    cluster = ray_start_cluster_2_nodes
    counter, ids = setup_counter_actor(test_checkpoint=False)

    @ray.remote
    def fork_many_incs(counter, num_incs):
        x = None
        for _ in range(num_incs):
            x = counter.inc.remote()
        # Only call ray.get() on the last task submitted.
        return ray.get(x)

    # Fork num_iters times.
    count = ray.get(ids[-1])
    num_incs = 100
    num_iters = 10
    forks = [
        fork_many_incs.remote(counter, num_incs) for _ in range(num_iters)
    ]
    ray.wait(forks, num_returns=len(forks))
    count += num_incs * num_iters

    # Kill the second plasma store to get rid of the cached objects and
    # trigger the corresponding raylet to exit.
    cluster.list_all_nodes()[1].kill_plasma_store(wait=True)

    # Check that the actor did not restore from a checkpoint.
    assert not ray.get(counter.test_restore.remote())
    # Check that we can submit another call on the actor and get the
    # correct counter result.
    x = ray.get(counter.inc.remote())
    assert x == count + 1
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with new GCS API.")
def test_remote_checkpoint_distributed_handle(ray_start_cluster_2_nodes):
    """Explicitly checkpoint (via __ray_checkpoint__) after forked handles
    finish, then verify the actor recovers from that checkpoint."""
    cluster = ray_start_cluster_2_nodes
    counter, ids = setup_counter_actor(test_checkpoint=True)

    @ray.remote
    def fork_many_incs(counter, num_incs):
        x = None
        for _ in range(num_incs):
            x = counter.inc.remote()
        # Only call ray.get() on the last task submitted.
        return ray.get(x)

    # Fork num_iters times.
    count = ray.get(ids[-1])
    num_incs = 100
    num_iters = 10
    forks = [
        fork_many_incs.remote(counter, num_incs) for _ in range(num_iters)
    ]
    ray.wait(forks, num_returns=len(forks))
    ray.wait([counter.__ray_checkpoint__.remote()])
    count += num_incs * num_iters

    # Kill the second plasma store to get rid of the cached objects and
    # trigger the corresponding raylet to exit.
    cluster.list_all_nodes()[1].kill_plasma_store(wait=True)

    # Check that the actor restored from a checkpoint.
    assert ray.get(counter.test_restore.remote())
    # Check that the number of inc calls since actor initialization is
    # exactly zero, since there could not have been another inc call since
    # the remote checkpoint.
    num_inc_calls = ray.get(counter.get_num_inc_calls.remote())
    assert num_inc_calls == 0
    # Check that we can submit another call on the actor and get the
    # correct counter result.
    x = ray.get(counter.inc.remote())
    assert x == count + 1
@pytest.mark.skip("Fork/join consistency not yet implemented.")
def test_checkpoint_distributed_handle(ray_start_cluster_2_nodes):
    """Forked actor handles plus automatic interval checkpointing: after a
    node failure the actor should resume from a checkpoint and still
    produce the correct count."""
    cluster = ray_start_cluster_2_nodes
    counter, ids = setup_counter_actor(test_checkpoint=True)

    @ray.remote
    def fork_many_incs(counter, num_incs):
        x = None
        for _ in range(num_incs):
            x = counter.inc.remote()
        # Only call ray.get() on the last task submitted.
        return ray.get(x)

    # Fork num_iters times.
    count = ray.get(ids[-1])
    num_incs = 100
    num_iters = 10
    forks = [
        fork_many_incs.remote(counter, num_incs) for _ in range(num_iters)
    ]
    ray.wait(forks, num_returns=len(forks))
    count += num_incs * num_iters

    # Kill the second plasma store to get rid of the cached objects and
    # trigger the corresponding raylet to exit.
    cluster.list_all_nodes()[1].kill_plasma_store(wait=True)

    # Check that the actor restored from a checkpoint.
    assert ray.get(counter.test_restore.remote())
    # Check that we can submit another call on the actor and get the
    # correct counter result.
    x = ray.get(counter.inc.remote())
    assert x == count + 1
def _test_nondeterministic_reconstruction(
        cluster, num_forks, num_items_per_fork, num_forks_to_wait):
    """Shared driver: concurrently push items from several forked handles
    onto a shared Queue actor, kill the actor's node, and verify that the
    reconstructed queue replays a consistent prefix of the original order.

    Args:
        cluster: The Ray test cluster.
        num_forks: Number of concurrent enqueue tasks (forked handles).
        num_items_per_fork: Items pushed per fork.
        num_forks_to_wait: How many forks to wait for before the kill.
    """
    # Make a shared queue.
    @ray.remote
    class Queue:
        def __init__(self):
            self.queue = []

        def node_id(self):
            return ray.worker.global_worker.node.unique_id

        def push(self, item):
            self.queue.append(item)

        def read(self):
            return self.queue

    # Schedule the shared queue onto the remote raylet.
    node_id = ray.worker.global_worker.node.unique_id
    actor = Queue.remote()
    while ray.get(actor.node_id.remote()) == node_id:
        actor = Queue.remote()

    # A task that takes in the shared queue and a list of items to enqueue,
    # one by one.
    @ray.remote
    def enqueue(queue, items):
        done = None
        for item in items:
            done = queue.push.remote(item)
        # TODO(swang): Return the object ID returned by the last method
        # called on the shared queue, so that the caller of enqueue can
        # wait for all of the queue methods to complete. This can be
        # removed once join consistency is implemented.
        return [done]

    # Call the enqueue task num_forks times, each with num_items_per_fork
    # unique objects to push onto the shared queue.
    enqueue_tasks = []
    for fork in range(num_forks):
        enqueue_tasks.append(
            enqueue.remote(actor,
                           [(fork, i) for i in range(num_items_per_fork)]))
    # Wait for the forks to complete their tasks.
    enqueue_tasks = ray.get(enqueue_tasks)
    enqueue_tasks = [fork_ids[0] for fork_ids in enqueue_tasks]
    ray.wait(enqueue_tasks, num_returns=num_forks_to_wait)

    # Read the queue to get the initial order of execution.
    queue = ray.get(actor.read.remote())

    # Kill the second plasma store to get rid of the cached objects and
    # trigger the corresponding raylet to exit.
    cluster.list_all_nodes()[1].kill_plasma_store(wait=True)

    # Read the queue again and check for deterministic reconstruction.
    ray.get(enqueue_tasks)
    reconstructed_queue = ray.get(actor.read.remote())
    # Make sure the final queue has all items from all forks.
    assert len(reconstructed_queue) == num_forks * num_items_per_fork
    # Make sure that the prefix of the final queue matches the queue from
    # the initial execution.
    assert queue == reconstructed_queue[:len(queue)]
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Currently doesn't work with the new GCS.")
def test_nondeterministic_reconstruction(ray_start_cluster_2_nodes):
    """All forks finish before the kill (num_forks_to_wait == num_forks)."""
    cluster = ray_start_cluster_2_nodes
    _test_nondeterministic_reconstruction(cluster, 10, 100, 10)
@pytest.mark.skip("Nondeterministic reconstruction currently not supported "
                  "when there are concurrent forks that didn't finish "
                  "initial execution.")
def test_nondeterministic_reconstruction_concurrent_forks(
        ray_start_cluster_2_nodes):
    """Only one fork is awaited before the kill, so others are in flight."""
    cluster = ray_start_cluster_2_nodes
    _test_nondeterministic_reconstruction(cluster, 10, 100, 1)
@pytest.fixture
def setup_queue_actor():
    """Yield a handle to a fresh Queue actor on a single-CPU Ray instance.

    Tears down the Ray instance after the test completes.
    """
    ray.init(num_cpus=1, object_store_memory=int(150 * 1024 * 1024))

    @ray.remote
    class Queue:
        def __init__(self):
            self.queue = []

        def enqueue(self, key, item):
            self.queue.append((key, item))

        def read(self):
            return self.queue

    queue = Queue.remote()
    # Make sure queue actor is initialized.
    ray.get(queue.read.remote())

    yield queue

    # The code after the yield will run as teardown code.
    ray.shutdown()
def test_fork(setup_queue_actor):
    """Passing an actor handle into many concurrent tasks: each fork's
    single enqueue should land exactly once."""
    queue = setup_queue_actor

    @ray.remote
    def fork(queue, key, item):
        # ray.get here could be blocked and cause ray to start
        # a lot of python workers.
        return ray.get(queue.enqueue.remote(key, item))

    # Fork num_iters times.
    num_iters = 100
    ray.get([fork.remote(queue, i, 0) for i in range(num_iters)])
    items = ray.get(queue.read.remote())
    for i in range(num_iters):
        filtered_items = [item[1] for item in items if item[0] == i]
        # Each key was enqueued exactly once with item 0.
        assert filtered_items == list(range(1))
def test_fork_consistency(setup_queue_actor):
    """Per-handle task ordering must be preserved: each forked handle's
    items arrive in submission order, interleaved with the original
    handle's items."""
    queue = setup_queue_actor

    @ray.remote
    def fork(queue, key, num_items):
        x = None
        for item in range(num_items):
            x = queue.enqueue.remote(key, item)
        return ray.get(x)

    # Fork num_iters times.
    num_forks = 5
    num_items_per_fork = 100

    # Submit some tasks on new actor handles.
    forks = [
        fork.remote(queue, i, num_items_per_fork) for i in range(num_forks)
    ]
    # Submit some more tasks on the original actor handle.
    for item in range(num_items_per_fork):
        local_fork = queue.enqueue.remote(num_forks, item)
    forks.append(local_fork)
    # Wait for tasks from all handles to complete.
    ray.get(forks)
    # Check that all tasks from all handles have completed.
    items = ray.get(queue.read.remote())
    for i in range(num_forks + 1):
        filtered_items = [item[1] for item in items if item[0] == i]
        assert filtered_items == list(range(num_items_per_fork))
def test_pickled_handle_consistency(setup_queue_actor):
    """Same ordering guarantee as test_fork_consistency, but the handle is
    passed to the forks as an explicitly pickled blob and unpickled inside
    the task."""
    queue = setup_queue_actor

    @ray.remote
    def fork(pickled_queue, key, num_items):
        queue = ray.worker.pickle.loads(pickled_queue)
        x = None
        for item in range(num_items):
            x = queue.enqueue.remote(key, item)
        return ray.get(x)

    # Fork num_iters times.
    num_forks = 10
    num_items_per_fork = 100

    # Submit some tasks on the pickled actor handle.
    new_queue = ray.worker.pickle.dumps(queue)
    forks = [
        fork.remote(new_queue, i, num_items_per_fork) for i in range(num_forks)
    ]
    # Submit some more tasks on the original actor handle.
    for item in range(num_items_per_fork):
        local_fork = queue.enqueue.remote(num_forks, item)
    forks.append(local_fork)
    # Wait for tasks from all handles to complete.
    ray.get(forks)
    # Check that all tasks from all handles have completed.
    items = ray.get(queue.read.remote())
    for i in range(num_forks + 1):
        filtered_items = [item[1] for item in items if item[0] == i]
        assert filtered_items == list(range(num_items_per_fork))
def test_nested_fork(setup_queue_actor):
    """An actor handle forwarded through a nested task must keep per-handle
    ordering at both levels of nesting."""
    queue = setup_queue_actor

    @ray.remote
    def fork(queue, key, num_items):
        x = None
        for item in range(num_items):
            x = queue.enqueue.remote(key, item)
        return ray.get(x)

    @ray.remote
    def nested_fork(queue, key, num_items):
        # Pass the actor into a nested task.
        ray.get(fork.remote(queue, key + 1, num_items))
        x = None
        for item in range(num_items):
            x = queue.enqueue.remote(key, item)
        return ray.get(x)

    # Fork num_iters times.
    num_forks = 10
    num_items_per_fork = 100

    # Submit some tasks on new actor handles.
    forks = [
        nested_fork.remote(queue, i, num_items_per_fork)
        for i in range(0, num_forks, 2)
    ]
    ray.get(forks)
    # Check that all tasks from all handles have completed.
    items = ray.get(queue.read.remote())
    for i in range(num_forks):
        filtered_items = [item[1] for item in items if item[0] == i]
        assert filtered_items == list(range(num_items_per_fork))
@pytest.mark.skip("Garbage collection for distributed actor handles not "
                  "implemented.")
def test_garbage_collection(setup_queue_actor):
    """Deleting the driver's handle must not kill the actor while a
    forked handle inside a task is still using it."""
    queue = setup_queue_actor

    @ray.remote
    def fork(queue):
        for i in range(10):
            x = queue.enqueue.remote(0, i)
            time.sleep(0.1)
        return ray.get(x)

    x = fork.remote(queue)
    ray.get(queue.read.remote())
    # Drop the driver's reference while the forked task is still running.
    del queue

    print(ray.get(x))
def test_calling_put_on_actor_handle(ray_start_regular):
    """ray.put-ing an actor handle and getting it back yields a usable
    handle sharing the same underlying actor; returning a bare handle from
    a task raises, while returning it inside a list does not."""
    @ray.remote
    class Counter:
        def __init__(self):
            self.x = 0

        def inc(self):
            self.x += 1
            return self.x

    @ray.remote
    def f():
        return Counter.remote()

    @ray.remote
    def g():
        return [Counter.remote()]

    # Currently, calling ray.put on an actor handle is allowed, but is
    # there a good use case?
    counter = Counter.remote()
    counter_id = ray.put(counter)
    new_counter = ray.get(counter_id)
    assert ray.get(new_counter.inc.remote()) == 1
    assert ray.get(counter.inc.remote()) == 2
    assert ray.get(new_counter.inc.remote()) == 3

    with pytest.raises(Exception):
        ray.get(f.remote())

    # The below test works, but do we want to disallow this usage?
    ray.get(g.remote())
def test_pickling_actor_handle(ray_start_regular):
    """An actor handle should survive a pickle round trip and stay usable."""

    @ray.remote
    class Foo:
        def method(self):
            pass

    original_handle = Foo.remote()
    serialized = ray.worker.pickle.dumps(original_handle)
    restored_handle = ray.worker.pickle.loads(serialized)
    # Verify that we can call a method on the unpickled handle. TODO(rkn):
    # we should also test this from a different driver.
    ray.get(restored_handle.method.remote())
def test_pickled_actor_handle_call_in_method_twice(ray_start_regular):
    """A handle captured via a closure into another actor's constructor
    must remain callable across multiple method invocations."""
    @ray.remote
    class Actor1:
        def f(self):
            return 1

    @ray.remote
    class Actor2:
        def __init__(self, constructor):
            self.actor = constructor()

        def step(self):
            ray.get(self.actor.f.remote())

    a = Actor1.remote()

    b = Actor2.remote(lambda: a)

    ray.get(b.step.remote())
    # Calling a second time exercises handle reuse inside the method.
    ray.get(b.step.remote())
def test_register_and_get_named_actors(ray_start_regular):
    """register_actor/get_actor round trip: duplicate names and wrong types
    raise, and both handles drive the same underlying actor."""
    # TODO(heyucongtom): We should test this from another driver.

    @ray.remote
    class Foo:
        def __init__(self):
            self.x = 0

        def method(self):
            self.x += 1
            return self.x

    f1 = Foo.remote()
    # Test saving f.
    ray.experimental.register_actor("f1", f1)
    # Test getting f.
    f2 = ray.experimental.get_actor("f1")
    assert f1._actor_id == f2._actor_id

    # Test same name register shall raise error.
    with pytest.raises(ValueError):
        ray.experimental.register_actor("f1", f2)

    # Test register with wrong object type.
    with pytest.raises(TypeError):
        ray.experimental.register_actor("f3", 1)

    # Test getting a nonexistent actor.
    with pytest.raises(ValueError):
        ray.experimental.get_actor("nonexistent")

    # Test method
    assert ray.get(f1.method.remote()) == 1
    assert ray.get(f2.method.remote()) == 2
    assert ray.get(f1.method.remote()) == 3
    assert ray.get(f2.method.remote()) == 4
def test_detached_actor(ray_start_regular):
    """A detached actor created by one driver should be retrievable and
    callable from another driver via its registered name."""
    @ray.remote
    class DetachedActor:
        def ping(self):
            return "pong"

    with pytest.raises(Exception, match="Detached actors must be named"):
        DetachedActor._remote(detached=True)

    with pytest.raises(ValueError, match="Please use a different name"):
        # First creation succeeds; re-using the same name raises.
        _ = DetachedActor._remote(name="d_actor")
        DetachedActor._remote(name="d_actor")

    redis_address = ray_start_regular["redis_address"]

    actor_name = "DetachedActor"
    # This script runs as a separate driver process and creates the
    # detached actor under actor_name.
    driver_script = """
import ray
ray.init(address="{}")

@ray.remote
class DetachedActor:
    def ping(self):
        return "pong"

actor = DetachedActor._remote(name="{}", detached=True)
ray.get(actor.ping.remote())
""".format(redis_address, actor_name)

    run_string_as_driver(driver_script)
    detached_actor = ray.experimental.get_actor(actor_name)
    assert ray.get(detached_actor.ping.remote()) == "pong"
def test_kill(ray_start_regular):
    """__ray_kill__ should terminate an actor mid-task and fail the task."""

    @ray.remote
    class Hanger:
        def hang(self):
            # Never returns; spins until the worker process is killed.
            while True:
                time.sleep(1)

    hanging_actor = Hanger.remote()
    pending = hanging_actor.hang.remote()
    # The task should still be unfinished after half a second.
    finished, _ = ray.wait([pending], timeout=0.5)
    assert len(finished) == 0
    hanging_actor.__ray_kill__()

    # The in-flight task surfaces the actor's death as a RayActorError.
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(pending)
# This test verifies actor creation task failure will not
# hang the caller.
def test_actor_creation_task_crash(ray_start_regular):
    """Death in an actor's constructor must fail callers promptly, and an
    actor with reconstructions remaining should recover from such deaths."""
    # Test actor death in constructor.
    @ray.remote(max_reconstructions=0)
    class Actor:
        def __init__(self):
            print("crash")
            os._exit(0)

        def f(self):
            return "ACTOR OK"

    # Verify an exception is thrown.
    a = Actor.remote()
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(a.f.remote())

    # Test an actor can be reconstructed successfully
    # after it dies in its constructor.
    @ray.remote(max_reconstructions=3)
    class ReconstructableActor:
        def __init__(self):
            count = self.get_count()
            count += 1
            # Make it die for the first 2 times.
            if count < 3:
                self.set_count(count)
                print("crash: " + str(count))
                os._exit(0)
            else:
                print("no crash")

        def f(self):
            return "ACTOR OK"

        def get_count(self):
            # The crash count is persisted in the internal KV store so it
            # survives the actor process dying.
            value = _internal_kv_get("count")
            if value is None:
                count = 0
            else:
                count = int(value)
            return count

        def set_count(self, count):
            _internal_kv_put("count", count, True)

    # Verify we can get the object successfully.
    ra = ReconstructableActor.remote()
    ray.get(ra.f.remote())
# Allow running this test module directly: `python <this file>`.
if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_actor_failures.py
|
Python
|
import collections
import json
import numpy as np
import os
import pytest
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import signal
import sys
import time
import ray
import ray.ray_constants as ray_constants
import ray.test_utils
import ray.cluster_utils
from ray.test_utils import (relevant_errors, wait_for_condition,
wait_for_errors, wait_for_pid_to_exit,
generate_internal_config_map)
@pytest.fixture
def ray_checkpointable_actor_cls(request):
    """Yield a Checkpointable actor class that persists checkpoints as lines
    of "<checkpoint_id> <value>" in files under /tmp/ray_temp_checkpoint_dir/,
    keyed by actor ID. Checkpoints are taken when the counter reaches 3.
    """
    checkpoint_dir = "/tmp/ray_temp_checkpoint_dir/"
    if not os.path.isdir(checkpoint_dir):
        os.mkdir(checkpoint_dir)

    class CheckpointableActor(ray.actor.Checkpointable):
        def __init__(self):
            self.value = 0
            self.resumed_from_checkpoint = False
            self.checkpoint_dir = checkpoint_dir

        def node_id(self):
            return ray.worker.global_worker.node.unique_id

        def increase(self):
            self.value += 1
            return self.value

        def get(self):
            return self.value

        def was_resumed_from_checkpoint(self):
            return self.resumed_from_checkpoint

        def get_pid(self):
            return os.getpid()

        def should_checkpoint(self, checkpoint_context):
            # Checkpoint the actor when value is increased to 3.
            should_checkpoint = self.value == 3
            return should_checkpoint

        def save_checkpoint(self, actor_id, checkpoint_id):
            actor_id, checkpoint_id = actor_id.hex(), checkpoint_id.hex()
            # Save checkpoint into a file.
            with open(self.checkpoint_dir + actor_id, "a+") as f:
                print(checkpoint_id, self.value, file=f)

        def load_checkpoint(self, actor_id, available_checkpoints):
            actor_id = actor_id.hex()
            filename = self.checkpoint_dir + actor_id
            # Load checkpoint from the file.
            if not os.path.isfile(filename):
                return None

            available_checkpoint_ids = [
                c.checkpoint_id for c in available_checkpoints
            ]
            with open(filename, "r") as f:
                for line in f:
                    checkpoint_id, value = line.strip().split(" ")
                    checkpoint_id = ray.ActorCheckpointID(
                        ray.utils.hex_to_binary(checkpoint_id))
                    # Only resume from a checkpoint the backend still knows.
                    if checkpoint_id in available_checkpoint_ids:
                        self.value = int(value)
                        self.resumed_from_checkpoint = True
                        return checkpoint_id
            return None

        def checkpoint_expired(self, actor_id, checkpoint_id):
            pass

    return CheckpointableActor
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_actor_eviction(ray_start_object_store_memory):
    """Filling the object store via actor tasks should evict older objects
    (UnreconstructableError) while keeping recent ones retrievable."""
    object_store_memory = ray_start_object_store_memory

    @ray.remote
    class Actor:
        def __init__(self):
            pass

        def create_object(self, size):
            return np.random.rand(size)

    a = Actor.remote()
    # Submit enough methods on the actor so that they exceed the size of the
    # object store.
    objects = []
    num_objects = 20
    for _ in range(num_objects):
        obj = a.create_object.remote(object_store_memory // num_objects)
        objects.append(obj)
        # Get each object once to make sure each object gets created.
        ray.get(obj)

    # Get each object again. At this point, the earlier objects should have
    # been evicted.
    num_evicted, num_success = 0, 0
    for obj in objects:
        try:
            val = ray.get(obj)
            assert isinstance(val, np.ndarray), val
            num_success += 1
        except ray.exceptions.UnreconstructableError:
            num_evicted += 1
    # Some objects should have been evicted, and some should still be in the
    # object store.
    assert num_evicted > 0
    assert num_success > 0
def test_actor_reconstruction(ray_start_regular):
    """Test actor reconstruction when actor process is killed.

    Kills the actor process with SIGKILL mid-task, checks the in-flight
    task survives, then exhausts max_reconstructions and verifies further
    calls fail. Also checks a deliberately terminated actor is never
    reconstructed.
    """

    @ray.remote(max_reconstructions=1)
    class ReconstructableActor:
        """An actor that will be reconstructed at most once."""

        def __init__(self):
            self.value = 0

        def increase(self, delay=0):
            time.sleep(delay)
            self.value += 1
            return self.value

        def get_pid(self):
            return os.getpid()

    actor = ReconstructableActor.remote()
    pid = ray.get(actor.get_pid.remote())
    # Call increase 3 times
    for _ in range(3):
        ray.get(actor.increase.remote())
    # Call increase again with some delay.
    result = actor.increase.remote(delay=0.5)
    # Sleep some time to wait for the above task to start execution.
    time.sleep(0.2)
    # Kill actor process, while the above task is still being executed.
    os.kill(pid, signal.SIGKILL)
    # Check that the above task didn't fail and the actor is reconstructed.
    assert ray.get(result) == 4
    # Check that we can still call the actor.
    assert ray.get(actor.increase.remote()) == 5
    # kill actor process one more time.
    pid = ray.get(actor.get_pid.remote())
    os.kill(pid, signal.SIGKILL)
    # The actor has exceeded max reconstructions, and this task should fail.
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(actor.increase.remote())

    # Create another actor.
    actor = ReconstructableActor.remote()
    # Intentionlly exit the actor
    actor.__ray_terminate__.remote()
    # Check that the actor won't be reconstructed.
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(actor.increase.remote())
def test_actor_reconstruction_without_task(ray_start_regular):
    """Test a dead actor can be reconstructed without sending task to it.

    The constructor puts a fresh object into plasma on each (re)start, so
    the appearance of the second object proves reconstruction happened
    spontaneously.
    """

    @ray.remote(max_reconstructions=1)
    class ReconstructableActor:
        def __init__(self, obj_ids):
            for obj_id in obj_ids:
                # Every time the actor gets constructed,
                # put a new object in plasma store.
                global_worker = ray.worker.global_worker
                if not global_worker.core_worker.object_exists(obj_id):
                    global_worker.put_object(1, obj_id)
                    break

        def get_pid(self):
            return os.getpid()

    obj_ids = [ray.ObjectID.from_random() for _ in range(2)]
    actor = ReconstructableActor.remote(obj_ids)
    # Kill the actor.
    pid = ray.get(actor.get_pid.remote())
    os.kill(pid, signal.SIGKILL)
    # Wait until the actor is reconstructed.
    assert wait_for_condition(
        lambda: ray.worker.global_worker.core_worker.object_exists(obj_ids[1]),
        timeout_ms=5000)
def test_actor_reconstruction_on_node_failure(ray_start_cluster_head):
    """Test actor reconstruction when node dies unexpectedly.

    Repeatedly kills the node hosting the actor and verifies the actor is
    reconstructed elsewhere with its state intact, until
    max_reconstructions is exhausted.
    """
    cluster = ray_start_cluster_head
    max_reconstructions = 3
    # Add a few nodes to the cluster.
    # Use custom resource to make sure the actor is only created on worker
    # nodes, not on the head node.
    for _ in range(max_reconstructions + 2):
        cluster.add_node(
            resources={"a": 1},
            _internal_config=json.dumps({
                "initial_reconstruction_timeout_milliseconds": 200,
                "num_heartbeats_timeout": 10,
            }),
        )

    def kill_node(node_id):
        # Remove the cluster node whose unique_id matches node_id.
        node_to_remove = None
        for node in cluster.worker_nodes:
            if node_id == node.unique_id:
                node_to_remove = node
        cluster.remove_node(node_to_remove)

    @ray.remote(max_reconstructions=max_reconstructions, resources={"a": 1})
    class MyActor:
        def __init__(self):
            self.value = 0

        def increase(self):
            self.value += 1
            return self.value

        def get_object_store_socket(self):
            return ray.worker.global_worker.node.unique_id

    actor = MyActor.remote()
    # Call increase 3 times.
    for _ in range(3):
        ray.get(actor.increase.remote())

    for i in range(max_reconstructions):
        object_store_socket = ray.get(actor.get_object_store_socket.remote())
        # Kill actor's node and the actor should be reconstructed
        # on a different node.
        kill_node(object_store_socket)
        # Call increase again.
        # Check that the actor is reconstructed and value is correct.
        assert ray.get(actor.increase.remote()) == 4 + i
        # Check that the actor is now on a different node.
        assert object_store_socket != ray.get(
            actor.get_object_store_socket.remote())

    # kill the node again.
    object_store_socket = ray.get(actor.get_object_store_socket.remote())
    kill_node(object_store_socket)
    # The actor has exceeded max reconstructions, and this task should fail.
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(actor.increase.remote())
# NOTE(hchen): we set initial_reconstruction_timeout_milliseconds to 1s for
# this test. Because if this value is too small, spurious task reconstruction
# may happen and cause the test failure. If the value is too large, this test
# could be very slow. We can remove this once we support dynamic timeout.
@pytest.mark.parametrize(
    "ray_start_cluster_head", [
        generate_internal_config_map(
            initial_reconstruction_timeout_milliseconds=1000)
    ],
    indirect=True)
def test_multiple_actor_reconstruction(ray_start_cluster_head):
    """Stress test: many infinitely-reconstructable actors survive repeated
    node removals, and every actor's results stay a correct, gap-free
    sequence."""
    cluster = ray_start_cluster_head
    # This test can be made more stressful by increasing the numbers below.
    # The total number of actors created will be
    # num_actors_at_a_time * num_nodes.
    num_nodes = 5
    num_actors_at_a_time = 3
    num_function_calls_at_a_time = 10

    worker_nodes = [
        cluster.add_node(
            num_cpus=3,
            _internal_config=json.dumps({
                "initial_reconstruction_timeout_milliseconds": 200,
                "num_heartbeats_timeout": 10,
            })) for _ in range(num_nodes)
    ]

    @ray.remote(max_reconstructions=ray.ray_constants.INFINITE_RECONSTRUCTION)
    class SlowCounter:
        def __init__(self):
            self.x = 0

        def inc(self, duration):
            time.sleep(duration)
            self.x += 1
            return self.x

    # Create some initial actors.
    actors = [SlowCounter.remote() for _ in range(num_actors_at_a_time)]

    # Wait for the actors to start up.
    time.sleep(1)

    # This is a mapping from actor handles to object IDs returned by
    # methods on that actor.
    result_ids = collections.defaultdict(lambda: [])

    # In a loop we are going to create some actors, run some methods, kill
    # a raylet, and run some more methods.
    for node in worker_nodes:
        # Create some actors.
        actors.extend(
            [SlowCounter.remote() for _ in range(num_actors_at_a_time)])
        # Run some methods.
        for j in range(len(actors)):
            actor = actors[j]
            for _ in range(num_function_calls_at_a_time):
                result_ids[actor].append(actor.inc.remote(j**2 * 0.000001))
        # Kill a node.
        cluster.remove_node(node)

        # Run some more methods.
        for j in range(len(actors)):
            actor = actors[j]
            for _ in range(num_function_calls_at_a_time):
                result_ids[actor].append(actor.inc.remote(j**2 * 0.000001))

    # Get the results and check that they have the correct values.
    for _, result_id_list in result_ids.items():
        results = list(range(1, len(result_id_list) + 1))
        assert ray.get(result_id_list) == results
def kill_actor(actor):
    """Forcibly terminate *actor*'s worker process and wait for it to die.

    The actor must expose a ``get_pid`` remote method.
    """
    actor_pid = ray.get(actor.get_pid.remote())
    # SIGKILL cannot be trapped, so the worker dies immediately.
    os.kill(actor_pid, signal.SIGKILL)
    # Block until the OS has actually reaped the process.
    wait_for_pid_to_exit(actor_pid)
def test_checkpointing(ray_start_regular, ray_checkpointable_actor_cls):
    """Test actor checkpointing and restoring from a checkpoint.

    The fixture actor checkpoints at value == 3; after a kill, the actor
    must resume from the checkpoint and replay post-checkpoint tasks.
    """
    actor = ray.remote(
        max_reconstructions=2)(ray_checkpointable_actor_cls).remote()
    # Call increase 3 times, triggering a checkpoint.
    expected = 0
    for _ in range(3):
        ray.get(actor.increase.remote())
        expected += 1
    # Assert that the actor wasn't resumed from a checkpoint.
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False
    # Kill actor process.
    kill_actor(actor)
    # Assert that the actor was resumed from a checkpoint and its value is
    # still correct.
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is True

    # Submit some more tasks. These should get replayed since they happen after
    # the checkpoint.
    for _ in range(3):
        ray.get(actor.increase.remote())
        expected += 1
    # Kill actor again and check that reconstruction still works after the
    # actor resuming from a checkpoint.
    kill_actor(actor)
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is True
def test_remote_checkpointing(ray_start_regular, ray_checkpointable_actor_cls):
    """Test checkpointing of a remote actor through method invocation."""

    # Define a class that exposes a method to save checkpoints.
    class RemoteCheckpointableActor(ray_checkpointable_actor_cls):
        def __init__(self):
            super(RemoteCheckpointableActor, self).__init__()
            self._should_checkpoint = False

        def checkpoint(self):
            # Arm a one-shot checkpoint; taken after this task completes.
            self._should_checkpoint = True

        def should_checkpoint(self, checkpoint_context):
            should_checkpoint = self._should_checkpoint
            self._should_checkpoint = False
            return should_checkpoint

    cls = ray.remote(max_reconstructions=2)(RemoteCheckpointableActor)
    actor = cls.remote()
    # Call increase 3 times.
    expected = 0
    for _ in range(3):
        ray.get(actor.increase.remote())
        expected += 1
    # Call a checkpoint task.
    actor.checkpoint.remote()
    # Assert that the actor wasn't resumed from a checkpoint.
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False
    # Kill actor process.
    kill_actor(actor)
    # Assert that the actor was resumed from a checkpoint and its value is
    # still correct.
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is True

    # Submit some more tasks. These should get replayed since they happen after
    # the checkpoint.
    for _ in range(3):
        ray.get(actor.increase.remote())
        expected += 1
    # Kill actor again and check that reconstruction still works after the
    # actor resuming from a checkpoint.
    kill_actor(actor)
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is True
def test_checkpointing_on_node_failure(ray_start_cluster_2_nodes,
                                       ray_checkpointable_actor_cls):
    """Test actor checkpointing on a remote node."""
    # Place the actor on the remote node.
    cluster = ray_start_cluster_2_nodes
    remote_node = list(cluster.worker_nodes)
    actor_cls = ray.remote(max_reconstructions=1)(ray_checkpointable_actor_cls)
    actor = actor_cls.remote()
    # Retry until the actor lands on the worker (non-head) node.
    while (ray.get(actor.node_id.remote()) != remote_node[0].unique_id):
        actor = actor_cls.remote()

    # Call increase several times.
    expected = 0
    for _ in range(6):
        ray.get(actor.increase.remote())
        expected += 1
    # Assert that the actor wasn't resumed from a checkpoint.
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False
    # Kill actor process.
    cluster.remove_node(remote_node[0])
    # Assert that the actor was resumed from a checkpoint and its value is
    # still correct.
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is True
def test_checkpointing_save_exception(ray_start_regular,
                                      ray_checkpointable_actor_cls):
    """Test actor can still be recovered if checkpoints fail to complete."""

    @ray.remote(max_reconstructions=2)
    class RemoteCheckpointableActor(ray_checkpointable_actor_cls):
        def save_checkpoint(self, actor_id, checkpoint_context):
            raise Exception("Intentional error saving checkpoint.")

    actor = RemoteCheckpointableActor.remote()
    # Call increase 3 times, triggering a checkpoint that will fail.
    expected = 0
    for _ in range(3):
        ray.get(actor.increase.remote())
        expected += 1
    # Assert that the actor wasn't resumed from a checkpoint.
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False
    # Kill actor process.
    kill_actor(actor)
    # Assert that the actor still wasn't resumed from a checkpoint and its
    # value is still correct.
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False

    # Submit some more tasks. These should get replayed since they happen after
    # the checkpoint.
    for _ in range(3):
        ray.get(actor.increase.remote())
        expected += 1
    # Kill actor again, and check that reconstruction still works and the actor
    # wasn't resumed from a checkpoint.
    kill_actor(actor)
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False

    # Check that the checkpoint error was pushed to the driver.
    wait_for_errors(ray_constants.CHECKPOINT_PUSH_ERROR, 1)
def test_checkpointing_load_exception(ray_start_regular,
                                      ray_checkpointable_actor_cls):
    """Test actor can still be recovered if checkpoints fail to load."""

    @ray.remote(max_reconstructions=2)
    class RemoteCheckpointableActor(ray_checkpointable_actor_cls):
        def load_checkpoint(self, actor_id, checkpoints):
            raise Exception("Intentional error loading checkpoint.")

    actor = RemoteCheckpointableActor.remote()
    # Call increase 3 times, triggering a checkpoint that will succeed.
    expected = 0
    for _ in range(3):
        ray.get(actor.increase.remote())
        expected += 1
    # Assert that the actor wasn't resumed from a checkpoint because loading
    # it failed.
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False
    # Kill actor process.
    kill_actor(actor)
    # Assert that the actor still wasn't resumed from a checkpoint and its
    # value is still correct.
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False

    # Submit some more tasks. These should get replayed since they happen after
    # the checkpoint.
    for _ in range(3):
        ray.get(actor.increase.remote())
        expected += 1
    # Kill actor again, and check that reconstruction still works and the actor
    # wasn't resumed from a checkpoint.
    kill_actor(actor)
    assert ray.get(actor.get.remote()) == expected
    assert ray.get(actor.was_resumed_from_checkpoint.remote()) is False

    # Check that the checkpoint error was pushed to the driver.
    wait_for_errors(ray_constants.CHECKPOINT_PUSH_ERROR, 1)
@pytest.mark.parametrize(
    "ray_start_regular",
    # This overwrite currently isn't effective,
    # see https://github.com/ray-project/ray/issues/3926.
    [generate_internal_config_map(num_actor_checkpoints_to_keep=20)],
    indirect=True,
)
def test_deleting_actor_checkpoint(ray_start_regular):
    """Test deleting old actor checkpoints.

    Every task triggers a checkpoint (should_checkpoint always returns
    True); once the kept-checkpoint limit is reached, checkpoint_expired()
    must be invoked for the oldest checkpoint first.
    """
    @ray.remote
    class CheckpointableActor(ray.actor.Checkpointable):
        def __init__(self):
            # Checkpoint IDs currently believed live, oldest first.
            self.checkpoint_ids = []
        def get_checkpoint_ids(self):
            return self.checkpoint_ids
        def should_checkpoint(self, checkpoint_context):
            # Save checkpoints after every task
            return True
        def save_checkpoint(self, actor_id, checkpoint_id):
            self.checkpoint_ids.append(checkpoint_id)
            pass
        def load_checkpoint(self, actor_id, available_checkpoints):
            pass
        def checkpoint_expired(self, actor_id, checkpoint_id):
            # Expiry must always target the oldest remaining checkpoint.
            assert checkpoint_id == self.checkpoint_ids[0]
            del self.checkpoint_ids[0]
    actor = CheckpointableActor.remote()
    # Each get_checkpoint_ids call itself adds one checkpoint, so the count
    # grows by one per call until the cap is hit.
    for i in range(19):
        assert len(ray.get(actor.get_checkpoint_ids.remote())) == i + 1
    # Past the cap, old checkpoints expire and the count plateaus at 20.
    for _ in range(20):
        assert len(ray.get(actor.get_checkpoint_ids.remote())) == 20
def test_bad_checkpointable_actor_class():
    """A Checkpointable subclass missing abstract methods must be rejected.

    Only ``should_checkpoint`` is implemented here, so applying
    ``ray.remote`` to the class must raise ``TypeError``.
    """
    with pytest.raises(TypeError):

        @ray.remote
        class IncompleteCheckpointableActor(ray.actor.Checkpointable):
            def should_checkpoint(self, checkpoint_context):
                return True
def test_init_exception_in_checkpointable_actor(ray_start_regular,
                                                ray_checkpointable_actor_cls):
    """Exceptions from a checkpointable actor's __init__ and methods must be
    pushed to the driver as TASK_PUSH_ERRORs."""
    # This test is similar to test_failure.py::test_failed_actor_init.
    # This test is used to guarantee that checkpointable actor does not
    # break the same logic.
    error_message1 = "actor constructor failed"
    error_message2 = "actor method failed"
    @ray.remote
    class CheckpointableFailedActor(ray_checkpointable_actor_cls):
        def __init__(self):
            raise Exception(error_message1)
        def fail_method(self):
            raise Exception(error_message2)
        def should_checkpoint(self, checkpoint_context):
            return True
    a = CheckpointableFailedActor.remote()
    # Make sure that we get errors from a failed constructor.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert error_message1 in errors[0]["message"]
    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 2
    # NOTE(review): the second error is checked against the *constructor*
    # message — presumably calling a method on a failed actor re-surfaces
    # the init failure; confirm against test_failure.py.
    assert error_message1 in errors[1]["message"]
def test_decorated_method(ray_start_regular):
    """Actor methods may carry custom invocation/execution decorators via
    the ``__ray_invocation_decorator__`` attribute."""
    def method_invocation_decorator(f):
        def new_f_invocation(args, kwargs):
            # Split one argument into two. Return the kwargs without passing
            # them into the actor.
            return f([args[0], args[0]], {}), kwargs
        return new_f_invocation
    def method_execution_decorator(f):
        def new_f_execution(self, b, c):
            # Turn two arguments into one.
            return f(self, b + c)
        # Attach the invocation-side decorator; the test below shows it is
        # applied when .remote() is called.
        new_f_execution.__ray_invocation_decorator__ = (
            method_invocation_decorator)
        return new_f_execution
    @ray.remote
    class Actor:
        @method_execution_decorator
        def decorated_method(self, x):
            return x + 1
    a = Actor.remote()
    # The invocation decorator duplicates the positional arg and returns the
    # kwargs alongside the ObjectID instead of sending them to the actor.
    object_id, extra = a.decorated_method.remote(3, kwarg=3)
    assert isinstance(object_id, ray.ObjectID)
    assert extra == {"kwarg": 3}
    assert ray.get(object_id) == 7  # 2 * 3 + 1
@pytest.mark.skipif(
    pytest_timeout is None,
    reason="Timeout package not installed; skipping test that may hang.")
@pytest.mark.timeout(20)
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_cpus": 1,
        "num_nodes": 2,
    }], indirect=True)
def test_ray_wait_dead_actor(ray_start_cluster):
    """Tests that methods completed by dead actors are returned as ready.

    Both the driver and a nested actor poll ray.wait until results owned by
    an actor on a removed node are reported ready.
    """
    cluster = ray_start_cluster
    @ray.remote(num_cpus=1)
    class Actor:
        def __init__(self):
            pass
        def node_id(self):
            return ray.worker.global_worker.node.unique_id
        def ping(self):
            time.sleep(1)
    # Create some actors and wait for them to initialize.
    num_nodes = len(cluster.list_all_nodes())
    actors = [Actor.remote() for _ in range(num_nodes)]
    ray.get([actor.ping.remote() for actor in actors])
    # Ping the actors and make sure the tasks complete.
    ping_ids = [actor.ping.remote() for actor in actors]
    ray.get(ping_ids)
    # Evict the result from the node that we're about to kill.
    remote_node = cluster.list_all_nodes()[-1]
    remote_ping_id = None
    for i, actor in enumerate(actors):
        if ray.get(actor.node_id.remote()) == remote_node.unique_id:
            remote_ping_id = ping_ids[i]
    ray.internal.free([remote_ping_id], local_only=True)
    cluster.remove_node(remote_node)
    # Repeatedly call ray.wait until the exception for the dead actor is
    # received.
    unready = ping_ids[:]
    while unready:
        _, unready = ray.wait(unready, timeout=0)
        time.sleep(1)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(ping_ids)
    # Evict the result from the dead node.
    ray.internal.free([remote_ping_id], local_only=True)
    # Create an actor on the local node that will call ray.wait in a loop.
    # A custom resource only the head node holds pins the actor there.
    head_node_resource = "HEAD_NODE"
    ray.experimental.set_resource(head_node_resource, 1)
    @ray.remote(num_cpus=0, resources={head_node_resource: 1})
    class ParentActor:
        def __init__(self, ping_ids):
            self.unready = ping_ids
        def wait(self):
            # True once every tracked result has been reported ready.
            _, self.unready = ray.wait(self.unready, timeout=0)
            return len(self.unready) == 0
        def ping(self):
            return
    # Repeatedly call ray.wait through the local actor until the exception for
    # the dead actor is received.
    parent_actor = ParentActor.remote(ping_ids)
    ray.get(parent_actor.ping.remote())
    failure_detected = False
    while not failure_detected:
        failure_detected = ray.get(parent_actor.wait.remote())
# Allow running this test module directly: delegates to pytest.
if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_actor_pool.py
|
Python
|
import time
import pytest
import ray
from ray.experimental import ActorPool
@pytest.fixture
def init():
    """Provide a fresh 4-CPU local Ray runtime, torn down after the test."""
    ray.init(num_cpus=4)
    yield
    ray.shutdown()
def test_get_next(init):
    """Submitting one task at a time yields results in submission order."""

    @ray.remote
    class MyActor:
        def __init__(self):
            pass

        def f(self, x):
            return x + 1

        def double(self, x):
            return 2 * x

    workers = [MyActor.remote() for _ in range(4)]
    pool = ActorPool(workers)
    for value in range(5):
        pool.submit(lambda actor, v: actor.f.remote(v), value)
        assert pool.get_next() == value + 1
def test_get_next_unordered(init):
    """Unordered retrieval drains every submitted result."""

    @ray.remote
    class MyActor:
        def __init__(self):
            pass

        def f(self, x):
            return x + 1

        def double(self, x):
            return 2 * x

    pool = ActorPool([MyActor.remote() for _ in range(4)])
    for value in range(5):
        pool.submit(lambda actor, v: actor.f.remote(v), value)
    seen = []
    while pool.has_next():
        seen.append(pool.get_next_unordered())
    # Every result is some input plus one, in arbitrary order.
    assert all(result in [1, 2, 3, 4, 5] for result in seen)
def test_map(init):
    """pool.map() yields results in the same order as its input iterable."""

    @ray.remote
    class MyActor:
        def __init__(self):
            pass

        def f(self, x):
            return x + 1

        def double(self, x):
            return 2 * x

    pool = ActorPool([MyActor.remote() for _ in range(4)])
    mapped = pool.map(lambda actor, v: actor.double.remote(v), range(5))
    for position, result in enumerate(mapped):
        assert result == 2 * position
def test_map_unordered(init):
    """Consuming pool.map() into a list yields only doubled inputs."""

    @ray.remote
    class MyActor:
        def __init__(self):
            pass

        def f(self, x):
            return x + 1

        def double(self, x):
            return 2 * x

    pool = ActorPool([MyActor.remote() for _ in range(4)])
    collected = list(pool.map(lambda actor, v: actor.double.remote(v), range(5)))
    assert all(result in [0, 2, 4, 6, 8] for result in collected)
def test_get_next_timeout(init):
    """get_next() must raise TimeoutError while the in-flight task blocks.

    The submitted method never returns, so waiting for the ordered result
    with a finite timeout has to time out.
    """
    @ray.remote
    class MyActor:
        def __init__(self):
            pass

        def f(self, x):
            # Block forever so the pool's pending future never resolves.
            while True:
                x = x + 1
                time.sleep(1)

        def double(self, x):
            return 2 * x

    actors = [MyActor.remote() for _ in range(4)]
    pool = ActorPool(actors)
    pool.submit(lambda a, v: a.f.remote(v), 0)
    # BUG FIX: this test previously called get_next_unordered(), duplicating
    # test_get_next_unordered_timeout below; exercise the ordered
    # get_next() timeout path instead, matching the test's name.
    with pytest.raises(TimeoutError):
        pool.get_next(timeout=5)
def test_get_next_unordered_timeout(init):
    """get_next_unordered() must time out while the only task blocks forever."""

    @ray.remote
    class MyActor:
        def __init__(self):
            pass

        def f(self, x):
            # Never returns: keeps the submitted result pending.
            while True:
                x + 1
                time.sleep(1)
            return

        def double(self, x):
            return 2 * x

    pool = ActorPool([MyActor.remote() for _ in range(4)])
    pool.submit(lambda actor, v: actor.f.remote(v), 0)
    with pytest.raises(TimeoutError):
        pool.get_next_unordered(5)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_actor_resources.py
|
Python
|
import collections
import json
import os
import pytest
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import sys
import time
import ray
import ray.test_utils
import ray.cluster_utils
from ray import ray_constants
# Whether Ray is configured for direct actor calls; used below to skip
# tests that depend on per-method actor resources.
RAY_FORCE_DIRECT = ray_constants.direct_call_enabled()
def test_actor_deletion_with_gpus(shutdown_only):
    """The single GPU must be released each time a GPU actor goes away."""
    ray.init(
        num_cpus=1, num_gpus=1, object_store_memory=int(150 * 1024 * 1024))
    # When an actor that uses a GPU exits, make sure that the GPU resources
    # are released.
    @ray.remote(num_gpus=1)
    class Actor:
        def getpid(self):
            return os.getpid()
    for _ in range(5):
        # If we can successfully create an actor, that means that enough
        # GPU resources are available. Rebinding `a` drops the previous
        # handle, which must free its GPU for the next iteration.
        a = Actor.remote()
        ray.get(a.getpid.remote())
def test_actor_state(ray_start_regular):
    """Each actor instance keeps its own independent counter state."""

    @ray.remote
    class Counter:
        def __init__(self):
            self.value = 0

        def increase(self):
            self.value += 1

        def value(self):
            return self.value

    first = Counter.remote()
    first.increase.remote()
    assert ray.get(first.value.remote()) == 1

    # A second instance starts from zero, unaffected by the first.
    second = Counter.remote()
    second.increase.remote()
    second.increase.remote()
    assert ray.get(second.value.remote()) == 2
def test_actor_class_methods(ray_start_regular):
    """classmethods — including one that wraps the class with ray.remote —
    remain callable through an actor handle."""

    class Foo:
        x = 2

        @classmethod
        def as_remote(cls):
            return ray.remote(cls)

        @classmethod
        def f(cls):
            return cls.x

        @classmethod
        def g(cls, y):
            return cls.x + y

        def echo(self, value):
            return value

    handle = Foo.as_remote().remote()
    assert ray.get(handle.f.remote()) == 2
    assert ray.get(handle.g.remote(2)) == 4
    assert ray.get(handle.echo.remote(2)) == 2
@pytest.mark.skipif(RAY_FORCE_DIRECT, reason="no actor method resources")
def test_resource_assignment(shutdown_only):
    """Test to make sure that we assign resource to actors at instantiation.

    Exercises every combination of resources declared in the @ray.remote
    decorator vs. overridden at instantiation via _remote(), and checks that
    the actor's lifetime resources match its method resources.
    """
    # This test will create 16 actors. Declaring this many CPUs initially will
    # speed up the test because the workers will be started ahead of time.
    ray.init(
        num_cpus=16,
        num_gpus=1,
        resources={"Custom": 1},
        object_store_memory=int(150 * 1024 * 1024))

    class Actor:
        def __init__(self):
            self.resources = ray.get_resource_ids()

        def get_actor_resources(self):
            return self.resources

        def get_actor_method_resources(self):
            return ray.get_resource_ids()

    decorator_resource_args = [{}, {
        "num_cpus": 0.1
    }, {
        "num_gpus": 0.1
    }, {
        "resources": {
            "Custom": 0.1
        }
    }]
    instantiation_resource_args = [{}, {
        "num_cpus": 0.2
    }, {
        "num_gpus": 0.2
    }, {
        "resources": {
            "Custom": 0.2
        }
    }]
    for decorator_args in decorator_resource_args:
        for instantiation_args in instantiation_resource_args:
            if len(decorator_args) == 0:
                actor_class = ray.remote(Actor)
            else:
                actor_class = ray.remote(**decorator_args)(Actor)
            actor = actor_class._remote(**instantiation_args)
            actor_resources = ray.get(actor.get_actor_resources.remote())
            actor_method_resources = ray.get(
                actor.get_actor_method_resources.remote())
            if len(decorator_args) == 0 and len(instantiation_args) == 0:
                assert len(actor_resources) == 0, (
                    "Actor should not be assigned resources.")
                assert list(actor_method_resources.keys()) == [
                    "CPU"
                ], ("Actor method should only have CPUs")
                assert actor_method_resources["CPU"][0][1] == 1, (
                    "Actor method should default to one cpu.")
            else:
                if ("num_cpus" not in decorator_args
                        and "num_cpus" not in instantiation_args):
                    assert actor_resources["CPU"][0][1] == 1, (
                        "Actor should default to one cpu.")
                correct_resources = {}
                # Instantiation args take precedence over decorator args.
                defined_resources = decorator_args.copy()
                defined_resources.update(instantiation_args)
                for resource, value in defined_resources.items():
                    if resource == "num_cpus":
                        correct_resources["CPU"] = value
                    elif resource == "num_gpus":
                        correct_resources["GPU"] = value
                    elif resource == "resources":
                        for custom_resource, amount in value.items():
                            correct_resources[custom_resource] = amount
                for resource, amount in correct_resources.items():
                    # BUG FIX: these assertion messages were tuples with
                    # .format() applied to only the final fragment, so they
                    # never rendered correctly; they are now single strings.
                    assert (actor_resources[resource][0][0] ==
                            actor_method_resources[resource][0][0]), (
                        "Should have assigned same {} for both actor "
                        "and actor method.".format(resource))
                    assert (actor_resources[resource][0][
                        1] == actor_method_resources[resource][0][1]), (
                        "Should have assigned same amount of {} for both "
                        "actor and actor method.".format(resource))
                    assert actor_resources[resource][0][1] == amount, (
                        "Actor should have {} {} but has {} {}".format(
                            amount, resource,
                            actor_resources[resource][0][1], resource))
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_actor_gpus(ray_start_cluster):
    """One-GPU actors must be packed one per GPU across all nodes, and an
    extra actor must fail to schedule once every GPU is taken."""
    cluster = ray_start_cluster
    num_nodes = 3
    num_gpus_per_raylet = 4
    for i in range(num_nodes):
        cluster.add_node(
            num_cpus=10 * num_gpus_per_raylet, num_gpus=num_gpus_per_raylet)
    ray.init(address=cluster.address)
    @ray.remote(num_gpus=1)
    class Actor1:
        def __init__(self):
            self.gpu_ids = ray.get_gpu_ids()
        def get_location_and_ids(self):
            # The GPU assignment must be stable across method calls.
            assert ray.get_gpu_ids() == self.gpu_ids
            return (ray.worker.global_worker.node.unique_id,
                    tuple(self.gpu_ids))
    # Create one actor per GPU.
    actors = [Actor1.remote() for _ in range(num_nodes * num_gpus_per_raylet)]
    # Make sure that no two actors are assigned to the same GPU.
    locations_and_ids = ray.get(
        [actor.get_location_and_ids.remote() for actor in actors])
    node_names = {location for location, gpu_id in locations_and_ids}
    assert len(node_names) == num_nodes
    # Build the full (node, gpu) cross product that must be covered exactly.
    location_actor_combinations = []
    for node_name in node_names:
        for gpu_id in range(num_gpus_per_raylet):
            location_actor_combinations.append((node_name, (gpu_id, )))
    assert set(locations_and_ids) == set(location_actor_combinations)
    # Creating a new actor should fail because all of the GPUs are being
    # used.
    a = Actor1.remote()
    ready_ids, _ = ray.wait([a.get_location_and_ids.remote()], timeout=0.01)
    assert ready_ids == []
def test_actor_multiple_gpus(ray_start_cluster):
    """Two-GPU actors, then one-GPU actors, must exactly partition the
    cluster's GPUs with no double assignment."""
    cluster = ray_start_cluster
    num_nodes = 3
    num_gpus_per_raylet = 5
    for i in range(num_nodes):
        cluster.add_node(
            num_cpus=10 * num_gpus_per_raylet, num_gpus=num_gpus_per_raylet)
    ray.init(address=cluster.address)
    @ray.remote(num_gpus=2)
    class Actor1:
        def __init__(self):
            self.gpu_ids = ray.get_gpu_ids()
        def get_location_and_ids(self):
            assert ray.get_gpu_ids() == self.gpu_ids
            return (ray.worker.global_worker.node.unique_id,
                    tuple(self.gpu_ids))
    # Create some actors.
    actors1 = [Actor1.remote() for _ in range(num_nodes * 2)]
    # Make sure that no two actors are assigned to the same GPU.
    locations_and_ids = ray.get(
        [actor.get_location_and_ids.remote() for actor in actors1])
    node_names = {location for location, gpu_id in locations_and_ids}
    assert len(node_names) == num_nodes
    # Keep track of which GPU IDs are being used for each location.
    gpus_in_use = {node_name: [] for node_name in node_names}
    for location, gpu_ids in locations_and_ids:
        gpus_in_use[location].extend(gpu_ids)
    for node_name in node_names:
        # Two 2-GPU actors per node => exactly 4 distinct GPUs in use.
        assert len(set(gpus_in_use[node_name])) == 4
    # Creating a new actor should fail because all of the GPUs are being
    # used.
    a = Actor1.remote()
    ready_ids, _ = ray.wait([a.get_location_and_ids.remote()], timeout=0.01)
    assert ready_ids == []
    # We should be able to create more actors that use only a single GPU.
    @ray.remote(num_gpus=1)
    class Actor2:
        def __init__(self):
            self.gpu_ids = ray.get_gpu_ids()
        def get_location_and_ids(self):
            return (ray.worker.global_worker.node.unique_id,
                    tuple(self.gpu_ids))
    # Create some actors.
    actors2 = [Actor2.remote() for _ in range(num_nodes)]
    # Make sure that no two actors are assigned to the same GPU.
    locations_and_ids = ray.get(
        [actor.get_location_and_ids.remote() for actor in actors2])
    names = {location for location, gpu_id in locations_and_ids}
    assert node_names == names
    for location, gpu_ids in locations_and_ids:
        gpus_in_use[location].extend(gpu_ids)
    for node_name in node_names:
        # The single-GPU actor fills the remaining fifth GPU on each node.
        assert len(gpus_in_use[node_name]) == 5
        assert set(gpus_in_use[node_name]) == set(range(5))
    # Creating a new actor should fail because all of the GPUs are being
    # used.
    a = Actor2.remote()
    ready_ids, _ = ray.wait([a.get_location_and_ids.remote()], timeout=0.01)
    assert ready_ids == []
def test_actor_different_numbers_of_gpus(ray_start_cluster):
    """One-GPU actors must saturate a heterogeneous cluster (0/5/10 GPUs)."""
    # Test that we can create actors on two nodes that have different
    # numbers of GPUs.
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=10, num_gpus=0)
    cluster.add_node(num_cpus=10, num_gpus=5)
    cluster.add_node(num_cpus=10, num_gpus=10)
    ray.init(address=cluster.address)
    @ray.remote(num_gpus=1)
    class Actor1:
        def __init__(self):
            self.gpu_ids = ray.get_gpu_ids()
        def get_location_and_ids(self):
            return (ray.worker.global_worker.node.unique_id,
                    tuple(self.gpu_ids))
    # Create some actors.
    actors = [Actor1.remote() for _ in range(0 + 5 + 10)]
    # Make sure that no two actors are assigned to the same GPU.
    locations_and_ids = ray.get(
        [actor.get_location_and_ids.remote() for actor in actors])
    node_names = {location for location, gpu_id in locations_and_ids}
    # Only the two GPU-bearing nodes should host any of these actors.
    assert len(node_names) == 2
    for node_name in node_names:
        node_gpu_ids = [
            gpu_id for location, gpu_id in locations_and_ids
            if location == node_name
        ]
        assert len(node_gpu_ids) in [5, 10]
        assert set(node_gpu_ids) == {(i, ) for i in range(len(node_gpu_ids))}
    # Creating a new actor should fail because all of the GPUs are being
    # used.
    a = Actor1.remote()
    ready_ids, _ = ray.wait([a.get_location_and_ids.remote()], timeout=0.01)
    assert ready_ids == []
def test_actor_multiple_gpus_from_multiple_tasks(ray_start_cluster):
    """GPU actors created concurrently from many driver tasks must still be
    assigned disjoint GPUs across the whole cluster."""
    cluster = ray_start_cluster
    num_nodes = 5
    num_gpus_per_raylet = 5
    for i in range(num_nodes):
        cluster.add_node(
            num_cpus=10 * num_gpus_per_raylet,
            num_gpus=num_gpus_per_raylet,
            _internal_config=json.dumps({
                "num_heartbeats_timeout": 1000
            }))
    ray.init(address=cluster.address)
    @ray.remote
    def create_actors(i, n):
        @ray.remote(num_gpus=1)
        class Actor:
            def __init__(self, i, j):
                self.gpu_ids = ray.get_gpu_ids()
            def get_location_and_ids(self):
                return ((ray.worker.global_worker.node.unique_id),
                        tuple(self.gpu_ids))
            def sleep(self):
                time.sleep(100)
        # Create n actors.
        actors = []
        for j in range(n):
            actors.append(Actor.remote(i, j))
        locations = ray.get(
            [actor.get_location_and_ids.remote() for actor in actors])
        # Put each actor to sleep for a long time to prevent them from getting
        # terminated.
        for actor in actors:
            actor.sleep.remote()
        return locations
    # One creator task per node, each spawning one actor per GPU.
    all_locations = ray.get([
        create_actors.remote(i, num_gpus_per_raylet) for i in range(num_nodes)
    ])
    # Make sure that no two actors are assigned to the same GPU.
    node_names = {
        location
        for locations in all_locations for location, gpu_id in locations
    }
    assert len(node_names) == num_nodes
    # Keep track of which GPU IDs are being used for each location.
    gpus_in_use = {node_name: [] for node_name in node_names}
    for locations in all_locations:
        for location, gpu_ids in locations:
            gpus_in_use[location].extend(gpu_ids)
    for node_name in node_names:
        assert len(set(gpus_in_use[node_name])) == num_gpus_per_raylet
    @ray.remote(num_gpus=1)
    class Actor:
        def __init__(self):
            self.gpu_ids = ray.get_gpu_ids()
        def get_location_and_ids(self):
            return (ray.worker.global_worker.node.unique_id,
                    tuple(self.gpu_ids))
    # All the GPUs should be used up now.
    a = Actor.remote()
    ready_ids, _ = ray.wait([a.get_location_and_ids.remote()], timeout=0.01)
    assert ready_ids == []
def test_actors_and_tasks_with_gpus(ray_start_cluster):
    """GPU leases for tasks must not overlap in time on the same GPU, and a
    GPU held by an actor must never be handed to tasks."""
    cluster = ray_start_cluster
    num_nodes = 3
    num_gpus_per_raylet = 2
    for i in range(num_nodes):
        cluster.add_node(
            num_cpus=num_gpus_per_raylet, num_gpus=num_gpus_per_raylet)
    ray.init(address=cluster.address)
    def check_intervals_non_overlapping(list_of_intervals):
        # Pairwise-check that no two [start, end] intervals intersect.
        for i in range(len(list_of_intervals)):
            for j in range(i):
                first_interval = list_of_intervals[i]
                second_interval = list_of_intervals[j]
                # Check that list_of_intervals[i] and list_of_intervals[j]
                # don't overlap.
                assert first_interval[0] < first_interval[1]
                assert second_interval[0] < second_interval[1]
                intervals_nonoverlapping = (
                    first_interval[1] <= second_interval[0]
                    or second_interval[1] <= first_interval[0])
                assert intervals_nonoverlapping, (
                    "Intervals {} and {} are overlapping.".format(
                        first_interval, second_interval))
    @ray.remote(num_gpus=1)
    def f1():
        # Record the [start, end] window during which this task held its GPU.
        t1 = time.monotonic()
        time.sleep(0.1)
        t2 = time.monotonic()
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 1
        assert gpu_ids[0] in range(num_gpus_per_raylet)
        return (ray.worker.global_worker.node.unique_id, tuple(gpu_ids),
                [t1, t2])
    @ray.remote(num_gpus=2)
    def f2():
        t1 = time.monotonic()
        time.sleep(0.1)
        t2 = time.monotonic()
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 2
        assert gpu_ids[0] in range(num_gpus_per_raylet)
        assert gpu_ids[1] in range(num_gpus_per_raylet)
        return (ray.worker.global_worker.node.unique_id, tuple(gpu_ids),
                [t1, t2])
    @ray.remote(num_gpus=1)
    class Actor1:
        def __init__(self):
            self.gpu_ids = ray.get_gpu_ids()
            assert len(self.gpu_ids) == 1
            assert self.gpu_ids[0] in range(num_gpus_per_raylet)
        def get_location_and_ids(self):
            assert ray.get_gpu_ids() == self.gpu_ids
            return (ray.worker.global_worker.node.unique_id,
                    tuple(self.gpu_ids))
    def locations_to_intervals_for_many_tasks():
        # Launch a bunch of GPU tasks.
        locations_ids_and_intervals = ray.get(
            [f1.remote() for _ in range(5 * num_nodes * num_gpus_per_raylet)] +
            [f2.remote() for _ in range(5 * num_nodes * num_gpus_per_raylet)] +
            [f1.remote() for _ in range(5 * num_nodes * num_gpus_per_raylet)])
        # Map (node, gpu_id) -> list of [start, end] usage intervals.
        locations_to_intervals = collections.defaultdict(lambda: [])
        for location, gpu_ids, interval in locations_ids_and_intervals:
            for gpu_id in gpu_ids:
                locations_to_intervals[(location, gpu_id)].append(interval)
        return locations_to_intervals
    # Run a bunch of GPU tasks.
    locations_to_intervals = locations_to_intervals_for_many_tasks()
    # For each GPU, verify that the set of tasks that used this specific
    # GPU did not overlap in time.
    for locations in locations_to_intervals:
        check_intervals_non_overlapping(locations_to_intervals[locations])
    # Create an actor that uses a GPU.
    a = Actor1.remote()
    actor_location = ray.get(a.get_location_and_ids.remote())
    actor_location = (actor_location[0], actor_location[1][0])
    # This check makes sure that actor_location is formatted the same way
    # that the keys of locations_to_intervals are formatted.
    assert actor_location in locations_to_intervals
    # Run a bunch of GPU tasks.
    locations_to_intervals = locations_to_intervals_for_many_tasks()
    # For each GPU, verify that the set of tasks that used this specific
    # GPU did not overlap in time.
    for locations in locations_to_intervals:
        check_intervals_non_overlapping(locations_to_intervals[locations])
    # Make sure that the actor's GPU was not used.
    assert actor_location not in locations_to_intervals
    # Create more actors to fill up all the GPUs.
    more_actors = [
        Actor1.remote() for _ in range(num_nodes * num_gpus_per_raylet - 1)
    ]
    # Wait for the actors to finish being created.
    ray.get([actor.get_location_and_ids.remote() for actor in more_actors])
    # Now if we run some GPU tasks, they should not be scheduled.
    results = [f1.remote() for _ in range(30)]
    ready_ids, remaining_ids = ray.wait(results, timeout=1.0)
    assert len(ready_ids) == 0
def test_actors_and_tasks_with_gpus_version_two(shutdown_only):
    """GPU tasks and GPU actors created together must collectively be given
    every GPU exactly once."""
    # Create tasks and actors that both use GPUs and make sure that they
    # are given different GPUs
    num_gpus = 4
    ray.init(
        num_cpus=(num_gpus + 1),
        num_gpus=num_gpus,
        object_store_memory=int(150 * 1024 * 1024))
    # The point of this actor is to record which GPU IDs have been seen. We
    # can't just return them from the tasks, because the tasks don't return
    # for a long time in order to make sure the GPU is not released
    # prematurely.
    @ray.remote
    class RecordGPUs:
        def __init__(self):
            self.gpu_ids_seen = []
            self.num_calls = 0
        def add_ids(self, gpu_ids):
            self.gpu_ids_seen += gpu_ids
            self.num_calls += 1
        def get_gpu_ids_and_calls(self):
            return self.gpu_ids_seen, self.num_calls
    @ray.remote(num_gpus=1)
    def f(record_gpu_actor):
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 1
        record_gpu_actor.add_ids.remote(gpu_ids)
        # Sleep for a long time so that the GPU never gets released. This task
        # will be killed by ray.shutdown() before it actually finishes.
        time.sleep(1000)
    @ray.remote(num_gpus=1)
    class Actor:
        def __init__(self, record_gpu_actor):
            self.gpu_ids = ray.get_gpu_ids()
            assert len(self.gpu_ids) == 1
            record_gpu_actor.add_ids.remote(self.gpu_ids)
        def check_gpu_ids(self):
            assert ray.get_gpu_ids() == self.gpu_ids
    record_gpu_actor = RecordGPUs.remote()
    actors = []
    actor_results = []
    # Alternate task/actor creation: half the GPUs go to tasks, half to actors.
    for _ in range(num_gpus // 2):
        f.remote(record_gpu_actor)
        a = Actor.remote(record_gpu_actor)
        actor_results.append(a.check_gpu_ids.remote())
        # Prevent the actor handle from going out of scope so that its GPU
        # resources don't get released.
        actors.append(a)
    # Make sure that the actor method calls succeeded.
    ray.get(actor_results)
    # Poll (for up to 30s) until all GPU holders have reported in.
    # NOTE(review): this is a busy-wait with no sleep between polls.
    start_time = time.time()
    while time.time() - start_time < 30:
        seen_gpu_ids, num_calls = ray.get(
            record_gpu_actor.get_gpu_ids_and_calls.remote())
        if num_calls == num_gpus:
            break
    assert set(seen_gpu_ids) == set(range(num_gpus))
def test_blocking_actor_task(shutdown_only):
    """Blocking behavior of actor methods: an actor without lifetime
    resources can block in ray.get even on one CPU, while actors holding
    lifetime CPU/GPU resources keep them while blocked (so the inner GPU
    task never runs)."""
    ray.init(
        num_cpus=1, num_gpus=1, object_store_memory=int(150 * 1024 * 1024))
    @ray.remote(num_gpus=1)
    def f():
        return 1
    @ray.remote
    class Foo:
        def __init__(self):
            pass
        def blocking_method(self):
            ray.get(f.remote())
    # Make sure we can execute a blocking actor method even if there is
    # only one CPU.
    actor = Foo.remote()
    ray.get(actor.blocking_method.remote())
    @ray.remote(num_cpus=1)
    class CPUFoo:
        def __init__(self):
            pass
        def blocking_method(self):
            ray.get(f.remote())
    # Make sure that lifetime CPU resources are not released when actors
    # block.
    actor = CPUFoo.remote()
    x_id = actor.blocking_method.remote()
    ready_ids, remaining_ids = ray.wait([x_id], timeout=1.0)
    assert ready_ids == []
    assert remaining_ids == [x_id]
    @ray.remote(num_gpus=1)
    class GPUFoo:
        def __init__(self):
            pass
        def blocking_method(self):
            ray.get(f.remote())
    # Make sure that GPU resources are not released when actors block.
    actor = GPUFoo.remote()
    x_id = actor.blocking_method.remote()
    ready_ids, remaining_ids = ray.wait([x_id], timeout=1.0)
    assert ready_ids == []
    assert remaining_ids == [x_id]
def test_lifetime_and_transient_resources(ray_start_regular):
    """Many resource-free actors can coexist, while actors holding a
    lifetime CPU are limited by the CPUs available."""
    # This actor acquires resources only when running methods.
    @ray.remote
    class Actor1:
        def method(self):
            pass
    # This actor acquires resources for its lifetime.
    @ray.remote(num_cpus=1)
    class Actor2:
        def method(self):
            pass
    actor1s = [Actor1.remote() for _ in range(10)]
    ray.get([a.method.remote() for a in actor1s])
    actor2s = [Actor2.remote() for _ in range(2)]
    results = [a.method.remote() for a in actor2s]
    ready_ids, remaining_ids = ray.wait(
        results, num_returns=len(results), timeout=5.0)
    # Only one of the two lifetime-CPU actors can be scheduled, so exactly
    # one method result becomes ready (assumes the fixture exposes a single
    # CPU — TODO confirm the ray_start_regular configuration).
    assert len(ready_ids) == 1
def test_custom_label_placement(ray_start_cluster):
    """Actors requiring a custom resource land only on the node that
    advertises it."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=2, resources={"CustomResource1": 2})
    cluster.add_node(num_cpus=2, resources={"CustomResource2": 2})
    ray.init(address=cluster.address)

    @ray.remote(resources={"CustomResource1": 1})
    class ResourceActor1:
        def get_location(self):
            return ray.worker.global_worker.node.unique_id

    @ray.remote(resources={"CustomResource2": 1})
    class ResourceActor2:
        def get_location(self):
            return ray.worker.global_worker.node.unique_id

    driver_node = ray.worker.global_worker.node.unique_id

    # CustomResource1 actors must be co-located with the driver's node;
    # CustomResource2 actors must be placed elsewhere.
    group1 = [ResourceActor1.remote() for _ in range(2)]
    group2 = [ResourceActor2.remote() for _ in range(2)]
    placements1 = ray.get([a.get_location.remote() for a in group1])
    placements2 = ray.get([a.get_location.remote() for a in group2])
    assert all(where == driver_node for where in placements1)
    assert all(where != driver_node for where in placements2)
def test_creating_more_actors_than_resources(shutdown_only):
    """When actors oversubscribe a resource, releasing one actor must allow
    the next pending actor to be scheduled."""
    ray.init(num_cpus=10, num_gpus=2, resources={"CustomResource1": 1})
    @ray.remote(num_gpus=1)
    class ResourceActor1:
        def method(self):
            return ray.get_gpu_ids()[0]
    @ray.remote(resources={"CustomResource1": 1})
    class ResourceActor2:
        def method(self):
            pass
    # Make sure the first two actors get created and the third one does
    # not.
    actor1 = ResourceActor1.remote()
    result1 = actor1.method.remote()
    ray.wait([result1])
    actor2 = ResourceActor1.remote()
    result2 = actor2.method.remote()
    ray.wait([result2])
    actor3 = ResourceActor1.remote()
    result3 = actor3.method.remote()
    ready_ids, _ = ray.wait([result3], timeout=0.2)
    assert len(ready_ids) == 0
    # By deleting actor1, we free up resources to create actor3.
    del actor1
    results = ray.get([result1, result2, result3])
    # actor3 reuses the GPU freed by actor1.
    assert results[0] == results[2]
    assert set(results) == {0, 1}
    # Make sure that when one actor goes out of scope a new actor is
    # created because some resources have been freed up.
    results = []
    for _ in range(3):
        actor = ResourceActor2.remote()
        object_id = actor.method.remote()
        results.append(object_id)
        # Wait for the task to execute. We do this because otherwise it may
        # be possible for the __ray_terminate__ task to execute before the
        # method.
        ray.wait([object_id])
    ray.get(results)
# Allow running this test module directly: delegates to pytest.
if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_advanced.py
|
Python
|
# coding: utf-8
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import random
import six
import sys
import threading
import time
import numpy as np
import pytest
import ray
import ray.ray_constants as ray_constants
import ray.cluster_utils
import ray.test_utils
from ray.test_utils import RayTestTimeoutException
# Module-level logger for this test file.
logger = logging.getLogger(__name__)
def test_wait_iterables(ray_start_regular):
    """ray.experimental.wait must accept non-list iterables of ObjectIDs
    (a tuple and a numpy array are exercised here)."""
    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1
    # Tuple of ObjectIDs.
    objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
    ready_ids, remaining_ids = ray.experimental.wait(objectids)
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3
    # NumPy array of ObjectIDs.
    objectids = np.array(
        [f.remote(1.0),
         f.remote(0.5),
         f.remote(0.5),
         f.remote(0.5)])
    ready_ids, remaining_ids = ray.experimental.wait(objectids)
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
    """Concurrent ray.wait / ray.get requests on the same ObjectID must all
    return once the object is ready."""
    # It is important to use three workers here, so that the three tasks
    # launched in this experiment can run at the same time.
    ray.init(num_cpus=3)
    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1
    @ray.remote
    def g(l):
        # The argument l should be a list containing one object ID.
        ray.wait([l[0]])
    @ray.remote
    def h(l):
        # The argument l should be a list containing one object ID.
        ray.get(l[0])
    # Make sure that multiple wait requests involving the same object ID
    # all return.
    x = f.remote(1)
    ray.get([g.remote([x]), g.remote([x])])
    # Make sure that multiple get requests involving the same object ID all
    # return.
    x = f.remote(1)
    ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
    """Functions registered with run_function_on_all_workers *before*
    ray.init are cached and later executed on workers in registration
    order — including re-registrations of the same name."""
    # Test that we export functions to run on all workers before the driver
    # is connected.
    def f(worker_info):
        sys.path.append(1)
    ray.worker.global_worker.run_function_on_all_workers(f)
    def f(worker_info):
        sys.path.append(2)
    ray.worker.global_worker.run_function_on_all_workers(f)
    def g(worker_info):
        sys.path.append(3)
    ray.worker.global_worker.run_function_on_all_workers(g)
    def f(worker_info):
        sys.path.append(4)
    ray.worker.global_worker.run_function_on_all_workers(f)
    ray.init(num_cpus=1)
    @ray.remote
    def get_state():
        time.sleep(1)
        return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
    res1 = get_state.remote()
    res2 = get_state.remote()
    # The four registered functions must have run in registration order.
    assert ray.get(res1) == (1, 2, 3, 4)
    assert ray.get(res2) == (1, 2, 3, 4)
    # Clean up the path on the workers.
    def f(worker_info):
        sys.path.pop()
        sys.path.pop()
        sys.path.pop()
        sys.path.pop()
    ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
    """run_function_on_all_workers must take effect on workers, and a second
    registered function can undo the first one's side effect."""
    def f(worker_info):
        sys.path.append("fake_directory")
    ray.worker.global_worker.run_function_on_all_workers(f)
    @ray.remote
    def get_path1():
        return sys.path
    assert "fake_directory" == ray.get(get_path1.remote())[-1]
    def f(worker_info):
        sys.path.pop(-1)
    ray.worker.global_worker.run_function_on_all_workers(f)
    # Create a second remote function to guarantee that when we call
    # get_path2.remote(), the second function to run will have been run on
    # the worker.
    @ray.remote
    def get_path2():
        return sys.path
    assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
    """Exercise put/wait/get/remote and a custom ray.profile span, then
    poll ray.timeline() until every expected event category appears."""

    @ray.remote
    def f():
        with ray.profile("custom_event", extra_data={"name": "custom name"}):
            pass

    ray.put(1)
    object_id = f.remote()
    ray.wait([object_id])
    ray.get(object_id)

    # Wait until all of the profiling information appears in the profile
    # table.
    timeout_seconds = 20
    start_time = time.time()
    while True:
        profile_data = ray.timeline()
        event_types = {event["cat"] for event in profile_data}
        expected_types = [
            "task",
            "task:deserialize_arguments",
            "task:execute",
            "task:store_outputs",
            "wait_for_function",
            "ray.get",
            "ray.put",
            "ray.wait",
            "submit_task",
            "fetch_and_run_function",
            "register_remote_function",
            "custom_event",  # This is the custom one from ray.profile.
        ]

        if all(expected_type in event_types
               for expected_type in expected_types):
            break

        if time.time() - start_time > timeout_seconds:
            raise RayTestTimeoutException(
                "Timed out while waiting for information in "
                "profile table. Missing events: {}.".format(
                    set(expected_types) - set(event_types)))

        # The profiling information only flushes once every second.
        time.sleep(1.1)
def test_wait_cluster(ray_start_cluster):
    """ray.wait with timeout=0 reports remote-node tasks as ready once
    they have actually finished."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
    cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
    ray.init(address=cluster.address)

    # The custom resource pins f to the two non-driver nodes.
    @ray.remote(resources={"RemoteResource": 1})
    def f():
        return

    # Make sure we have enough workers on the remote nodes to execute some
    # tasks.
    tasks = [f.remote() for _ in range(10)]
    start = time.time()
    ray.get(tasks)
    end = time.time()

    # Submit some more tasks that can only be executed on the remote nodes.
    tasks = [f.remote() for _ in range(10)]
    # Sleep for a bit to let the tasks finish.
    time.sleep((end - start) * 2)
    _, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
    # All remote tasks should have finished.
    assert len(unready) == 0
@pytest.mark.skipif(ray_constants.direct_call_enabled(), reason="TODO(ekl)")
def test_object_transfer_dump(ray_start_cluster):
    """After broadcasting objects between nodes, the object-transfer
    timeline must be JSON-serializable and show send/receive events from
    every node."""
    cluster = ray_start_cluster

    num_nodes = 3
    for i in range(num_nodes):
        cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
    ray.init(address=cluster.address)

    @ray.remote
    def f(x):
        return

    # These objects will live on different nodes.
    object_ids = [
        f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
    ]

    # Broadcast each object from each machine to each other machine.
    for object_id in object_ids:
        ray.get([
            f._remote(args=[object_id], resources={str(i): 1})
            for i in range(num_nodes)
        ])

    # The profiling information only flushes once every second.
    time.sleep(1.1)

    transfer_dump = ray.object_transfer_timeline()
    # Make sure the transfer dump can be serialized with JSON.
    json.loads(json.dumps(transfer_dump))
    assert len(transfer_dump) >= num_nodes**2
    # Every node must appear as both a receiver and a sender (one distinct
    # pid per node's object manager).
    assert len({
        event["pid"]
        for event in transfer_dump if event["name"] == "transfer_receive"
    }) == num_nodes
    assert len({
        event["pid"]
        for event in transfer_dump if event["name"] == "transfer_send"
    }) == num_nodes
def test_identical_function_names(ray_start_regular):
    """Re-registering a remote function under the same name must dispatch
    each submission to the definition current at submission time."""
    # Define a bunch of remote functions and make sure that we don't
    # accidentally call an older version.
    num_calls = 200

    @ray.remote
    def f():
        return 1

    results1 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 2

    results2 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 3

    results3 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 4

    results4 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 5

    results5 = [f.remote() for _ in range(num_calls)]

    # Tasks submitted before each redefinition still use that definition.
    assert ray.get(results1) == num_calls * [1]
    assert ray.get(results2) == num_calls * [2]
    assert ray.get(results3) == num_calls * [3]
    assert ray.get(results4) == num_calls * [4]
    assert ray.get(results5) == num_calls * [5]

    @ray.remote
    def g():
        return 1

    @ray.remote  # noqa: F811
    def g():
        return 2

    @ray.remote  # noqa: F811
    def g():
        return 3

    @ray.remote  # noqa: F811
    def g():
        return 4

    @ray.remote  # noqa: F811
    def g():
        return 5

    # When all submissions happen after the last redefinition, only the
    # final version is ever invoked.
    result_values = ray.get([g.remote() for _ in range(num_calls)])
    assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
    """ray.get and ray.put must reject arguments of the wrong kind."""
    # ray.get() on a plain (non-ObjectID) value must fail.
    with pytest.raises(Exception):
        ray.get(3)
    # ray.put() applied to an ObjectID must fail as well.
    object_id = ray.put(1)
    with pytest.raises(Exception):
        ray.put(object_id)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(ray_start_2_cpus):
    """Ray API calls (remote, get, put, wait, actor calls) must be safe
    when issued concurrently from many threads in a driver, a worker, and
    an actor that spawns its own background threads."""
    # This test requires at least 2 CPUs to finish since the worker does not
    # release resources when joining the threads.

    def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
        """A helper function that runs test cases in multiple threads."""

        def wrapper():
            for _ in range(num_repeats):
                test_case()
                time.sleep(random.randint(0, 10) / 1000.0)
            return "ok"

        executor = ThreadPoolExecutor(max_workers=num_threads)
        futures = [executor.submit(wrapper) for _ in range(num_threads)]
        for future in futures:
            assert future.result() == "ok"

    @ray.remote
    def echo(value, delay_ms=0):
        if delay_ms > 0:
            time.sleep(delay_ms / 1000.0)
        return value

    def test_api_in_multi_threads():
        """Test using Ray api in multiple threads."""

        @ray.remote
        class Echo:
            def echo(self, value):
                return value

        # Test calling remote functions in multiple threads.
        def test_remote_call():
            value = random.randint(0, 1000000)
            result = ray.get(echo.remote(value))
            assert value == result

        run_test_in_multi_threads(test_remote_call)

        # Test multiple threads calling one actor.
        actor = Echo.remote()

        def test_call_actor():
            value = random.randint(0, 1000000)
            result = ray.get(actor.echo.remote(value))
            assert value == result

        run_test_in_multi_threads(test_call_actor)

        # Test put and get.
        def test_put_and_get():
            value = random.randint(0, 1000000)
            result = ray.get(ray.put(value))
            assert value == result

        run_test_in_multi_threads(test_put_and_get)

        # Test multiple threads waiting for objects.
        num_wait_objects = 10
        objects = [
            echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
        ]

        def test_wait():
            ready, _ = ray.wait(
                objects,
                num_returns=len(objects),
                timeout=1000.0,
            )
            assert len(ready) == num_wait_objects
            assert ray.get(ready) == list(range(num_wait_objects))

        run_test_in_multi_threads(test_wait, num_repeats=1)

    # Run tests in a driver.
    test_api_in_multi_threads()

    # Run tests in a worker.
    @ray.remote
    def run_tests_in_worker():
        test_api_in_multi_threads()
        return "ok"

    assert ray.get(run_tests_in_worker.remote()) == "ok"

    # Test actor that runs background threads.
    @ray.remote
    class MultithreadedActor:
        def __init__(self):
            self.lock = threading.Lock()
            self.thread_results = []

        def background_thread(self, wait_objects):
            try:
                # Test wait
                ready, _ = ray.wait(
                    wait_objects,
                    num_returns=len(wait_objects),
                    timeout=1000.0,
                )
                assert len(ready) == len(wait_objects)
                for _ in range(20):
                    num = 10
                    # Test remote call
                    results = [echo.remote(i) for i in range(num)]
                    assert ray.get(results) == list(range(num))
                    # Test put and get
                    objects = [ray.put(i) for i in range(num)]
                    assert ray.get(objects) == list(range(num))
                    time.sleep(random.randint(0, 10) / 1000.0)
            except Exception as e:
                with self.lock:
                    self.thread_results.append(e)
            else:
                with self.lock:
                    self.thread_results.append("ok")

        def spawn(self):
            wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
            self.threads = [
                threading.Thread(
                    target=self.background_thread, args=(wait_objects, ))
                for _ in range(20)
            ]
            [thread.start() for thread in self.threads]

        def join(self):
            [thread.join() for thread in self.threads]
            assert self.thread_results == ["ok"] * len(self.threads)
            return "ok"

    actor = MultithreadedActor.remote()
    actor.spawn.remote()
    # BUGFIX: this was a bare `ray.get(...) == "ok"` comparison whose result
    # was discarded, so a failing join() return value went unnoticed.
    assert ray.get(actor.join.remote()) == "ok"
@pytest.mark.skipif(
    ray_constants.direct_call_enabled(), reason="uses task and object table")
def test_free_objects_multi_node(ray_start_cluster):
    """ray.internal.free deletes objects cluster-wide (local_only=False),
    only on the caller's node (local_only=True), and can also scrub the
    creating tasks from the task table (delete_creating_tasks=True)."""
    # This test will do following:
    # 1. Create 3 raylets that each hold an actor.
    # 2. Each actor creates an object which is the deletion target.
    # 3. Wait 0.1 second for the objects to be deleted.
    # 4. Check that the deletion targets have been deleted.
    # Caution: if remote functions are used instead of actor methods,
    # one raylet may create more than one worker to execute the
    # tasks, so the flushing operations may be executed in different
    # workers and the plasma client holding the deletion target
    # may not be flushed.
    cluster = ray_start_cluster
    config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
    for i in range(3):
        cluster.add_node(
            num_cpus=1,
            resources={"Custom{}".format(i): 1},
            _internal_config=config)
    ray.init(address=cluster.address)

    class RawActor:
        def get(self):
            return ray.worker.global_worker.node.unique_id

    # One actor per node, pinned via the per-node custom resource.
    ActorOnNode0 = ray.remote(resources={"Custom0": 1})(RawActor)
    ActorOnNode1 = ray.remote(resources={"Custom1": 1})(RawActor)
    ActorOnNode2 = ray.remote(resources={"Custom2": 1})(RawActor)

    def create(actors):
        # Produce one object per actor (hence per node) and make sure all
        # three are ready before returning their ids.
        a = actors[0].get.remote()
        b = actors[1].get.remote()
        c = actors[2].get.remote()
        (l1, l2) = ray.wait([a, b, c], num_returns=3)
        assert len(l1) == 3
        assert len(l2) == 0
        return (a, b, c)

    def run_one_test(actors, local_only, delete_creating_tasks):
        (a, b, c) = create(actors)
        # The three objects should be generated on different object stores.
        assert ray.get(a) != ray.get(b)
        assert ray.get(a) != ray.get(c)
        assert ray.get(c) != ray.get(b)
        ray.internal.free(
            [a, b, c],
            local_only=local_only,
            delete_creating_tasks=delete_creating_tasks)
        # Wait for the objects to be deleted.
        time.sleep(0.1)
        return (a, b, c)

    actors = [
        ActorOnNode0.remote(),
        ActorOnNode1.remote(),
        ActorOnNode2.remote()
    ]
    # Case 1: run this local_only=False. All 3 objects will be deleted.
    (a, b, c) = run_one_test(actors, False, False)
    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
    # All the objects are deleted.
    assert len(l1) == 0
    assert len(l2) == 3
    # Case 2: run this local_only=True. Only 1 object will be deleted.
    (a, b, c) = run_one_test(actors, True, False)
    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
    # One object is deleted and 2 objects are not.
    assert len(l1) == 2
    assert len(l2) == 1
    # The deleted object will have the same store with the driver.
    local_return = ray.worker.global_worker.node.unique_id
    for object_id in l1:
        assert ray.get(object_id) != local_return

    # Case3: These cases test the deleting creating tasks for the object.
    (a, b, c) = run_one_test(actors, False, False)
    task_table = ray.tasks()
    for obj in [a, b, c]:
        assert ray._raylet.compute_task_id(obj).hex() in task_table

    (a, b, c) = run_one_test(actors, False, True)
    task_table = ray.tasks()
    for obj in [a, b, c]:
        assert ray._raylet.compute_task_id(obj).hex() not in task_table
def test_local_mode(shutdown_only):
    """Local mode must mimic cluster semantics: ObjectID returns, argument
    immutability, wait ordering, free(), actors, deferred exceptions, and
    multiple return values."""

    @ray.remote
    def local_mode_f():
        return np.array([0, 0])

    @ray.remote
    def local_mode_g(x):
        x[0] = 1
        return x

    ray.init(local_mode=True)

    @ray.remote
    def f():
        return np.ones([3, 4, 5])

    xref = f.remote()
    # Remote functions should return ObjectIDs.
    assert isinstance(xref, ray.ObjectID)
    assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))
    y = np.random.normal(size=[11, 12])
    # Check that ray.get(ray.put) is the identity.
    assert np.alltrue(y == ray.get(ray.put(y)))

    # Make sure objects are immutable, this example is why we need to copy
    # arguments before passing them into remote functions in python mode
    aref = local_mode_f.remote()
    assert np.alltrue(ray.get(aref) == np.array([0, 0]))
    bref = local_mode_g.remote(ray.get(aref))
    # Make sure local_mode_g does not mutate aref.
    assert np.alltrue(ray.get(aref) == np.array([0, 0]))
    assert np.alltrue(ray.get(bref) == np.array([1, 0]))

    # wait should return the first num_returns values passed in as the
    # first list and the remaining values as the second list
    num_returns = 5
    object_ids = [ray.put(i) for i in range(20)]
    ready, remaining = ray.wait(
        object_ids, num_returns=num_returns, timeout=None)
    assert ready == object_ids[:num_returns]
    assert remaining == object_ids[num_returns:]

    # Check that ray.put() and ray.internal.free() work in local mode.
    v1 = np.ones(10)
    v2 = np.zeros(10)
    k1 = ray.put(v1)
    assert np.alltrue(v1 == ray.get(k1))
    k2 = ray.put(v2)
    assert np.alltrue(v2 == ray.get(k2))
    ray.internal.free([k1, k2])
    with pytest.raises(Exception):
        ray.get(k1)
    with pytest.raises(Exception):
        ray.get(k2)
    # Should fail silently.
    ray.internal.free([k1, k2])

    # Test actors in LOCAL_MODE.
    @ray.remote
    class LocalModeTestClass:
        def __init__(self, array):
            self.array = array

        def set_array(self, array):
            self.array = array

        def get_array(self):
            return self.array

        def modify_and_set_array(self, array):
            array[0] = -1
            self.array = array

        @ray.method(num_return_vals=3)
        def returns_multiple(self):
            return 1, 2, 3

    test_actor = LocalModeTestClass.remote(np.arange(10))
    obj = test_actor.get_array.remote()
    assert isinstance(obj, ray.ObjectID)
    assert np.alltrue(ray.get(obj) == np.arange(10))

    test_array = np.arange(10)
    # Remote actor functions should not mutate arguments
    test_actor.modify_and_set_array.remote(test_array)
    assert np.alltrue(test_array == np.arange(10))
    # Remote actor functions should keep state
    test_array[0] = -1
    assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))

    # Check that actor handles work in local mode.
    @ray.remote
    def use_actor_handle(handle):
        array = np.ones(10)
        handle.set_array.remote(array)
        assert np.alltrue(array == ray.get(handle.get_array.remote()))

    ray.get(use_actor_handle.remote(test_actor))

    # Check that exceptions are deferred until ray.get().
    exception_str = "test_advanced remote task exception"

    @ray.remote
    def throws():
        raise Exception(exception_str)

    obj = throws.remote()
    with pytest.raises(Exception, match=exception_str):
        ray.get(obj)

    # Check that multiple return values are handled properly.
    @ray.remote(num_return_vals=3)
    def returns_multiple():
        return 1, 2, 3

    obj1, obj2, obj3 = returns_multiple.remote()
    assert ray.get(obj1) == 1
    assert ray.get(obj2) == 2
    assert ray.get(obj3) == 3
    assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]

    obj1, obj2, obj3 = test_actor.returns_multiple.remote()
    assert ray.get(obj1) == 1
    assert ray.get(obj2) == 2
    assert ray.get(obj3) == 3
    assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]

    @ray.remote(num_return_vals=2)
    def returns_multiple_throws():
        raise Exception(exception_str)

    obj1, obj2 = returns_multiple_throws.remote()
    # BUGFIX: the original block did `ray.get(obj)` (the stale handle from
    # the earlier `throws` task) as the first statement of this raises
    # context, so `ray.get(obj1)` was dead code and obj1's error path was
    # never exercised. Check each return value in its own context.
    with pytest.raises(Exception, match=exception_str):
        ray.get(obj1)
    with pytest.raises(Exception, match=exception_str):
        ray.get(obj2)

    # Check that Actors are not overwritten by remote calls from different
    # classes.
    @ray.remote
    class RemoteActor1:
        def __init__(self):
            pass

        def function1(self):
            return 0

    @ray.remote
    class RemoteActor2:
        def __init__(self):
            pass

        def function2(self):
            return 1

    actor1 = RemoteActor1.remote()
    _ = RemoteActor2.remote()
    assert ray.get(actor1.function1.remote()) == 0

    # Test passing ObjectIDs.
    @ray.remote
    def direct_dep(input):
        return input

    @ray.remote
    def indirect_dep(input):
        return ray.get(direct_dep.remote(input[0]))

    assert ray.get(indirect_dep.remote(["hello"])) == "hello"
def test_wait_makes_object_local(ray_start_cluster):
    """Both ray.get and ray.wait must pull a remote object into the local
    object store."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=0)
    cluster.add_node(num_cpus=2)
    ray.init(address=cluster.address)

    # The actor is scheduled on the second node (the driver node has 0 CPUs),
    # so its results start out remote to the driver.
    @ray.remote
    class Foo:
        def method(self):
            return np.zeros(1024 * 1024)

    a = Foo.remote()

    # Test get makes the object local.
    x_id = a.method.remote()
    assert not ray.worker.global_worker.core_worker.object_exists(x_id)
    ray.get(x_id)
    assert ray.worker.global_worker.core_worker.object_exists(x_id)

    # Test wait makes the object local.
    x_id = a.method.remote()
    assert not ray.worker.global_worker.core_worker.object_exists(x_id)
    ok, _ = ray.wait([x_id])
    assert len(ok) == 1
    assert ray.worker.global_worker.core_worker.object_exists(x_id)
if __name__ == "__main__":
    # Allow running this test module directly: delegate to pytest in
    # verbose mode and propagate its exit code.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_advanced_2.py
|
Python
|
# coding: utf-8
import logging
import os
import sys
import time
import numpy as np
import pytest
import ray
import ray.cluster_utils
import ray.test_utils
from ray.test_utils import RayTestTimeoutException
logger = logging.getLogger(__name__)
def test_resource_constraints(shutdown_only):
    """CPU/GPU requirements bound task parallelism: with 10 CPUs / 2 GPUs,
    batches that exceed capacity must take two scheduling rounds (checked
    via wall-clock duration, so inherently timing-sensitive)."""
    num_workers = 20
    ray.init(num_cpus=10, num_gpus=2)

    @ray.remote(num_cpus=0)
    def get_worker_id():
        time.sleep(0.1)
        return os.getpid()

    # Attempt to wait for all of the workers to start up.
    while True:
        if len(
                set(
                    ray.get([
                        get_worker_id.remote() for _ in range(num_workers)
                    ]))) == num_workers:
            break

    time_buffer = 2

    # At most 10 copies of this can run at once.
    @ray.remote(num_cpus=1)
    def f(n):
        time.sleep(n)

    # 10 tasks fit in one round: total time ~ one task's sleep.
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(10)])
    duration = time.time() - start_time
    assert duration < 0.5 + time_buffer
    assert duration > 0.5

    # 11 tasks need two rounds: total time ~ two sleeps.
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(11)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    # With num_cpus=3 at most 3 copies run concurrently.
    @ray.remote(num_cpus=3)
    def f(n):
        time.sleep(n)

    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(3)])
    duration = time.time() - start_time
    assert duration < 0.5 + time_buffer
    assert duration > 0.5

    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(4)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    # With num_gpus=1 and only 2 GPUs, at most 2 copies run concurrently.
    @ray.remote(num_gpus=1)
    def f(n):
        time.sleep(n)

    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(2)])
    duration = time.time() - start_time
    assert duration < 0.5 + time_buffer
    assert duration > 0.5

    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(3)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(4)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1
def test_multi_resource_constraints(shutdown_only):
    """Tasks requiring both CPUs and GPUs: a complementary f+g pair fits
    concurrently (1+9 vs 9+1), but two of the same kind must serialize
    (checked via wall-clock duration, so timing-sensitive)."""
    num_workers = 20
    ray.init(num_cpus=10, num_gpus=10)

    @ray.remote(num_cpus=0)
    def get_worker_id():
        time.sleep(0.1)
        return os.getpid()

    # Attempt to wait for all of the workers to start up.
    while True:
        if len(
                set(
                    ray.get([
                        get_worker_id.remote() for _ in range(num_workers)
                    ]))) == num_workers:
            break

    @ray.remote(num_cpus=1, num_gpus=9)
    def f(n):
        time.sleep(n)

    @ray.remote(num_cpus=9, num_gpus=1)
    def g(n):
        time.sleep(n)

    time_buffer = 2

    # One f and one g together use exactly 10 CPUs + 10 GPUs: parallel.
    start_time = time.time()
    ray.get([f.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    assert duration < 0.5 + time_buffer
    assert duration > 0.5

    # Two f's would need 18 GPUs: they serialize.
    start_time = time.time()
    ray.get([f.remote(0.5), f.remote(0.5)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    # Two g's would need 18 CPUs: they serialize.
    start_time = time.time()
    ray.get([g.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    start_time = time.time()
    ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1
def test_gpu_ids(shutdown_only):
    """Tasks and actors must see exactly the GPU ids they requested, with
    CUDA_VISIBLE_DEVICES set to match."""
    num_gpus = 3
    ray.init(num_cpus=num_gpus, num_gpus=num_gpus)

    def get_gpu_ids(num_gpus_per_worker):
        # Runs inside a worker: validates count, env var, and id range.
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == num_gpus_per_worker
        assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
            [str(i) for i in gpu_ids]))
        for gpu_id in gpu_ids:
            assert gpu_id in range(num_gpus)
        return gpu_ids

    f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
    f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
    f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))

    # Wait for all workers to start up.
    @ray.remote
    def f():
        time.sleep(0.2)
        return os.getpid()

    start_time = time.time()
    while True:
        num_workers_started = len(
            set(ray.get([f.remote() for _ in range(num_gpus)])))
        if num_workers_started == num_gpus:
            break
        if time.time() > start_time + 10:
            raise RayTestTimeoutException(
                "Timed out while waiting for workers to start "
                "up.")

    list_of_ids = ray.get([f0.remote() for _ in range(10)])
    assert list_of_ids == 10 * [[]]

    ray.get([f1.remote() for _ in range(10)])
    ray.get([f2.remote() for _ in range(10)])

    # Test that actors have CUDA_VISIBLE_DEVICES set properly.

    @ray.remote
    class Actor0:
        def __init__(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 0
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            # Set self.x to make sure that we got here.
            self.x = 1

        def test(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 0
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            return self.x

    @ray.remote(num_gpus=1)
    class Actor1:
        def __init__(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            # Set self.x to make sure that we got here.
            self.x = 1

        def test(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            return self.x

    a0 = Actor0.remote()
    ray.get(a0.test.remote())

    a1 = Actor1.remote()
    ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
    """Tasks and actors requesting zero CPUs run even on a zero-CPU node."""
    ray.init(num_cpus=0)

    # We should be able to execute a task that requires 0 CPU resources.
    @ray.remote(num_cpus=0)
    def f():
        return 1

    ray.get(f.remote())

    # We should be able to create an actor that requires 0 CPU resources.
    @ray.remote(num_cpus=0)
    class Actor:
        def method(self):
            pass

    actor = Actor.remote()
    ray.get(actor.method.remote())
def test_zero_cpus_actor(ray_start_cluster):
    """An actor must not be placed on the zero-CPU head node."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=0)
    cluster.add_node(num_cpus=2)
    ray.init(address=cluster.address)

    driver_node_id = ray.worker.global_worker.node.unique_id

    @ray.remote
    class Foo:
        def method(self):
            return ray.worker.global_worker.node.unique_id

    # Make sure tasks and actors run on the remote raylet.
    actor = Foo.remote()
    actor_node_id = ray.get(actor.method.remote())
    assert actor_node_id != driver_node_id
def test_fractional_resources(shutdown_only):
    """Fractional resource requests pack correctly (two 0.5-GPU actors per
    GPU), respect remaining capacity, and reject per-resource fractions
    above 1."""
    ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})

    @ray.remote(num_gpus=0.5)
    class Foo1:
        def method(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            return gpu_ids[0]

    # Six half-GPU actors over three GPUs: each GPU hosts exactly two.
    foos = [Foo1.remote() for _ in range(6)]
    gpu_ids = ray.get([f.method.remote() for f in foos])
    for i in range(3):
        assert gpu_ids.count(i) == 2
    del foos

    @ray.remote
    class Foo2:
        def method(self):
            pass

    # Create an actor that requires 0.7 of the custom resource.
    f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
    ray.get(f1.method.remote())
    # Make sure that we cannot create another actor that requires 0.7 of
    # the custom resource (only 0.3 remains). TODO(rkn): Re-enable this
    # once ray.wait is implemented.
    f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
    ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
    assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
    # resource.
    f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
    ray.get(f3.method.remote())

    del f1, f3

    # Make sure that we get exceptions if we submit tasks that require a
    # fractional number of resources greater than 1.

    @ray.remote(num_cpus=1.5)
    def test():
        pass

    with pytest.raises(ValueError):
        test.remote()

    with pytest.raises(ValueError):
        Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_raylets(ray_start_cluster):
    """Resource-constrained tasks must land only on nodes that satisfy
    their CPU/GPU requirements, both from the driver and from nested
    tasks."""
    # This test will define a bunch of tasks that can only be assigned to
    # specific raylets, and we will check that they are assigned
    # to the correct raylets.
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=11, num_gpus=0)
    cluster.add_node(num_cpus=5, num_gpus=5)
    cluster.add_node(num_cpus=10, num_gpus=1)
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    # Define a bunch of remote functions that all return the socket name of
    # the plasma store. Since there is a one-to-one correspondence between
    # plasma stores and raylets (at least right now), this can be
    # used to identify which raylet the task was assigned to.

    # This must be run on the zeroth raylet.
    @ray.remote(num_cpus=11)
    def run_on_0():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This must be run on the first raylet.
    @ray.remote(num_gpus=2)
    def run_on_1():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This must be run on the second raylet.
    @ray.remote(num_cpus=6, num_gpus=1)
    def run_on_2():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This can be run anywhere.
    @ray.remote(num_cpus=0, num_gpus=0)
    def run_on_0_1_2():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This must be run on the first or second raylet.
    @ray.remote(num_gpus=1)
    def run_on_1_2():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This must be run on the zeroth or second raylet.
    @ray.remote(num_cpus=8)
    def run_on_0_2():
        return ray.worker.global_worker.node.plasma_store_socket_name

    def run_lots_of_tasks():
        # Submit 100 randomly chosen constrained tasks and remember which
        # constraint each one carried.
        names = []
        results = []
        for i in range(100):
            index = np.random.randint(6)
            if index == 0:
                names.append("run_on_0")
                results.append(run_on_0.remote())
            elif index == 1:
                names.append("run_on_1")
                results.append(run_on_1.remote())
            elif index == 2:
                names.append("run_on_2")
                results.append(run_on_2.remote())
            elif index == 3:
                names.append("run_on_0_1_2")
                results.append(run_on_0_1_2.remote())
            elif index == 4:
                names.append("run_on_1_2")
                results.append(run_on_1_2.remote())
            elif index == 5:
                names.append("run_on_0_2")
                results.append(run_on_0_2.remote())
        return names, results

    # Identify the three nodes' object stores by their distinctive GPU
    # counts (0, 5, 1), in the same order the nodes were added.
    client_table = ray.nodes()
    store_names = []
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"].get("GPU", 0) == 0
    ]
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"].get("GPU", 0) == 5
    ]
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"].get("GPU", 0) == 1
    ]
    assert len(store_names) == 3

    def validate_names_and_results(names, results):
        # Each task must have run on a node permitted by its constraint.
        for name, result in zip(names, ray.get(results)):
            if name == "run_on_0":
                assert result in [store_names[0]]
            elif name == "run_on_1":
                assert result in [store_names[1]]
            elif name == "run_on_2":
                assert result in [store_names[2]]
            elif name == "run_on_0_1_2":
                assert (result in [
                    store_names[0], store_names[1], store_names[2]
                ])
            elif name == "run_on_1_2":
                assert result in [store_names[1], store_names[2]]
            elif name == "run_on_0_2":
                assert result in [store_names[0], store_names[2]]
            else:
                raise Exception("This should be unreachable.")
            assert set(ray.get(results)) == set(store_names)

    names, results = run_lots_of_tasks()
    validate_names_and_results(names, results)

    # Make sure the same thing works when this is nested inside of a task.

    @ray.remote
    def run_nested1():
        names, results = run_lots_of_tasks()
        return names, results

    @ray.remote
    def run_nested2():
        names, results = ray.get(run_nested1.remote())
        return names, results

    names, results = ray.get(run_nested2.remote())
    validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
    """Tasks requiring a custom resource run only on the node that has it,
    and resource accounting survives a blocked task."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
    cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
    ray.init(address=cluster.address)

    @ray.remote
    def f():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    @ray.remote(resources={"CustomResource": 1})
    def g():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    @ray.remote(resources={"CustomResource": 1})
    def h():
        # Blocks on unconstrained child tasks while holding the resource.
        ray.get([f.remote() for _ in range(5)])
        return ray.worker.global_worker.node.unique_id

    # The f tasks should be scheduled on both raylets.
    assert len(set(ray.get([f.remote() for _ in range(500)]))) == 2

    node_id = ray.worker.global_worker.node.unique_id

    # The g tasks should be scheduled only on the second raylet.
    raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
    assert len(raylet_ids) == 1
    assert list(raylet_ids)[0] != node_id

    # Make sure that resource bookkeeping works when a task that uses a
    # custom resources gets blocked.
    ray.get([h.remote() for _ in range(5)])
def test_node_id_resource(ray_start_cluster):
    """Each node's id is exposed as a resource usable for placement."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=3)
    cluster.add_node(num_cpus=3)
    ray.init(address=cluster.address)

    local_node = ray.state.current_node_id()

    # Note that these will have the same IP in the test cluster
    all_node_ids = ray.state.node_ids()
    assert len(all_node_ids) == 2
    assert local_node in all_node_ids

    @ray.remote(resources={local_node: 1})
    def f():
        return ray.state.current_node_id()

    # Check the node id resource is automatically usable for scheduling:
    # the task pinned to the driver's node reports the driver's node id.
    assert ray.get(f.remote()) == ray.state.current_node_id()
def test_two_custom_resources(ray_start_cluster):
    """Combinations of two custom resources route tasks to the only node
    that can satisfy both, and unsatisfiable requests never schedule."""
    cluster = ray_start_cluster
    cluster.add_node(
        num_cpus=3, resources={
            "CustomResource1": 1,
            "CustomResource2": 2
        })
    cluster.add_node(
        num_cpus=3, resources={
            "CustomResource1": 3,
            "CustomResource2": 4
        })
    ray.init(address=cluster.address)

    @ray.remote(resources={"CustomResource1": 1})
    def f():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    @ray.remote(resources={"CustomResource2": 1})
    def g():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    # Only the second node has CustomResource2 >= 3.
    @ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
    def h():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    # No node has CustomResource1 >= 4: never schedulable.
    @ray.remote(resources={"CustomResource1": 4})
    def j():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    # CustomResource3 does not exist anywhere: never schedulable.
    @ray.remote(resources={"CustomResource3": 1})
    def k():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    # The f and g tasks should be scheduled on both raylets.
    assert len(set(ray.get([f.remote() for _ in range(500)]))) == 2
    assert len(set(ray.get([g.remote() for _ in range(500)]))) == 2

    node_id = ray.worker.global_worker.node.unique_id

    # The h tasks should be scheduled only on the second raylet.
    raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
    assert len(raylet_ids) == 1
    assert list(raylet_ids)[0] != node_id

    # Make sure that tasks with unsatisfied custom resource requirements do
    # not get scheduled.
    ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
    assert ready_ids == []
def test_many_custom_resources(shutdown_only):
    """Scheduling must work with a very large number (10k) of custom
    resources, including tasks that demand random subsets or all of them."""
    num_custom_resources = 10000
    total_resources = {
        str(i): np.random.randint(1, 7)
        for i in range(num_custom_resources)
    }
    ray.init(num_cpus=5, resources=total_resources)

    def f():
        return 1

    remote_functions = []
    for _ in range(20):
        # Each variant demands a random subset of the declared resources,
        # each at its full declared amount.
        num_resources = np.random.randint(0, num_custom_resources + 1)
        permuted_resources = np.random.permutation(
            num_custom_resources)[:num_resources]
        random_resources = {
            str(i): total_resources[str(i)]
            for i in permuted_resources
        }
        remote_function = ray.remote(resources=random_resources)(f)
        remote_functions.append(remote_function)

    remote_functions.append(ray.remote(f))
    remote_functions.append(ray.remote(resources=total_resources)(f))

    results = []
    for remote_function in remote_functions:
        results.append(remote_function.remote())
        results.append(remote_function.remote())
        results.append(remote_function.remote())

    ray.get(results)
# TODO: 5 retry attempts may be too little for Travis and we may need to
# increase it if this test begins to be flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
    """While a task holds every CPU/GPU/custom resource, the filtered view
    of ray.available_resources() must become empty."""
    ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})

    def test():
        def get_schedulable_resources():
            # memory, object_store_memory, and the per-node "node:..."
            # entries are always reported; strip them so that "empty"
            # means all schedulable resources are in use. pop() with a
            # default avoids a KeyError if a key is absent.
            resources = ray.available_resources()
            resources.pop("memory", None)
            resources.pop("object_store_memory", None)
            for key in list(resources.keys()):
                if key.startswith("node:"):
                    del resources[key]
            return resources

        MAX_RETRY_ATTEMPTS = 5
        retry_count = 0

        resources = get_schedulable_resources()
        while resources and retry_count < MAX_RETRY_ATTEMPTS:
            time.sleep(0.1)
            # BUGFIX: re-apply the filtering on every poll. The original
            # code refetched the raw dict inside the loop, which always
            # contains the memory/node keys, so once entered the loop
            # could never observe an empty dict and always exhausted its
            # retries.
            resources = get_schedulable_resources()
            retry_count += 1

        if retry_count >= MAX_RETRY_ATTEMPTS:
            raise RuntimeError(
                "Resources were available even after five retries.", resources)

        return resources

    # Run the check from inside a task that itself consumes every resource.
    function = ray.remote(
        num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
    cluster_resources = ray.get(function.remote())

    # All cluster resources should be utilized and
    # cluster_resources must be empty
    assert cluster_resources == {}
@pytest.fixture
def save_gpu_ids_shutdown_only():
    """Snapshot CUDA_VISIBLE_DEVICES, then restore it and shut Ray down.

    Yields None; everything after the yield runs as pytest teardown code.
    """
    # Record the current value of this environment variable so that we can
    # reset it after the test.
    original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
    # Reset the environment variable. Use pop() so that teardown cannot
    # raise a KeyError when the variable is already absent (e.g. the test
    # failed before setting it, or removed it itself) — the original bare
    # `del` would mask the real test failure in that case.
    if original_gpu_ids is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
    else:
        os.environ.pop("CUDA_VISIBLE_DEVICES", None)
def test_specific_gpus(save_gpu_ids_shutdown_only):
    """Workers must only ever receive GPU ids from CUDA_VISIBLE_DEVICES."""
    allowed_gpu_ids = [4, 5, 6]
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
        [str(i) for i in allowed_gpu_ids])
    ray.init(num_gpus=3)

    @ray.remote(num_gpus=1)
    def f():
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 1
        assert gpu_ids[0] in allowed_gpu_ids

    @ray.remote(num_gpus=2)
    def g():
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 2
        assert gpu_ids[0] in allowed_gpu_ids
        assert gpu_ids[1] in allowed_gpu_ids

    # Run many tasks so every worker/GPU assignment gets exercised.
    ray.get([f.remote() for _ in range(100)])
    ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(ray_start_regular):
    """Tasks may block on ray.get/ray.wait for results of nested tasks."""

    @ray.remote
    def f(i, j):
        return (i, j)

    # NOTE(review): g is defined but never invoked below — possibly dead code.
    @ray.remote
    def g(i):
        # Each instance of g submits and blocks on the result of another
        # remote task.
        object_ids = [f.remote(i, j) for j in range(2)]
        return ray.get(object_ids)

    @ray.remote
    def h(i):
        # Each instance of g submits and blocks on the result of another
        # remote task using ray.wait.
        object_ids = [f.remote(i, j) for j in range(2)]
        return ray.wait(object_ids, num_returns=len(object_ids))

    ray.get([h.remote(i) for i in range(4)])

    @ray.remote
    def _sleep(i):
        time.sleep(0.01)
        return (i)

    @ray.remote
    def sleep():
        # Each instance of sleep submits and blocks on the result of
        # another remote task, which takes some time to execute.
        ray.get([_sleep.remote(i) for i in range(10)])

    ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
    """Workers must exit after executing max_calls tasks."""

    @ray.remote(max_calls=1)
    def f():
        return os.getpid()

    pid = ray.get(f.remote())
    # With max_calls=1 the worker dies after a single execution.
    ray.test_utils.wait_for_pid_to_exit(pid)

    @ray.remote(max_calls=2)
    def f():
        return os.getpid()

    pid1 = ray.get(f.remote())
    pid2 = ray.get(f.remote())
    # Both calls run on the same worker, which then exits.
    assert pid1 == pid2
    ray.test_utils.wait_for_pid_to_exit(pid1)
if __name__ == "__main__":
    # Allow invoking this test module directly: `python <file>`.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_advanced_3.py
|
Python
|
# coding: utf-8
import glob
import logging
import os
import setproctitle
import shutil
import json
import sys
import socket
import subprocess
import tempfile
import time
import numpy as np
import pickle
import pytest
import ray
from ray import signature
import ray.ray_constants as ray_constants
import ray.cluster_utils
import ray.test_utils
from ray.test_utils import RayTestTimeoutException
logger = logging.getLogger(__name__)
def attempt_to_load_balance(remote_function,
                            args,
                            total_tasks,
                            num_nodes,
                            minimum_count,
                            num_attempts=100):
    """Repeatedly submit tasks until they spread across all nodes.

    Each attempt submits ``total_tasks`` invocations of ``remote_function``
    (which must return an identifier of the node it ran on) and inspects the
    distribution. Succeeds once every one of ``num_nodes`` nodes ran at
    least ``minimum_count`` tasks; asserts after ``num_attempts`` failures.
    """
    attempts = 0
    while attempts < num_attempts:
        locations = ray.get(
            [remote_function.remote(*args) for _ in range(total_tasks)])
        names = set(locations)
        counts = [locations.count(name) for name in names]
        logger.info("Counts are {}.".format(counts))
        if (len(names) == num_nodes
                and all(count >= minimum_count for count in counts)):
            break
        attempts += 1
    assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
    # This test ensures that tasks are being assigned to all raylets
    # in a roughly equal manner.
    cluster = ray_start_cluster
    num_nodes = 3
    num_cpus = 7
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_cpus)
    ray.init(address=cluster.address)

    @ray.remote
    def f():
        # Return an id for the node this task ran on.
        time.sleep(0.01)
        return ray.worker.global_worker.node.unique_id

    attempt_to_load_balance(f, [], 100, num_nodes, 10)
    attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
    # This test ensures that tasks are being assigned to all raylets in a
    # roughly equal manner even when the tasks have dependencies.
    cluster = ray_start_cluster
    num_nodes = 3
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=1)
    ray.init(address=cluster.address)

    @ray.remote
    def f(x):
        # Return an id for the node this task ran on.
        time.sleep(0.010)
        return ray.worker.global_worker.node.unique_id

    # This object will be local to one of the raylets. Make sure
    # this doesn't prevent tasks from being scheduled on other raylets.
    x = ray.put(np.zeros(1000000))

    attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_actors(num_actors, timeout=10):
    """Poll the GCS until at least ``num_actors`` actors are registered."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(ray.actors()) >= num_actors:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException("Timed out while waiting for global state.")
def wait_for_num_tasks(num_tasks, timeout=10):
    """Poll the GCS until at least ``num_tasks`` tasks are registered."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(ray.tasks()) >= num_tasks:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
    """Poll the GCS until at least ``num_objects`` objects are registered."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(ray.objects()) >= num_objects:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException("Timed out while waiting for global state.")
def test_global_state_api(shutdown_only):
    """Exercise the global-state APIs (objects/actors/tasks/nodes/jobs).

    First checks that they raise before ray.init(), then validates the
    contents of the task, client, actor, and job tables for a one-node
    cluster running a single driver and a single actor.
    """
    error_message = ("The ray global state API cannot be used "
                     "before ray.init has been called.")

    # Every global-state accessor must fail before ray.init().
    with pytest.raises(Exception, match=error_message):
        ray.objects()

    with pytest.raises(Exception, match=error_message):
        ray.actors()

    with pytest.raises(Exception, match=error_message):
        ray.tasks()

    with pytest.raises(Exception, match=error_message):
        ray.nodes()

    with pytest.raises(Exception, match=error_message):
        ray.jobs()

    ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})

    assert ray.cluster_resources()["CPU"] == 5
    assert ray.cluster_resources()["GPU"] == 3
    assert ray.cluster_resources()["CustomResource"] == 1

    assert ray.objects() == {}

    job_id = ray.utils.compute_job_id_from_driver(
        ray.WorkerID(ray.worker.global_worker.worker_id))
    driver_task_id = ray.worker.global_worker.current_task_id.hex()

    # One task is put in the task table which corresponds to this driver.
    wait_for_num_tasks(1)
    task_table = ray.tasks()
    assert len(task_table) == 1
    assert driver_task_id == list(task_table.keys())[0]
    task_spec = task_table[driver_task_id]["TaskSpec"]
    nil_unique_id_hex = ray.UniqueID.nil().hex()
    nil_actor_id_hex = ray.ActorID.nil().hex()

    # The driver's synthetic task entry has nil IDs and no args or returns.
    assert task_spec["TaskID"] == driver_task_id
    assert task_spec["ActorID"] == nil_actor_id_hex
    assert task_spec["Args"] == []
    assert task_spec["JobID"] == job_id.hex()
    assert task_spec["FunctionID"] == nil_unique_id_hex
    assert task_spec["ReturnObjectIDs"] == []

    client_table = ray.nodes()
    node_ip_address = ray.worker.global_worker.node_ip_address

    assert len(client_table) == 1
    assert client_table[0]["NodeManagerAddress"] == node_ip_address

    @ray.remote
    class Actor:
        def __init__(self):
            pass

    _ = Actor.remote()
    # Wait for actor to be created
    wait_for_num_actors(1)

    actor_table = ray.actors()
    assert len(actor_table) == 1

    actor_info, = actor_table.values()
    assert actor_info["JobID"] == job_id.hex()
    assert "IPAddress" in actor_info["Address"]
    assert "IPAddress" in actor_info["OwnerAddress"]
    # The actor's own port differs from its owner's (the driver's) port.
    assert actor_info["Address"]["Port"] != actor_info["OwnerAddress"]["Port"]

    job_table = ray.jobs()

    assert len(job_table) == 1
    assert job_table[0]["JobID"] == job_id.hex()
    assert job_table[0]["NodeManagerAddress"] == node_ip_address
@pytest.mark.skipif(
    ray_constants.direct_call_enabled(),
    reason="object and task API not supported")
def test_global_state_task_object_api(shutdown_only):
    """Validate the task and object tables after submitting one task."""
    ray.init()

    job_id = ray.utils.compute_job_id_from_driver(
        ray.WorkerID(ray.worker.global_worker.worker_id))
    driver_task_id = ray.worker.global_worker.current_task_id.hex()
    nil_actor_id_hex = ray.ActorID.nil().hex()

    @ray.remote
    def f(*xs):
        return 1

    x_id = ray.put(1)
    result_id = f.remote(1, "hi", x_id)

    # Wait for one additional task to complete.
    wait_for_num_tasks(1 + 1)
    task_table = ray.tasks()
    assert len(task_table) == 1 + 1
    # The remaining entry after removing the driver's is f's task.
    task_id_set = set(task_table.keys())
    task_id_set.remove(driver_task_id)
    task_id = list(task_id_set)[0]

    task_spec = task_table[task_id]["TaskSpec"]
    assert task_spec["ActorID"] == nil_actor_id_hex
    # Arguments are recorded interleaved with DUMMY_TYPE markers.
    assert task_spec["Args"] == [
        signature.DUMMY_TYPE, 1, signature.DUMMY_TYPE, "hi",
        signature.DUMMY_TYPE, x_id
    ]
    assert task_spec["JobID"] == job_id.hex()
    assert task_spec["ReturnObjectIDs"] == [result_id]

    assert task_table[task_id] == ray.tasks(task_id)

    # Wait for two objects, one for the x_id and one for result_id.
    wait_for_num_objects(2)

    # NOTE(review): defined but never invoked below — possibly dead code.
    def wait_for_object_table():
        timeout = 10
        start_time = time.time()
        while time.time() - start_time < timeout:
            object_table = ray.objects()
            tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
                            object_table[result_id]["ManagerIDs"] is not None)
            if tables_ready:
                return
            time.sleep(0.1)
        raise RayTestTimeoutException(
            "Timed out while waiting for object table to "
            "update.")

    object_table = ray.objects()
    assert len(object_table) == 2
    assert object_table[x_id] == ray.objects(x_id)
    object_table_entry = ray.objects(result_id)
    assert object_table[result_id] == object_table_entry
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError:
    """Capture stdout and stderr of some span.

    This can be used as follows.

        captured = {}
        with CaptureOutputAndError(captured):
            # Do stuff.
        # Access captured["out"] and captured["err"].
    """

    def __init__(self, captured_output_and_error):
        import io
        # In-memory sinks that temporarily replace the real streams.
        self._out_sink = io.StringIO()
        self._err_sink = io.StringIO()
        self._result = captured_output_and_error

    def __enter__(self):
        # Drain anything already buffered before swapping the streams.
        sys.stdout.flush()
        sys.stderr.flush()
        self._saved_streams = (sys.stdout, sys.stderr)
        sys.stdout = self._out_sink
        sys.stderr = self._err_sink

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout.flush()
        sys.stderr.flush()
        # Restore the original streams and publish what was captured.
        sys.stdout, sys.stderr = self._saved_streams
        self._result["out"] = self._out_sink.getvalue()
        self._result["err"] = self._err_sink.getvalue()
def test_logging_to_driver(shutdown_only):
    """With log_to_driver=True, worker prints are forwarded to the driver."""
    ray.init(num_cpus=1, log_to_driver=True)

    @ray.remote
    def f():
        # It's important to make sure that these print statements occur even
        # without calling sys.stdout.flush() and sys.stderr.flush().
        for i in range(100):
            print(i)
            print(100 + i, file=sys.stderr)

    captured = {}
    with CaptureOutputAndError(captured):
        ray.get(f.remote())
        # Give the log monitor time to forward the worker's output.
        time.sleep(1)

    # Both the stdout (0-99) and stderr (100-199) lines must arrive.
    output_lines = captured["out"]
    for i in range(200):
        assert str(i) in output_lines

    # TODO(rkn): Check that no additional logs appear beyond what we expect
    # and that there are no duplicate logs. Once we address the issue
    # described in https://github.com/ray-project/ray/pull/5462, we should
    # also check that nothing is logged to stderr.
def test_not_logging_to_driver(shutdown_only):
    """With log_to_driver=False, no worker output reaches the driver."""
    ray.init(num_cpus=1, log_to_driver=False)

    @ray.remote
    def f():
        for i in range(100):
            print(i)
            print(100 + i, file=sys.stderr)
            sys.stdout.flush()
            sys.stderr.flush()

    captured = {}
    with CaptureOutputAndError(captured):
        ray.get(f.remote())
        # Give the log monitor a chance to (incorrectly) forward output.
        time.sleep(1)

    output_lines = captured["out"]
    assert len(output_lines) == 0

    # TODO(rkn): Check that no additional logs appear beyond what we expect
    # and that there are no duplicate logs. Once we address the issue
    # described in https://github.com/ray-project/ray/pull/5462, we should
    # also check that nothing is logged to stderr.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
    """Check that the expected number of distinct worker processes start."""
    num_workers = 3
    ray.init(num_cpus=num_workers)

    @ray.remote
    def f():
        # Identify the worker this task ran on.
        return id(ray.worker.global_worker), os.getpid()

    # Wait until all of the workers have started.
    worker_ids = set()
    while len(worker_ids) != num_workers:
        worker_ids = set(ray.get([f.remote() for _ in range(10)]))
def test_specific_job_id():
    """A job ID passed to ray.init() is visible in driver and workers."""
    dummy_driver_id = ray.JobID.from_int(1)
    ray.init(num_cpus=1, job_id=dummy_driver_id)

    # in driver
    assert dummy_driver_id == ray._get_runtime_context().current_driver_id

    # in worker
    @ray.remote
    def f():
        return ray._get_runtime_context().current_driver_id

    assert dummy_driver_id == ray.get(f.remote())

    ray.shutdown()
def test_object_id_properties():
    """Unit-check ObjectID construction, nil handling, and pickling."""
    id_bytes = b"00112233445566778899"
    object_id = ray.ObjectID(id_bytes)
    assert object_id.binary() == id_bytes
    object_id = ray.ObjectID.nil()
    assert object_id.is_nil()
    # IDs must be exactly 20 bytes long.
    with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
        ray.ObjectID(id_bytes + b"1234")
    with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
        ray.ObjectID(b"0123456789")
    object_id = ray.ObjectID.from_random()
    assert not object_id.is_nil()
    assert object_id.binary() != id_bytes
    # ObjectIDs must round-trip through pickle unchanged.
    id_dumps = pickle.dumps(object_id)
    id_from_dumps = pickle.loads(id_dumps)
    assert id_from_dumps == object_id
@pytest.fixture
def shutdown_only_with_initialization_check():
    """Shut Ray down after the test and assert the shutdown took effect."""
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
    assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
    """ray.is_initialized() flips to True after ray.init()."""
    assert not ray.is_initialized()
    ray.init(num_cpus=0)
    assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
    """ray.is_initialized() also reports True in local mode."""
    assert not ray.is_initialized()
    ray.init(num_cpus=0, local_mode=True)
    assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
    """ray.wait() must succeed on an object evicted from the object store.

    Object pinning is disabled so the large result of f() can be evicted;
    waiting on it afterwards exercises reconstruction.
    """
    ray.init(
        num_cpus=1,
        object_store_memory=int(10**8),
        _internal_config=json.dumps({
            "object_pinning_enabled": 0
        }))

    @ray.remote
    def f():
        # A 60 MB result: two of these exceed the 100 MB store and force
        # eviction of the first.
        return np.zeros(6 * 10**7, dtype=np.uint8)

    x_id = f.remote()
    ray.wait([x_id])
    ray.wait([f.remote()])
    # The first object should have been evicted from the local store.
    assert not ray.worker.global_worker.core_worker.object_exists(x_id)
    ready_ids, _ = ray.wait([x_id])
    assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
    """Worker process titles must reflect the actor/task being executed."""

    @ray.remote
    class UniqueName:
        def __init__(self):
            assert setproctitle.getproctitle() == "ray::UniqueName.__init__()"

        def f(self):
            assert setproctitle.getproctitle() == "ray::UniqueName.f()"

    @ray.remote
    def unique_1():
        assert "unique_1" in setproctitle.getproctitle()

    actor = UniqueName.remote()
    ray.get(actor.f.remote())
    ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
    """Appending the exact same error entry to the GCS twice must not fail."""
    ray.init(num_cpus=0)

    driver_id = ray.WorkerID.nil()
    error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
                                                       "message", 0)

    # Push the same message to the GCS twice (they are the same because we
    # do not include a timestamp).

    r = ray.worker.global_worker.redis_client

    r.execute_command("RAY.TABLE_APPEND",
                      ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
                      ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
                      driver_id.binary(), error_data)

    # Before https://github.com/ray-project/ray/pull/3316 this would
    # give an error
    r.execute_command("RAY.TABLE_APPEND",
                      ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
                      ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
                      driver_id.binary(), error_data)
@pytest.mark.skipif(
    os.getenv("TRAVIS") is None,
    reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
    """`ray stack` must report the functions currently blocked on workers."""

    def unique_name_1():
        time.sleep(1000)

    @ray.remote
    def unique_name_2():
        time.sleep(1000)

    @ray.remote
    def unique_name_3():
        unique_name_1()

    # Leave two tasks blocked forever so their frames show up in the dump.
    unique_name_2.remote()
    unique_name_3.remote()

    success = False
    start_time = time.time()
    while time.time() - start_time < 30:
        # Attempt to parse the "ray stack" call.
        output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
        if ("unique_name_1" in output and "unique_name_2" in output
                and "unique_name_3" in output):
            success = True
            break

    if not success:
        raise Exception("Failed to find necessary information with "
                        "'ray stack'")
def test_pandas_parquet_serialization():
    """Smoke-test writing parquet files (plain and lz4-compressed).

    Only checks that the writes complete without error; skipped entirely if
    pandas is not installed.
    """
    # Only test this if pandas is installed
    pytest.importorskip("pandas")
    import pandas as pd
    import pyarrow as pa
    import pyarrow.parquet as pq

    tempdir = tempfile.mkdtemp()
    try:
        filename = os.path.join(tempdir, "parquet-test")
        pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
        with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
            table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
            pq.write_table(table, f, compression="lz4")
    finally:
        # Clean up even when a write fails, so temp directories never leak.
        shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
    """ray.init() must create missing parent dirs for the raylet socket."""
    random_name = ray.ObjectID.from_random().hex()
    # A directory that does not exist yet.
    temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
    temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
                                           "raylet_socket")
    ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(ray_start_regular):
    """Sending garbage bytes to the raylet port must not crash it."""
    node_manager_address = None
    node_manager_port = None
    for client in ray.nodes():
        if "NodeManagerAddress" in client:
            node_manager_address = client["NodeManagerAddress"]
            node_manager_port = client["NodeManagerPort"]
    assert node_manager_address
    assert node_manager_port

    # Try to bring down the node manager:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((node_manager_address, node_manager_port))
    s.send(1000 * b"asdf")

    # The raylet must still be able to execute tasks afterwards.
    @ray.remote
    def f():
        return 1

    assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
    """Remote functions whose source contains non-ASCII comments still run."""

    @ray.remote
    def f():
        # 日本語 Japanese comment
        return 1

    result = ray.get(f.remote())
    assert result == 1
def test_shutdown_disconnect_global_state():
    """Global-state APIs must raise again once ray.shutdown() is called."""
    ray.init(num_cpus=0)
    ray.shutdown()

    with pytest.raises(Exception) as exc_info:
        ray.objects()
    assert str(exc_info.value).endswith("ray.init has been called.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_put_pins_object(ray_start_object_store_memory):
    """Objects stay pinned while a Python ObjectID referencing them lives.

    A raw binary ID does not pin; once the Python ObjectID is deleted the
    object may be evicted, and weakref puts never pin at all.
    """
    x_id = ray.put("HI")
    x_binary = x_id.binary()
    assert ray.get(ray.ObjectID(x_binary)) == "HI"

    # x cannot be evicted since x_id pins it
    for _ in range(10):
        ray.put(np.zeros(10 * 1024 * 1024))
    assert ray.get(x_id) == "HI"
    assert ray.get(ray.ObjectID(x_binary)) == "HI"

    # now it can be evicted since x_id pins it but x_binary does not
    del x_id
    for _ in range(10):
        ray.put(np.zeros(10 * 1024 * 1024))
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(ray.ObjectID(x_binary))

    # weakref put
    y_id = ray.put("HI", weakref=True)
    for _ in range(10):
        ray.put(np.zeros(10 * 1024 * 1024))
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(y_id)
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_redis_lru_with_set(ray_start_object_store_memory):
    """A put after simulated Redis LRU eviction must not crash.

    Deletes the object's Redis entry by hand to mimic LRU eviction, then
    evicts it from the object store by filling the store with a new put.
    """
    x = np.zeros(8 * 10**7, dtype=np.uint8)
    x_id = ray.put(x, weakref=True)

    # Remove the object from the object table to simulate Redis LRU eviction.
    removed = False
    start_time = time.time()
    while time.time() < start_time + 10:
        if ray.state.state.redis_clients[0].delete(b"OBJECT" +
                                                   x_id.binary()) == 1:
            removed = True
            break
    assert removed

    # Now evict the object from the object store.
    ray.put(x)  # This should not crash.
def test_decorated_function(ray_start_regular):
    """__ray_invocation_decorator__ can rewrite a remote call's args/returns.

    The decorator receives the raw invocation; here it reverses the
    positional args, overrides d with 5, and passes kwargs through.
    """

    def function_invocation_decorator(f):
        def new_f(args, kwargs):
            # Reverse the arguments.
            return f(args[::-1], {"d": 5}), kwargs

        return new_f

    def f(a, b, c, d=None):
        return a, b, c, d

    f.__ray_invocation_decorator__ = function_invocation_decorator
    f = ray.remote(f)

    result_id, kwargs = f.remote(1, 2, 3, d=4)
    assert kwargs == {"d": 4}
    assert ray.get(result_id) == (3, 2, 1, 5)
def test_get_postprocess(ray_start_regular):
    """Results of ray.get pass through registered _post_get_hooks."""

    def get_postprocessor(object_ids, values):
        # Drop all non-positive values from the fetched results.
        return [value for value in values if value > 0]

    ray.worker.global_worker._post_get_hooks.append(get_postprocessor)
    try:
        assert ray.get(
            [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]
    finally:
        # Unregister the hook so it cannot leak into later tests sharing
        # this worker (the original left it installed forever).
        ray.worker.global_worker._post_get_hooks.remove(get_postprocessor)
def test_export_after_shutdown(ray_start_regular):
    """Remote definitions survive across ray.shutdown()/ray.init() cycles."""

    # This test checks that we can use actor and remote function definitions
    # across multiple Ray sessions.
    @ray.remote
    def f():
        pass

    @ray.remote
    class Actor:
        def method(self):
            pass

    ray.get(f.remote())
    a = Actor.remote()
    ray.get(a.method.remote())

    ray.shutdown()

    # Start Ray and use the remote function and actor again.
    ray.init(num_cpus=1)
    ray.get(f.remote())
    a = Actor.remote()
    ray.get(a.method.remote())

    ray.shutdown()

    # Start Ray again and make sure that these definitions can be exported from
    # workers.
    ray.init(num_cpus=2)

    @ray.remote
    def export_definitions_from_worker(remote_function, actor_class):
        ray.get(remote_function.remote())
        actor_handle = actor_class.remote()
        ray.get(actor_handle.method.remote())

    ray.get(export_definitions_from_worker.remote(f, Actor))
def test_invalid_unicode_in_worker_log(shutdown_only):
    """The log monitor must tolerate invalid UTF-8 bytes in worker logs."""
    info = ray.init(num_cpus=1)

    logs_dir = os.path.join(info["session_dir"], "logs")

    # Wait till first worker log file is created.
    while True:
        log_file_paths = glob.glob("{}/worker*.out".format(logs_dir))
        if len(log_file_paths) == 0:
            time.sleep(0.2)
        else:
            break

    # \xe5 on its own is not valid UTF-8.
    with open(log_file_paths[0], "wb") as f:
        f.write(b"\xe5abc\nline2\nline3\n")
        f.write(b"\xe5abc\nline2\nline3\n")
        f.write(b"\xe5abc\nline2\nline3\n")
        f.flush()

    # Wait till the log monitor reads the file.
    time.sleep(1.0)

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
@pytest.mark.skip(reason="This test is too expensive to run.")
def test_move_log_files_to_old(shutdown_only):
    """Dead workers' log files must be moved into the logs/old/ directory."""
    info = ray.init(num_cpus=1)

    logs_dir = os.path.join(info["session_dir"], "logs")

    @ray.remote
    class Actor:
        def f(self):
            print("function f finished")

    # First create a temporary actor.
    actors = [
        Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)
    ]
    ray.get([a.f.remote() for a in actors])

    # Make sure no log files are in the "old" directory before the actors
    # are killed.
    assert len(glob.glob("{}/old/worker*.out".format(logs_dir))) == 0

    # Now kill the actors so the files get moved to logs/old/.
    [a.__ray_terminate__.remote() for a in actors]

    while True:
        log_file_paths = glob.glob("{}/old/worker*.out".format(logs_dir))
        if len(log_file_paths) > 0:
            with open(log_file_paths[0], "r") as f:
                assert "function f finished\n" in f.readlines()
            break

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
if __name__ == "__main__":
    # Allow invoking this test module directly: `python <file>`.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_array.py
|
Python
|
from importlib import reload
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import pytest
import sys
import ray
import ray.experimental.array.remote as ra
import ray.experimental.array.distributed as da
import ray.cluster_utils
@pytest.fixture
def reload_modules():
    """Reload the remote/distributed array modules before each test.

    Re-registers their remote functions with the current Ray session.
    """
    modules = [ra.core, ra.random, ra.linalg, da.core, da.random, da.linalg]
    # Use a plain loop: a list comprehension executed purely for its side
    # effects builds a throwaway list.
    for module in modules:
        reload(module)
def test_remote_array_methods(ray_start_2_cpus, reload_modules):
    """Check the single-node remote array ops: eye, zeros, and qr."""
    # test eye
    object_id = ra.eye.remote(3)
    val = ray.get(object_id)
    assert_almost_equal(val, np.eye(3))

    # test zeros
    object_id = ra.zeros.remote([3, 4, 5])
    val = ray.get(object_id)
    assert_equal(val, np.zeros([3, 4, 5]))

    # test qr - pass by value
    a_val = np.random.normal(size=[10, 11])
    q_id, r_id = ra.linalg.qr.remote(a_val)
    q_val = ray.get(q_id)
    r_val = ray.get(r_id)
    # Q @ R must reconstruct the input.
    assert_almost_equal(np.dot(q_val, r_val), a_val)

    # test qr - pass by objectid
    a = ra.random.normal.remote([10, 13])
    q_id, r_id = ra.linalg.qr.remote(a)
    a_val = ray.get(a)
    q_val = ray.get(q_id)
    r_val = ray.get(r_id)
    assert_almost_equal(np.dot(q_val, r_val), a_val)
def test_distributed_array_assemble(ray_start_2_cpus, reload_modules):
    """DistArray.assemble() stitches the blocks back into one ndarray."""
    a = ra.ones.remote([da.BLOCK_SIZE, da.BLOCK_SIZE])
    b = ra.zeros.remote([da.BLOCK_SIZE, da.BLOCK_SIZE])
    x = da.DistArray([2 * da.BLOCK_SIZE, da.BLOCK_SIZE], np.array([[a], [b]]))
    # The assembled array is the ones-block stacked on the zeros-block.
    assert_equal(
        x.assemble(),
        np.vstack([
            np.ones([da.BLOCK_SIZE, da.BLOCK_SIZE]),
            np.zeros([da.BLOCK_SIZE, da.BLOCK_SIZE])
        ]))
def test_distributed_array_methods(ray_start_cluster_2_nodes, reload_modules):
    """End-to-end checks of the distributed array library on two nodes.

    Covers constructors (zeros/ones/eye/random), elementwise and matrix ops
    (add/subtract/dot/transpose/triu/tril), numpy round-tripping, and the
    distributed factorizations (tsqr, modified_lu, tsqr_hr, qr).
    """
    x = da.zeros.remote([9, 25, 51], "float")
    assert_equal(ray.get(da.assemble.remote(x)), np.zeros([9, 25, 51]))

    x = da.ones.remote([11, 25, 49], dtype_name="float")
    assert_equal(ray.get(da.assemble.remote(x)), np.ones([11, 25, 49]))

    x = da.random.normal.remote([11, 25, 49])
    y = da.copy.remote(x)
    assert_equal(
        ray.get(da.assemble.remote(x)), ray.get(da.assemble.remote(y)))

    x = da.eye.remote(25, dtype_name="float")
    assert_equal(ray.get(da.assemble.remote(x)), np.eye(25))

    x = da.random.normal.remote([25, 49])
    y = da.triu.remote(x)
    assert_equal(
        ray.get(da.assemble.remote(y)), np.triu(
            ray.get(da.assemble.remote(x))))

    x = da.random.normal.remote([25, 49])
    y = da.tril.remote(x)
    assert_equal(
        ray.get(da.assemble.remote(y)), np.tril(
            ray.get(da.assemble.remote(x))))

    x = da.random.normal.remote([25, 49])
    y = da.random.normal.remote([49, 18])
    z = da.dot.remote(x, y)
    w = da.assemble.remote(z)
    u = da.assemble.remote(x)
    v = da.assemble.remote(y)
    assert_almost_equal(ray.get(w), np.dot(ray.get(u), ray.get(v)))
    # NOTE(review): duplicate of the previous assertion; likely copy-paste.
    assert_almost_equal(ray.get(w), np.dot(ray.get(u), ray.get(v)))

    # test add
    x = da.random.normal.remote([23, 42])
    y = da.random.normal.remote([23, 42])
    z = da.add.remote(x, y)
    assert_almost_equal(
        ray.get(da.assemble.remote(z)),
        ray.get(da.assemble.remote(x)) + ray.get(da.assemble.remote(y)))

    # test subtract
    x = da.random.normal.remote([33, 40])
    y = da.random.normal.remote([33, 40])
    z = da.subtract.remote(x, y)
    assert_almost_equal(
        ray.get(da.assemble.remote(z)),
        ray.get(da.assemble.remote(x)) - ray.get(da.assemble.remote(y)))

    # test transpose
    x = da.random.normal.remote([234, 432])
    y = da.transpose.remote(x)
    assert_equal(
        ray.get(da.assemble.remote(x)).T, ray.get(da.assemble.remote(y)))

    # test numpy_to_dist
    x = da.random.normal.remote([23, 45])
    y = da.assemble.remote(x)
    z = da.numpy_to_dist.remote(y)
    w = da.assemble.remote(z)
    assert_equal(
        ray.get(da.assemble.remote(x)), ray.get(da.assemble.remote(z)))
    assert_equal(ray.get(y), ray.get(w))

    # test da.tsqr
    # Shapes chosen to hit single-block, sub-block, and multi-block cases.
    for shape in [[123, da.BLOCK_SIZE], [7, da.BLOCK_SIZE],
                  [da.BLOCK_SIZE, da.BLOCK_SIZE], [da.BLOCK_SIZE, 7],
                  [10 * da.BLOCK_SIZE, da.BLOCK_SIZE]]:
        x = da.random.normal.remote(shape)
        K = min(shape)
        q, r = da.linalg.tsqr.remote(x)
        x_val = ray.get(da.assemble.remote(x))
        q_val = ray.get(da.assemble.remote(q))
        r_val = ray.get(r)
        assert r_val.shape == (K, shape[1])
        assert_equal(r_val, np.triu(r_val))
        assert_almost_equal(x_val, np.dot(q_val, r_val))
        # Q must have orthonormal columns.
        assert_almost_equal(np.dot(q_val.T, q_val), np.eye(K))

    # test da.linalg.modified_lu
    def test_modified_lu(d1, d2):
        print("testing dist_modified_lu with d1 = " + str(d1) + ", d2 = " +
              str(d2))
        assert d1 >= d2
        # Apply modified_lu to the orthonormal Q of a random matrix.
        m = ra.random.normal.remote([d1, d2])
        q, r = ra.linalg.qr.remote(m)
        l, u, s = da.linalg.modified_lu.remote(da.numpy_to_dist.remote(q))
        q_val = ray.get(q)
        ray.get(r)
        l_val = ray.get(da.assemble.remote(l))
        u_val = ray.get(u)
        s_val = ray.get(s)

        s_mat = np.zeros((d1, d2))
        for i in range(len(s_val)):
            s_mat[i, i] = s_val[i]

        # Check that q - s = l * u.
        assert_almost_equal(q_val - s_mat, np.dot(l_val, u_val))
        # Check that u is upper triangular.
        assert_equal(np.triu(u_val), u_val)
        # Check that l is lower triangular.
        assert_equal(np.tril(l_val), l_val)

    for d1, d2 in [(100, 100), (99, 98), (7, 5), (7, 7), (20, 7), (20, 10)]:
        test_modified_lu(d1, d2)

    # test dist_tsqr_hr
    def test_dist_tsqr_hr(d1, d2):
        print("testing dist_tsqr_hr with d1 = " + str(d1) + ", d2 = " +
              str(d2))
        a = da.random.normal.remote([d1, d2])
        y, t, y_top, r = da.linalg.tsqr_hr.remote(a)
        a_val = ray.get(da.assemble.remote(a))
        y_val = ray.get(da.assemble.remote(y))
        t_val = ray.get(t)
        y_top_val = ray.get(y_top)
        r_val = ray.get(r)

        # Reconstruct Q from the Householder representation.
        tall_eye = np.zeros((d1, min(d1, d2)))
        np.fill_diagonal(tall_eye, 1)
        q = tall_eye - np.dot(y_val, np.dot(t_val, y_top_val.T))
        # Check that q.T * q = I.
        assert_almost_equal(np.dot(q.T, q), np.eye(min(d1, d2)))
        # Check that a = (I - y * t * y_top.T) * r.
        assert_almost_equal(np.dot(q, r_val), a_val)

    for d1, d2 in [(123, da.BLOCK_SIZE), (7, da.BLOCK_SIZE), (da.BLOCK_SIZE,
                                                              da.BLOCK_SIZE),
                   (da.BLOCK_SIZE, 7), (10 * da.BLOCK_SIZE, da.BLOCK_SIZE)]:
        test_dist_tsqr_hr(d1, d2)

    def test_dist_qr(d1, d2):
        print("testing qr with d1 = {}, and d2 = {}.".format(d1, d2))
        a = da.random.normal.remote([d1, d2])
        K = min(d1, d2)
        q, r = da.linalg.qr.remote(a)
        a_val = ray.get(da.assemble.remote(a))
        q_val = ray.get(da.assemble.remote(q))
        r_val = ray.get(da.assemble.remote(r))

        assert q_val.shape == (d1, K)
        assert r_val.shape == (K, d2)
        assert_almost_equal(np.dot(q_val.T, q_val), np.eye(K))
        assert_equal(r_val, np.triu(r_val))
        assert_almost_equal(a_val, np.dot(q_val, r_val))

    for d1, d2 in [(123, da.BLOCK_SIZE), (7, da.BLOCK_SIZE), (da.BLOCK_SIZE,
                                                              da.BLOCK_SIZE),
                   (da.BLOCK_SIZE, 7), (13, 21), (34, 35), (8, 7)]:
        test_dist_qr(d1, d2)
        test_dist_qr(d2, d1)
    # Fuzz with small random shapes as well.
    for _ in range(20):
        d1 = np.random.randint(1, 35)
        d2 = np.random.randint(1, 35)
        test_dist_qr(d1, d2)
if __name__ == "__main__":
    # Allow invoking this test module directly: `python <file>`.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_autoscaler.py
|
Python
|
import shutil
import tempfile
import threading
import time
import unittest
import yaml
import copy
import ray
import ray.services as services
from ray.autoscaler.autoscaler import StandardAutoscaler, LoadMetrics, \
fillout_defaults, validate_config
from ray.autoscaler.tags import TAG_RAY_NODE_TYPE, TAG_RAY_NODE_STATUS, \
STATUS_UP_TO_DATE, STATUS_UPDATE_FAILED
from ray.autoscaler.node_provider import NODE_PROVIDERS, NodeProvider
from ray.test_utils import RayTestTimeoutException
import pytest
class MockNode:
    """Minimal in-memory stand-in for a cloud node in autoscaler tests."""

    def __init__(self, node_id, tags):
        self.node_id = node_id
        # Fresh nodes begin life pending, mirroring real cloud providers.
        self.state = "pending"
        self.tags = tags
        self.external_ip = "1.2.3.4"
        self.internal_ip = "172.0.0.{}".format(self.node_id)

    def matches(self, tags):
        """Return True iff every (key, value) pair in ``tags`` is set on us."""
        return all(key in self.tags and self.tags[key] == value
                   for key, value in tags.items())
class MockProcessRunner:
    """Records shell commands instead of executing them, for autoscaler tests.

    ``fail_cmds`` is a list of substrings; any command whose string form
    contains one of them raises instead of being recorded.
    """

    def __init__(self, fail_cmds=None):
        self.calls = []
        # BUG FIX: the original signature used a mutable default
        # (fail_cmds=[]), which is shared across every instance created
        # without the argument; None plus a fallback avoids that.
        self.fail_cmds = fail_cmds or []

    def check_call(self, cmd, *args, **kwargs):
        """Record ``cmd``, or raise if it contains a configured fail token."""
        for token in self.fail_cmds:
            if token in str(cmd):
                raise Exception("Failing command on purpose")
        self.calls.append(cmd)

    def assert_has_call(self, ip, pattern):
        """Require some recorded command mentioning ``ip`` to contain ``pattern``."""
        out = ""
        for cmd in self.calls:
            msg = " ".join(cmd)
            if ip in msg:
                out += msg
                out += "\n"
        if pattern in out:
            return True
        else:
            raise Exception("Did not find [{}] in [{}] for {}".format(
                pattern, out, ip))

    def assert_not_has_call(self, ip, pattern):
        """Require no recorded command mentioning ``ip`` to contain ``pattern``."""
        out = ""
        for cmd in self.calls:
            msg = " ".join(cmd)
            if ip in msg:
                out += msg
                out += "\n"
        if pattern in out:
            raise Exception("Found [{}] in [{}] for {}".format(
                pattern, out, ip))
        else:
            return True

    def clear_history(self):
        """Forget all recorded commands."""
        self.calls = []
class MockProvider(NodeProvider):
    """In-memory NodeProvider backed by a dict of MockNode objects.

    Test knobs: ``throw`` makes the list operations raise, ``fail_creates``
    turns creation into a no-op, ``ready_to_create`` gates creation on a
    threading.Event, and ``cache_stopped`` makes terminate stop nodes (and
    create reuse them) instead of destroying them.
    """

    def __init__(self, cache_stopped=False):
        self.mock_nodes = {}  # maps node_id -> MockNode
        self.next_id = 0  # monotonically increasing node ids
        self.throw = False  # when True, list operations raise
        self.fail_creates = False  # when True, create_node does nothing
        self.ready_to_create = threading.Event()
        self.ready_to_create.set()
        self.cache_stopped = cache_stopped

    def non_terminated_nodes(self, tag_filters):
        """Return ids of live nodes whose tags match ``tag_filters``."""
        if self.throw:
            raise Exception("oops")
        return [
            n.node_id for n in self.mock_nodes.values()
            if n.matches(tag_filters)
            and n.state not in ["stopped", "terminated"]
        ]

    def non_terminated_node_ips(self, tag_filters):
        """Return internal IPs of live nodes matching ``tag_filters``."""
        if self.throw:
            raise Exception("oops")
        return [
            n.internal_ip for n in self.mock_nodes.values()
            if n.matches(tag_filters)
            and n.state not in ["stopped", "terminated"]
        ]

    def is_running(self, node_id):
        return self.mock_nodes[node_id].state == "running"

    def is_terminated(self, node_id):
        return self.mock_nodes[node_id].state in ["stopped", "terminated"]

    def node_tags(self, node_id):
        return self.mock_nodes[node_id].tags

    def internal_ip(self, node_id):
        return self.mock_nodes[node_id].internal_ip

    def external_ip(self, node_id):
        return self.mock_nodes[node_id].external_ip

    def create_node(self, node_config, tags, count):
        """Create ``count`` nodes, reusing stopped ones when cache_stopped."""
        # Blocks until the test signals readiness via ready_to_create.
        self.ready_to_create.wait()
        if self.fail_creates:
            return
        if self.cache_stopped:
            # Restart cached stopped nodes before allocating fresh ones.
            for node in self.mock_nodes.values():
                if node.state == "stopped" and count > 0:
                    count -= 1
                    node.state = "pending"
                    node.tags.update(tags)
        for _ in range(count):
            self.mock_nodes[self.next_id] = MockNode(self.next_id, tags.copy())
            self.next_id += 1

    def set_node_tags(self, node_id, tags):
        self.mock_nodes[node_id].tags.update(tags)

    def terminate_node(self, node_id):
        if self.cache_stopped:
            self.mock_nodes[node_id].state = "stopped"
        else:
            self.mock_nodes[node_id].state = "terminated"

    def finish_starting_nodes(self):
        """Flip every pending node to running (simulates boot completing)."""
        for node in self.mock_nodes.values():
            if node.state == "pending":
                node.state = "running"
# Minimal autoscaler config used throughout these tests: a fixed 2-worker
# cluster against the "mock" provider registered in AutoscalingTest.setUp.
# The command strings ("init_cmd", "setup_cmd", ...) are sentinels that the
# tests later grep for in MockProcessRunner's recorded call history.
SMALL_CLUSTER = {
    "cluster_name": "default",
    "min_workers": 2,
    "max_workers": 2,
    "initial_workers": 0,
    "autoscaling_mode": "default",
    "target_utilization_fraction": 0.8,
    "idle_timeout_minutes": 5,
    "provider": {
        "type": "mock",
        "region": "us-east-1",
        "availability_zone": "us-east-1a",
    },
    # Dummy docker/auth sections; never actually used by the mock runner.
    "docker": {
        "image": "example",
        "container_name": "mock",
    },
    "auth": {
        "ssh_user": "ubuntu",
        "ssh_private_key": "/dev/null",
    },
    "head_node": {
        "TestProp": 1,
    },
    "worker_nodes": {
        "TestProp": 2,
    },
    "file_mounts": {},
    "initialization_commands": ["init_cmd"],
    "setup_commands": ["setup_cmd"],
    "head_setup_commands": ["head_setup_cmd"],
    "worker_setup_commands": ["worker_setup_cmd"],
    "head_start_ray_commands": ["start_ray_head"],
    "worker_start_ray_commands": ["start_ray_worker"],
}
class LoadMetricsTest(unittest.TestCase):
    """Unit tests for LoadMetrics usage accounting."""

    def testUpdate(self):
        """approx_workers_used scales with per-node utilization."""
        metrics = LoadMetrics()
        metrics.update("1.1.1.1", {"CPU": 2}, {"CPU": 1}, {})
        assert metrics.approx_workers_used() == 0.5
        metrics.update("1.1.1.1", {"CPU": 2}, {"CPU": 0}, {})
        assert metrics.approx_workers_used() == 1.0
        metrics.update("2.2.2.2", {"CPU": 2}, {"CPU": 0}, {})
        assert metrics.approx_workers_used() == 2.0

    def testLoadMessages(self):
        """A pending resource demand marks every connected node as busy."""
        metrics = LoadMetrics()
        metrics.update("1.1.1.1", {"CPU": 2}, {"CPU": 1}, {})
        assert metrics.approx_workers_used() == 0.5
        metrics.update("1.1.1.1", {"CPU": 2}, {"CPU": 1}, {"CPU": 1})
        assert metrics.approx_workers_used() == 1.0

        # Both nodes count as busy since there is a queue on one.
        metrics.update("2.2.2.2", {"CPU": 2}, {"CPU": 2}, {})
        assert metrics.approx_workers_used() == 2.0
        metrics.update("2.2.2.2", {"CPU": 2}, {"CPU": 0}, {})
        assert metrics.approx_workers_used() == 2.0
        metrics.update("2.2.2.2", {"CPU": 2}, {"CPU": 1}, {})
        assert metrics.approx_workers_used() == 2.0

        # No queue anymore, so we're back to exact accounting.
        metrics.update("1.1.1.1", {"CPU": 2}, {"CPU": 0}, {})
        assert metrics.approx_workers_used() == 1.5
        metrics.update("2.2.2.2", {"CPU": 2}, {"CPU": 1}, {"GPU": 1})
        assert metrics.approx_workers_used() == 2.0

        for ip in ["3.3.3.3", "4.3.3.3", "5.3.3.3", "6.3.3.3", "7.3.3.3",
                   "8.3.3.3"]:
            metrics.update(ip, {"CPU": 2}, {"CPU": 1}, {})
        assert metrics.approx_workers_used() == 8.0
        metrics.update("2.2.2.2", {"CPU": 2}, {"CPU": 1}, {})  # no queue anymore
        assert metrics.approx_workers_used() == 4.5

    def testPruneByNodeIp(self):
        """Metrics for ips outside the active set are dropped."""
        metrics = LoadMetrics()
        metrics.update("1.1.1.1", {"CPU": 1}, {"CPU": 0}, {})
        metrics.update("2.2.2.2", {"CPU": 1}, {"CPU": 0}, {})
        metrics.prune_active_ips({"1.1.1.1", "4.4.4.4"})
        assert metrics.approx_workers_used() == 1.0

    def testBottleneckResource(self):
        """Usage is driven by the most-utilized resource type."""
        metrics = LoadMetrics()
        metrics.update("1.1.1.1", {"CPU": 2}, {"CPU": 0}, {})
        metrics.update("2.2.2.2", {"CPU": 2, "GPU": 16}, {"CPU": 2, "GPU": 2},
                       {})
        assert metrics.approx_workers_used() == 1.88

    def testHeartbeat(self):
        """Both update() and mark_active() record a heartbeat time."""
        metrics = LoadMetrics()
        metrics.update("1.1.1.1", {"CPU": 2}, {"CPU": 1}, {})
        metrics.mark_active("2.2.2.2")
        heartbeats = metrics.last_heartbeat_time_by_ip
        assert "1.1.1.1" in heartbeats
        assert "2.2.2.2" in heartbeats
        assert "3.3.3.3" not in heartbeats

    def testDebugString(self):
        """info_string() summarizes usage, node count, and workers used."""
        metrics = LoadMetrics()
        metrics.update("1.1.1.1", {"CPU": 2}, {"CPU": 0}, {})
        metrics.update("2.2.2.2", {"CPU": 2, "GPU": 16}, {"CPU": 2, "GPU": 2},
                       {})
        metrics.update("3.3.3.3", {
            "memory": 20,
            "object_store_memory": 40
        }, {
            "memory": 0,
            "object_store_memory": 20
        }, {})
        debug = metrics.info_string()
        assert ("ResourceUsage=2.0/4.0 CPU, 14.0/16.0 GPU, "
                "1.05 GiB/1.05 GiB memory, "
                "1.05 GiB/2.1 GiB object_store_memory") in debug
        assert "NumNodesConnected=3" in debug
        assert "NumNodesUsed=2.88" in debug
class AutoscalingTest(unittest.TestCase):
    """End-to-end tests of StandardAutoscaler against MockProvider.

    Each test installs a MockProvider instance into ``self.provider``;
    ``setUp`` registers a "mock" provider type whose factory hands that
    instance back to the autoscaler under test.
    """

    def setUp(self):
        """Register the mock node provider and create a scratch dir."""
        NODE_PROVIDERS["mock"] = \
            lambda: (None, self.create_provider)
        self.provider = None
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        """Undo the provider registration and clean up temp state."""
        del NODE_PROVIDERS["mock"]
        shutil.rmtree(self.tmpdir)
        ray.shutdown()

    def waitFor(self, condition, num_retries=50):
        """Poll ``condition`` every 100ms until it is true, else time out."""
        for _ in range(num_retries):
            if condition():
                return
            time.sleep(.1)
        raise RayTestTimeoutException(
            "Timed out waiting for {}".format(condition))

    def waitForNodes(self, expected, comparison=None, tag_filters=None):
        """Poll until the live node count satisfies ``comparison(n, expected)``.

        ``comparison`` defaults to assertEqual.  ``tag_filters`` restricts
        the count to nodes matching those tags (default: all nodes).
        """
        # Fix: use a None sentinel instead of a mutable {} default argument.
        if tag_filters is None:
            tag_filters = {}
        MAX_ITER = 50
        for i in range(MAX_ITER):
            n = len(self.provider.non_terminated_nodes(tag_filters))
            if comparison is None:
                comparison = self.assertEqual
            try:
                comparison(n, expected)
                return
            except Exception:
                if i == MAX_ITER - 1:
                    raise
                time.sleep(.1)

    def create_provider(self, config, cluster_name):
        """Factory used by the "mock" provider type; returns the test's provider."""
        assert self.provider
        return self.provider

    def write_config(self, config):
        """Dump ``config`` as YAML into the temp dir and return its path."""
        path = self.tmpdir + "/simple.yaml"
        with open(path, "w") as f:
            f.write(yaml.dump(config))
        return path

    def testInvalidConfig(self):
        """An unreadable/empty config file is rejected at construction."""
        invalid_config = "/dev/null"
        with pytest.raises(ValueError):
            StandardAutoscaler(
                invalid_config, LoadMetrics(), update_interval_s=0)

    def testValidation(self):
        """Ensures that schema validation is working."""
        config = copy.deepcopy(SMALL_CLUSTER)
        try:
            validate_config(config)
        except Exception:
            self.fail("Test config did not pass validation test!")

        config["blah"] = "blah"
        with pytest.raises(ValueError):
            validate_config(config)
        del config["blah"]

        config["provider"]["blah"] = "blah"
        with pytest.raises(ValueError):
            validate_config(config)
        del config["provider"]["blah"]

        del config["provider"]
        with pytest.raises(ValueError):
            validate_config(config)

    def testValidateDefaultConfig(self):
        """A provider-only config filled with defaults must validate."""
        config = {}
        config["provider"] = {
            "type": "aws",
            "region": "us-east-1",
            "availability_zone": "us-east-1a",
        }
        config = fillout_defaults(config)
        try:
            validate_config(config)
        except Exception:
            self.fail("Default config did not pass validation test!")

    def testScaleUp(self):
        """The autoscaler launches up to min_workers and then holds steady."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        assert len(self.provider.non_terminated_nodes({})) == 0
        autoscaler.update()
        self.waitForNodes(2)
        autoscaler.update()
        self.waitForNodes(2)

    def testManualAutoscaling(self):
        """request_resources() drives scale-up independent of load metrics."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 0
        config["max_workers"] = 50
        cores_per_node = 2
        config["worker_nodes"] = {"Resources": {"CPU": cores_per_node}}
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        assert len(self.provider.non_terminated_nodes({})) == 0
        autoscaler.update()
        self.waitForNodes(0)
        autoscaler.request_resources({"CPU": cores_per_node * 10})
        for _ in range(5):  # Maximum launch batch is 5
            time.sleep(0.01)
            autoscaler.update()
        self.waitForNodes(10)
        autoscaler.request_resources({"CPU": cores_per_node * 30})
        for _ in range(4):  # Maximum launch batch is 5
            time.sleep(0.01)
            autoscaler.update()
        self.waitForNodes(30)

    def testTerminateOutdatedNodesGracefully(self):
        """Excess nodes are drained down to the target without undershooting."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 5
        config["max_workers"] = 5
        config_path = self.write_config(config)
        self.provider = MockProvider()
        self.provider.create_node({}, {TAG_RAY_NODE_TYPE: "worker"}, 10)
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        self.waitForNodes(10)

        # Gradually scales down to meet target size, never going too low
        for _ in range(10):
            autoscaler.update()
            self.waitForNodes(5, comparison=self.assertLessEqual)
            self.waitForNodes(4, comparison=self.assertGreaterEqual)

        # Eventually reaches steady state
        self.waitForNodes(5)

    def testDynamicScaling(self):
        """Config rewrites at runtime resize the cluster accordingly."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        self.waitForNodes(0)
        autoscaler.update()
        self.waitForNodes(2)

        # Update the config to reduce the cluster size
        new_config = SMALL_CLUSTER.copy()
        new_config["max_workers"] = 1
        self.write_config(new_config)
        autoscaler.update()
        self.waitForNodes(1)

        # Update the config to reduce the cluster size
        new_config["min_workers"] = 10
        new_config["max_workers"] = 10
        self.write_config(new_config)
        autoscaler.update()
        self.waitForNodes(6)
        autoscaler.update()
        self.waitForNodes(10)

    def testInitialWorkers(self):
        """initial_workers are launched in max_launch_batch-sized waves."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 0
        config["max_workers"] = 20
        config["initial_workers"] = 10
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        self.waitForNodes(0)
        autoscaler.update()
        self.waitForNodes(5)  # expected due to batch sizes and concurrency
        autoscaler.update()
        self.waitForNodes(10)
        autoscaler.update()

    def testAggressiveAutoscaling(self):
        """Aggressive mode scales to max on any head-node load after bringup."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 0
        config["max_workers"] = 20
        config["initial_workers"] = 10
        config["idle_timeout_minutes"] = 0
        config["autoscaling_mode"] = "aggressive"
        config_path = self.write_config(config)

        self.provider = MockProvider()
        self.provider.create_node({}, {TAG_RAY_NODE_TYPE: "head"}, 1)
        head_ip = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_TYPE: "head"}, )[0]
        runner = MockProcessRunner()

        lm = LoadMetrics()
        lm.local_ip = head_ip

        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)

        self.waitForNodes(1)
        autoscaler.update()
        self.waitForNodes(6)  # expected due to batch sizes and concurrency
        autoscaler.update()
        self.waitForNodes(11)

        # Connect the head and workers to end the bringup phase
        addrs = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_TYPE: "worker"}, )
        # Bugfix: was `addrs += head_ip`, which extended the list with the
        # individual *characters* of the ip string instead of the ip itself.
        addrs.append(head_ip)
        for addr in addrs:
            lm.update(addr, {"CPU": 2}, {"CPU": 0}, {})
            lm.update(addr, {"CPU": 2}, {"CPU": 2}, {})
        assert autoscaler.bringup
        autoscaler.update()

        assert not autoscaler.bringup
        autoscaler.update()
        self.waitForNodes(1)

        # All of the nodes are down. Simulate some load on the head node
        lm.update(head_ip, {"CPU": 2}, {"CPU": 0}, {})

        autoscaler.update()
        self.waitForNodes(6)  # expected due to batch sizes and concurrency
        autoscaler.update()
        self.waitForNodes(11)

    def testDelayedLaunch(self):
        """Pending launches are tracked while create_node is blocked."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        assert len(self.provider.non_terminated_nodes({})) == 0

        # Update will try to create, but will block until we set the flag
        self.provider.ready_to_create.clear()
        autoscaler.update()
        assert autoscaler.num_launches_pending.value == 2
        assert len(self.provider.non_terminated_nodes({})) == 0

        # Set the flag, check it updates
        self.provider.ready_to_create.set()
        self.waitForNodes(2)
        assert autoscaler.num_launches_pending.value == 0

        # Update the config to reduce the cluster size
        new_config = SMALL_CLUSTER.copy()
        new_config["max_workers"] = 1
        self.write_config(new_config)
        autoscaler.update()
        assert len(self.provider.non_terminated_nodes({})) == 1

    def testDelayedLaunchWithFailure(self):
        """A failed launch wave is retried without double-counting pending."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 10
        config["max_workers"] = 10
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_launch_batch=5,
            max_concurrent_launches=8,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        assert len(self.provider.non_terminated_nodes({})) == 0

        # update() should launch a wave of 5 nodes (max_launch_batch)
        # Force this first wave to block.
        rtc1 = self.provider.ready_to_create
        rtc1.clear()
        autoscaler.update()
        # Synchronization: wait for launchy thread to be blocked on rtc1
        if hasattr(rtc1, "_cond"):  # Python 3.5
            waiters = rtc1._cond._waiters
        else:  # Python 2.7
            waiters = rtc1._Event__cond._Condition__waiters
        self.waitFor(lambda: len(waiters) == 1)
        assert autoscaler.num_launches_pending.value == 5
        assert len(self.provider.non_terminated_nodes({})) == 0

        # Call update() to launch a second wave of 3 nodes,
        # as 5 + 3 = 8 = max_concurrent_launches.
        # Make this wave complete immediately.
        rtc2 = threading.Event()
        self.provider.ready_to_create = rtc2
        rtc2.set()
        autoscaler.update()
        self.waitForNodes(3)
        assert autoscaler.num_launches_pending.value == 5

        # The first wave of 5 will now tragically fail
        self.provider.fail_creates = True
        rtc1.set()
        self.waitFor(lambda: autoscaler.num_launches_pending.value == 0)
        assert len(self.provider.non_terminated_nodes({})) == 3

        # Retry the first wave, allowing it to succeed this time
        self.provider.fail_creates = False
        autoscaler.update()
        self.waitForNodes(8)
        assert autoscaler.num_launches_pending.value == 0

        # Final wave of 2 nodes
        autoscaler.update()
        self.waitForNodes(10)
        assert autoscaler.num_launches_pending.value == 0

    def testUpdateThrottling(self):
        """Config changes are not applied within the update interval."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=10)
        autoscaler.update()
        self.waitForNodes(2)
        assert autoscaler.num_launches_pending.value == 0
        new_config = SMALL_CLUSTER.copy()
        new_config["max_workers"] = 1
        self.write_config(new_config)
        autoscaler.update()
        # not updated yet
        # note that node termination happens in the main thread, so
        # we do not need to add any delay here before checking
        assert len(self.provider.non_terminated_nodes({})) == 2
        assert autoscaler.num_launches_pending.value == 0

    def testLaunchConfigChange(self):
        """Changing launch config replaces all outdated nodes."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        autoscaler = StandardAutoscaler(
            config_path, LoadMetrics(), max_failures=0, update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(2)

        # Update the config to change the node type
        new_config = SMALL_CLUSTER.copy()
        new_config["worker_nodes"]["InstanceType"] = "updated"
        self.write_config(new_config)
        self.provider.ready_to_create.clear()
        for _ in range(5):
            autoscaler.update()
        self.waitForNodes(0)
        self.provider.ready_to_create.set()
        self.waitForNodes(2)

    def testIgnoresCorruptedConfig(self):
        """A bad config on disk is ignored; the last good config stays active."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_launch_batch=10,
            max_concurrent_launches=10,
            process_runner=runner,
            max_failures=0,
            update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(2)

        # Write a corrupted config
        self.write_config("asdf")
        for _ in range(10):
            autoscaler.update()
        time.sleep(0.1)
        assert autoscaler.num_launches_pending.value == 0
        assert len(self.provider.non_terminated_nodes({})) == 2

        # New a good config again
        new_config = SMALL_CLUSTER.copy()
        new_config["min_workers"] = 10
        new_config["max_workers"] = 10
        self.write_config(new_config)
        autoscaler.update()
        self.waitForNodes(10)

    def testMaxFailures(self):
        """After max_failures provider errors, update() re-raises."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        self.provider.throw = True
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_failures=2,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        autoscaler.update()
        with pytest.raises(Exception):
            autoscaler.update()

    def testLaunchNewNodeOnOutOfBandTerminate(self):
        """Nodes killed outside the autoscaler's control are replaced."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        autoscaler.update()
        self.waitForNodes(2)
        for node in self.provider.mock_nodes.values():
            node.state = "terminated"
        assert len(self.provider.non_terminated_nodes({})) == 0
        autoscaler.update()
        self.waitForNodes(2)

    def testConfiguresNewNodes(self):
        """Freshly started nodes are driven to the up-to-date status."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        autoscaler.update()
        self.waitForNodes(2)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})

    def testReportsConfigFailures(self):
        """Nodes whose setup commands fail are tagged update-failed."""
        config = copy.deepcopy(SMALL_CLUSTER)
        config["provider"]["type"] = "external"
        config = fillout_defaults(config)
        config["provider"]["type"] = "mock"
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner(fail_cmds=["setup_cmd"])
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        autoscaler.update()
        self.waitForNodes(2)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED})

    def testConfiguresOutdatedNodes(self):
        """Changing setup commands re-runs configuration on live nodes."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            LoadMetrics(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        autoscaler.update()
        self.waitForNodes(2)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        runner.calls = []
        new_config = SMALL_CLUSTER.copy()
        new_config["worker_setup_commands"] = ["cmdX", "cmdY"]
        self.write_config(new_config)
        autoscaler.update()
        autoscaler.update()
        self.waitFor(lambda: len(runner.calls) > 0)

    def testScaleUpBasedOnLoad(self):
        """Reported utilization grows and shrinks the cluster."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 1
        config["max_workers"] = 10
        config["target_utilization_fraction"] = 0.5
        config_path = self.write_config(config)
        self.provider = MockProvider()
        lm = LoadMetrics()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        assert len(self.provider.non_terminated_nodes({})) == 0
        autoscaler.update()
        self.waitForNodes(1)
        autoscaler.update()
        assert autoscaler.num_launches_pending.value == 0
        assert len(self.provider.non_terminated_nodes({})) == 1

        # Scales up as nodes are reported as used
        local_ip = services.get_node_ip_address()
        lm.update(local_ip, {"CPU": 2}, {"CPU": 0}, {})  # head
        lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 0}, {})  # worker 1
        autoscaler.update()
        self.waitForNodes(3)
        lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 0}, {})
        autoscaler.update()
        self.waitForNodes(5)

        # Holds steady when load is removed
        lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 2}, {})
        lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 2}, {})
        autoscaler.update()
        assert autoscaler.num_launches_pending.value == 0
        assert len(self.provider.non_terminated_nodes({})) == 5

        # Scales down as nodes become unused
        lm.last_used_time_by_ip["172.0.0.0"] = 0
        lm.last_used_time_by_ip["172.0.0.1"] = 0
        autoscaler.update()
        assert autoscaler.num_launches_pending.value == 0
        assert len(self.provider.non_terminated_nodes({})) == 3
        lm.last_used_time_by_ip["172.0.0.2"] = 0
        lm.last_used_time_by_ip["172.0.0.3"] = 0
        autoscaler.update()
        assert autoscaler.num_launches_pending.value == 0
        assert len(self.provider.non_terminated_nodes({})) == 1

    def testDontScaleBelowTarget(self):
        """Idle-but-targeted workers are not culled below the target size."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 0
        config["max_workers"] = 2
        config["target_utilization_fraction"] = 0.5
        config_path = self.write_config(config)
        self.provider = MockProvider()
        lm = LoadMetrics()
        runner = MockProcessRunner()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        assert len(self.provider.non_terminated_nodes({})) == 0
        autoscaler.update()
        assert autoscaler.num_launches_pending.value == 0
        assert len(self.provider.non_terminated_nodes({})) == 0

        # Scales up as nodes are reported as used
        local_ip = services.get_node_ip_address()
        lm.update(local_ip, {"CPU": 2}, {"CPU": 0}, {})  # head
        # 1.0 nodes used => target nodes = 2 => target workers = 1
        autoscaler.update()
        self.waitForNodes(1)

        # Make new node idle, and never used.
        # Should hold steady as target is still 2.
        lm.update("172.0.0.0", {"CPU": 0}, {"CPU": 0}, {})
        lm.last_used_time_by_ip["172.0.0.0"] = 0
        autoscaler.update()
        assert len(self.provider.non_terminated_nodes({})) == 1

        # Reduce load on head => target nodes = 1 => target workers = 0
        lm.update(local_ip, {"CPU": 2}, {"CPU": 1}, {})
        autoscaler.update()
        assert len(self.provider.non_terminated_nodes({})) == 0

    def testRecoverUnhealthyWorkers(self):
        """A stale heartbeat triggers a recovery update on that worker."""
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        lm = LoadMetrics()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(2)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})

        # Mark a node as unhealthy
        for _ in range(5):
            if autoscaler.updaters:
                time.sleep(0.05)
                autoscaler.update()
        assert not autoscaler.updaters
        num_calls = len(runner.calls)
        lm.last_heartbeat_time_by_ip["172.0.0.0"] = 0
        autoscaler.update()
        self.waitFor(lambda: len(runner.calls) > num_calls, num_retries=150)

    def testExternalNodeScaler(self):
        """An external provider module is imported and instantiated."""
        config = SMALL_CLUSTER.copy()
        config["provider"] = {
            "type": "external",
            "module": "ray.autoscaler.node_provider.NodeProvider",
        }
        config_path = self.write_config(config)
        autoscaler = StandardAutoscaler(
            config_path, LoadMetrics(), max_failures=0, update_interval_s=0)
        assert isinstance(autoscaler.provider, NodeProvider)

    def testExternalNodeScalerWrongImport(self):
        """An unimportable external module raises ImportError."""
        config = SMALL_CLUSTER.copy()
        config["provider"] = {
            "type": "external",
            "module": "mymodule.provider_class",
        }
        invalid_provider = self.write_config(config)
        with pytest.raises(ImportError):
            StandardAutoscaler(
                invalid_provider, LoadMetrics(), update_interval_s=0)

    def testExternalNodeScalerWrongModuleFormat(self):
        """A malformed external module path raises ValueError."""
        config = SMALL_CLUSTER.copy()
        config["provider"] = {
            "type": "external",
            "module": "does-not-exist",
        }
        invalid_provider = self.write_config(config)
        with pytest.raises(ValueError):
            StandardAutoscaler(
                invalid_provider, LoadMetrics(), update_interval_s=0)

    def testSetupCommandsWithNoNodeCaching(self):
        """Without stopped-node caching, replacements run full setup."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 1
        config["max_workers"] = 1
        config_path = self.write_config(config)
        self.provider = MockProvider(cache_stopped=False)
        runner = MockProcessRunner()
        lm = LoadMetrics()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(1)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        runner.assert_has_call("172.0.0.0", "init_cmd")
        runner.assert_has_call("172.0.0.0", "setup_cmd")
        runner.assert_has_call("172.0.0.0", "worker_setup_cmd")
        runner.assert_has_call("172.0.0.0", "start_ray_worker")

        # Check the node was not reused
        self.provider.terminate_node(0)
        autoscaler.update()
        self.waitForNodes(1)
        runner.clear_history()
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        runner.assert_has_call("172.0.0.1", "init_cmd")
        runner.assert_has_call("172.0.0.1", "setup_cmd")
        runner.assert_has_call("172.0.0.1", "worker_setup_cmd")
        runner.assert_has_call("172.0.0.1", "start_ray_worker")

    def testSetupCommandsWithStoppedNodeCaching(self):
        """With stopped-node caching, revived nodes skip full setup."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 1
        config["max_workers"] = 1
        config_path = self.write_config(config)
        self.provider = MockProvider(cache_stopped=True)
        runner = MockProcessRunner()
        lm = LoadMetrics()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(1)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        runner.assert_has_call("172.0.0.0", "init_cmd")
        runner.assert_has_call("172.0.0.0", "setup_cmd")
        runner.assert_has_call("172.0.0.0", "worker_setup_cmd")
        runner.assert_has_call("172.0.0.0", "start_ray_worker")

        # Check the node was indeed reused
        self.provider.terminate_node(0)
        autoscaler.update()
        self.waitForNodes(1)
        runner.clear_history()
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        runner.assert_not_has_call("172.0.0.0", "init_cmd")
        runner.assert_not_has_call("172.0.0.0", "setup_cmd")
        runner.assert_not_has_call("172.0.0.0", "worker_setup_cmd")
        runner.assert_has_call("172.0.0.0", "start_ray_worker")
        runner.clear_history()
        autoscaler.update()
        runner.assert_not_has_call("172.0.0.0", "setup_cmd")
        # We did not start any other nodes
        runner.assert_not_has_call("172.0.0.1", " ")

    def testMultiNodeReuse(self):
        """Scaling up past old stopped nodes reuses them without re-setup."""
        config = SMALL_CLUSTER.copy()
        config["min_workers"] = 3
        config["max_workers"] = 3
        config_path = self.write_config(config)
        self.provider = MockProvider(cache_stopped=True)
        runner = MockProcessRunner()
        lm = LoadMetrics()
        autoscaler = StandardAutoscaler(
            config_path,
            lm,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0)
        autoscaler.update()
        self.waitForNodes(3)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            3, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})

        self.provider.terminate_node(0)
        self.provider.terminate_node(1)
        self.provider.terminate_node(2)
        runner.clear_history()

        # Scale up to 10 nodes, check we reuse the first 3 and add 7 more.
        config["min_workers"] = 10
        config["max_workers"] = 10
        self.write_config(config)
        autoscaler.update()
        autoscaler.update()
        self.waitForNodes(10)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            10, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        autoscaler.update()
        for i in [0, 1, 2]:
            runner.assert_not_has_call("172.0.0.{}".format(i), "setup_cmd")
            runner.assert_has_call("172.0.0.{}".format(i), "start_ray_worker")
        for i in [3, 4, 5, 6, 7, 8, 9]:
            runner.assert_has_call("172.0.0.{}".format(i), "setup_cmd")
            runner.assert_has_call("172.0.0.{}".format(i), "start_ray_worker")
if __name__ == "__main__":
    # Run this test module directly by delegating to pytest in verbose mode.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_autoscaler_yaml.py
|
Python
|
import os
import unittest
import yaml
from ray.autoscaler.autoscaler import fillout_defaults, validate_config
from ray.test_utils import recursive_fnmatch
# Root of the installed ray package (two directory levels above this file).
RAY_PATH = os.path.abspath(os.path.join(__file__, "../../"))
# All autoscaler example YAMLs plus the tune example YAMLs get validated.
CONFIG_PATHS = recursive_fnmatch(
    os.path.join(RAY_PATH, "autoscaler"), "*.yaml")
CONFIG_PATHS += recursive_fnmatch(
    os.path.join(RAY_PATH, "tune/examples/"), "*.yaml")
class AutoscalingConfigTest(unittest.TestCase):
    """Checks that every bundled example YAML passes schema validation."""

    def testValidateDefaultConfig(self):
        """Each config in CONFIG_PATHS, after defaults, must validate."""
        for config_path in CONFIG_PATHS:
            with open(config_path) as f:
                config = yaml.safe_load(f)
            config = fillout_defaults(config)
            try:
                validate_config(config)
            except Exception:
                # Name the offending file; previously a failure anywhere in
                # this loop gave no hint which of the many configs broke.
                self.fail("Config {} did not pass validation test!".format(
                    config_path))
if __name__ == "__main__":
    # Run this test module directly by delegating to pytest in verbose mode.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_basic.py
|
Python
|
# coding: utf-8
import collections
import io
import json
import logging
import os
import re
import string
import sys
import threading
import time
import numpy as np
import pytest
import ray
from ray.exceptions import RayTimeoutError
import ray.cluster_utils
import ray.test_utils
logger = logging.getLogger(__name__)
# https://github.com/ray-project/ray/issues/6662
def test_ignore_http_proxy(shutdown_only):
    """Remote calls must still succeed when http(s)_proxy env vars are set.

    Regression test for ray-project/ray#6662.  The proxy variables are
    restored afterwards so the mutation does not leak into later tests
    (previously they were left set for the rest of the process).
    """
    ray.init(num_cpus=1)
    saved = {
        var: os.environ.get(var)
        for var in ("http_proxy", "https_proxy")
    }
    os.environ["http_proxy"] = "http://example.com"
    os.environ["https_proxy"] = "http://example.com"
    try:

        @ray.remote
        def f():
            return 1

        assert ray.get(f.remote()) == 1
    finally:
        # Restore whatever proxy settings existed before this test.
        for var, value in saved.items():
            if value is None:
                os.environ.pop(var, None)
            else:
                os.environ[var] = value
def test_simple_serialization(ray_start_regular):
    """Values must round-trip unchanged through task args and ray.put."""
    primitive_objects = [
        # Various primitive types.
        0,
        0.0,
        0.9,
        1 << 62,
        1 << 999,
        "a",
        string.printable,
        "\u262F",
        u"hello world",
        u"\xff\xfe\x9c\x001\x000\x00",
        None,
        True,
        False,
        [],
        (),
        {},
        type,
        int,
        set(),
        # Collections types.
        collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
        collections.OrderedDict([("hello", 1), ("world", 2)]),
        collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
        collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
        collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
        # Numpy dtypes.
        np.int8(3),
        np.int32(4),
        np.int64(5),
        np.uint8(3),
        np.uint32(4),
        np.uint64(5),
        np.float32(1.9),
        np.float64(1.9),
    ]

    # Wrap every primitive in a list, a tuple, and a dict value.
    wrapped_in_list = [[obj] for obj in primitive_objects]
    wrapped_in_tuple = [(obj, ) for obj in primitive_objects]
    wrapped_in_dict = [{(): obj} for obj in primitive_objects]
    composite_objects = wrapped_in_list + wrapped_in_tuple + wrapped_in_dict

    @ray.remote
    def echo(x):
        return x

    # Check that we can pass arguments by value to remote functions and
    # that they are uncorrupted.
    for obj in primitive_objects + composite_objects:
        via_task = ray.get(echo.remote(obj))
        via_put = ray.get(ray.put(obj))
        assert obj == via_task
        assert obj == via_put
        # TODO(rkn): The numpy dtypes currently come back as regular
        # integers or floats.
        if type(obj).__module__ != "numpy":
            assert type(obj) == type(via_task)
            assert type(obj) == type(via_put)
def test_background_tasks_with_max_calls(shutdown_only):
    """Workers retired by max_calls must not orphan in-flight child tasks."""
    ray.init(num_cpus=2)

    @ray.remote
    def child():
        time.sleep(.1)
        return 0

    @ray.remote(max_calls=1, max_retries=0)
    def parent():
        return [child.remote()]

    inner_ids = ray.get([parent.remote() for _ in range(10)])
    # Should still be able to retrieve these objects, since parent's
    # workers will wait for child to finish before exiting.
    ray.get([ids[0] for ids in inner_ids])
def test_fair_queueing(shutdown_only):
    """All 1000 nested tasks complete when fair queueing is enabled."""
    ray.init(
        num_cpus=1, _internal_config=json.dumps({
            "fair_queueing_enabled": 1
        }))

    @ray.remote
    def h():
        return 0

    @ray.remote
    def g():
        return ray.get(h.remote())

    @ray.remote
    def f():
        return ray.get(g.remote())

    # This will never finish without fair queueing of {f, g, h}:
    # https://github.com/ray-project/ray/issues/3644
    pending = [f.remote() for _ in range(1000)]
    ready, _ = ray.wait(pending, timeout=60.0, num_returns=1000)
    assert len(ready) == 1000, len(ready)
def complex_serialization(use_pickle):
    """Round-trip a broad catalog of Python objects through ray.put/remote.

    NOTE(review): `use_pickle` is unused in this body; the serialization
    backend appears to be selected by the caller via ray.init(use_pickle=...)
    — confirm the parameter is intentionally documentation-only.
    """
    def assert_equal(obj1, obj2):
        # Structural deep-equality helper that tolerates numpy arrays,
        # namedtuples, dicts/lists/tuples, and __dict__-based objects.
        module_numpy = (type(obj1).__module__ == np.__name__
                        or type(obj2).__module__ == np.__name__)
        if module_numpy:
            empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
                           or (hasattr(obj2, "shape") and obj2.shape == ()))
            if empty_shape:
                # This is a special case because currently
                # np.testing.assert_equal fails because we do not properly
                # handle different numerical types.
                assert obj1 == obj2, ("Objects {} and {} are "
                                      "different.".format(obj1, obj2))
            else:
                np.testing.assert_equal(obj1, obj2)
        elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
            special_keys = ["_pytype_"]
            assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
                list(obj2.__dict__.keys()) + special_keys)), (
                    "Objects {} and {} are different.".format(obj1, obj2))
            for key in obj1.__dict__.keys():
                if key not in special_keys:
                    assert_equal(obj1.__dict__[key], obj2.__dict__[key])
        elif type(obj1) is dict or type(obj2) is dict:
            assert_equal(obj1.keys(), obj2.keys())
            for key in obj1.keys():
                assert_equal(obj1[key], obj2[key])
        elif type(obj1) is list or type(obj2) is list:
            assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
                                            "different lengths.".format(
                                                obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        elif type(obj1) is tuple or type(obj2) is tuple:
            assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
                                            "with different lengths.".format(
                                                obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        elif (ray.serialization.is_named_tuple(type(obj1))
              or ray.serialization.is_named_tuple(type(obj2))):
            assert len(obj1) == len(obj2), (
                "Objects {} and {} are named "
                "tuples with different lengths.".format(obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        else:
            assert obj1 == obj2, "Objects {} and {} are different.".format(
                obj1, obj2)
    long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
    PRIMITIVE_OBJECTS = [
        0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
        string.printable, "\u262F", u"hello world",
        u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
        np.int8(3),
        np.int32(4),
        np.int64(5),
        np.uint8(3),
        np.uint32(4),
        np.uint64(5),
        np.float32(1.9),
        np.float64(1.9),
        np.zeros([100, 100]),
        np.random.normal(size=[100, 100]),
        np.array(["hi", 3]),
        np.array(["hi", 3], dtype=object)
    ] + long_extras
    COMPLEX_OBJECTS = [
        [[[[[[[[[[[[]]]]]]]]]]]],
        {
            "obj{}".format(i): np.random.normal(size=[100, 100])
            for i in range(10)
        },
        # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
        #      (): {(): {}}}}}}}}}}}}},
        (
            (((((((((), ), ), ), ), ), ), ), ), ),
        {
            "a": {
                "b": {
                    "c": {
                        "d": {}
                    }
                }
            }
        },
    ]
    class Foo:
        def __init__(self, value=0):
            self.value = value
        def __hash__(self):
            return hash(self.value)
        def __eq__(self, other):
            return other.value == self.value
    class Bar:
        def __init__(self):
            for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
                setattr(self, "field{}".format(i), val)
    class Baz:
        def __init__(self):
            self.foo = Foo()
            self.bar = Bar()
        def method(self, arg):
            pass
    class Qux:
        def __init__(self):
            self.objs = [Foo(), Bar(), Baz()]
    class SubQux(Qux):
        def __init__(self):
            Qux.__init__(self)
    class CustomError(Exception):
        pass
    Point = collections.namedtuple("Point", ["x", "y"])
    NamedTupleExample = collections.namedtuple(
        "Example", "field1, field2, field3, field4, field5")
    CUSTOM_OBJECTS = [
        Exception("Test object."),
        CustomError(),
        Point(11, y=22),
        Foo(),
        Bar(),
        Baz(),  # Qux(), SubQux(),
        NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
    ]
    # Test dataclasses in Python 3.7.
    if sys.version_info >= (3, 7):
        from dataclasses import make_dataclass
        DataClass0 = make_dataclass("DataClass0", [("number", int)])
        CUSTOM_OBJECTS.append(DataClass0(number=3))
        class CustomClass:
            def __init__(self, value):
                self.value = value
        DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
        class DataClass2(DataClass1):
            @classmethod
            def from_custom(cls, data):
                custom = CustomClass(data)
                return cls(custom)
            def __reduce__(self):
                return (self.from_custom, (self.custom.value, ))
        CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
    BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
    LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
    TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
    # The check that type(obj).__module__ != "numpy" should be unnecessary, but
    # otherwise this seems to fail on Mac OS X on Travis.
    DICT_OBJECTS = ([{
        obj: obj
    } for obj in PRIMITIVE_OBJECTS if (
        obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
            0: obj
        } for obj in BASE_OBJECTS] + [{
            Foo(123): Foo(456)
        }])
    RAY_TEST_OBJECTS = (
        BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
    @ray.remote
    def f(x):
        return x
    # Check that we can pass arguments by value to remote functions and
    # that they are uncorrupted.
    for obj in RAY_TEST_OBJECTS:
        assert_equal(obj, ray.get(f.remote(obj)))
        assert_equal(obj, ray.get(ray.put(obj)))
    # Test StringIO serialization
    s = io.StringIO(u"Hello, world!\n")
    s.seek(0)
    line = s.readline()
    s.seek(0)
    assert ray.get(ray.put(s)).readline() == line
def test_complex_serialization(ray_start_regular):
    """Run the shared serialization suite with the default backend."""
    complex_serialization(use_pickle=False)
def test_complex_serialization_with_pickle(shutdown_only):
    """Run the shared serialization suite with the pickle backend enabled."""
    ray.init(use_pickle=True)
    complex_serialization(use_pickle=True)
def test_nested_functions(ray_start_regular):
    """Remote functions may reference later definitions and call recursively."""
    # Make sure that remote functions can use other values that are defined
    # after the remote function but before the first function invocation.
    @ray.remote
    def f():
        return g(), ray.get(h.remote())
    def g():
        return 1
    @ray.remote
    def h():
        return 2
    assert ray.get(f.remote()) == (1, 2)
    # Test a remote function that recursively calls itself.
    @ray.remote
    def factorial(n):
        if n == 0:
            return 1
        return n * ray.get(factorial.remote(n - 1))
    assert ray.get(factorial.remote(0)) == 1
    assert ray.get(factorial.remote(1)) == 1
    assert ray.get(factorial.remote(2)) == 2
    assert ray.get(factorial.remote(3)) == 6
    assert ray.get(factorial.remote(4)) == 24
    assert ray.get(factorial.remote(5)) == 120
    # Test remote functions that recursively call each other.
    @ray.remote
    def factorial_even(n):
        assert n % 2 == 0
        if n == 0:
            return 1
        return n * ray.get(factorial_odd.remote(n - 1))
    @ray.remote
    def factorial_odd(n):
        assert n % 2 == 1
        return n * ray.get(factorial_even.remote(n - 1))
    assert ray.get(factorial_even.remote(4)) == 24
    assert ray.get(factorial_odd.remote(5)) == 120
def test_ray_recursive_objects(ray_start_regular):
    """Self-referential objects serialize with pickle, else raise on put."""
    class ClassA:
        pass
    # Make a list that contains itself.
    lst = []
    lst.append(lst)
    # Make an object that contains itself as a field.
    a1 = ClassA()
    a1.field = a1
    # Make two objects that contain each other as fields.
    a2 = ClassA()
    a3 = ClassA()
    a2.field = a3
    a3.field = a2
    # Make a dictionary that contains itself.
    d1 = {}
    d1["key"] = d1
    # Create a list of recursive objects.
    recursive_objects = [lst, a1, a2, a3, d1]
    if ray.worker.global_worker.use_pickle:
        # Serialize the recursive objects.
        for obj in recursive_objects:
            ray.put(obj)
    else:
        # Check that exceptions are thrown when we serialize the recursive
        # objects.
        for obj in recursive_objects:
            with pytest.raises(Exception):
                ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start_regular):
    """Lambdas, sets, builtin types, and custom classes pass through remote calls."""
    @ray.remote
    def f(x):
        return x
    # Test passing lambdas.
    def temp():
        return 1
    assert ray.get(f.remote(temp))() == 1
    assert ray.get(f.remote(lambda x: x + 1))(3) == 4
    # Test sets.
    assert ray.get(f.remote(set())) == set()
    s = {1, (1, 2, "hi")}
    assert ray.get(f.remote(s)) == s
    # Test types.
    assert ray.get(f.remote(int)) == int
    assert ray.get(f.remote(float)) == float
    assert ray.get(f.remote(str)) == str
    class Foo:
        def __init__(self):
            pass
    # Make sure that we can put and get a custom type. Note that the result
    # won't be "equal" to Foo.
    ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start_regular):
    """ray.put works on an object holding an ObjectID in an attribute."""
    # This test is here to prevent a regression of
    # https://github.com/ray-project/ray/issues/1317.
    class Foo:
        def __init__(self):
            self.val = ray.put(0)
        def method(self):
            f
    f = Foo()
    ray.put(f)
def test_put_get(shutdown_only):
    """Round-trip ints, floats, strings, and lists through the object store."""
    ray.init(num_cpus=0)
    # Each factory maps the loop index to a value of a different type; the
    # iteration order (all ints, then floats, strings, lists) matches the
    # original four back-to-back loops.
    value_factories = [
        lambda i: i * 10**6,
        lambda i: i * 10**6 * 1.0,
        lambda i: "h" * i,
        lambda i: [1] * i,
    ]
    for make_value in value_factories:
        for index in range(100):
            original = make_value(index)
            restored = ray.get(ray.put(original))
            assert original == restored
def custom_serializers():
    """Register custom (de)serializers on the driver and inside a worker."""
    class Foo:
        def __init__(self):
            self.x = 3
    def custom_serializer(obj):
        return 3, "string1", type(obj).__name__
    def custom_deserializer(serialized_obj):
        return serialized_obj, "string2"
    ray.register_custom_serializer(
        Foo, serializer=custom_serializer, deserializer=custom_deserializer)
    assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
    class Bar:
        def __init__(self):
            self.x = 3
    ray.register_custom_serializer(
        Bar, serializer=custom_serializer, deserializer=custom_deserializer)
    # Returning Bar from a worker exercises serializer export to workers.
    @ray.remote
    def f():
        return Bar()
    assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_custom_serializers(ray_start_regular):
    """Run the shared custom-serializer checks with the default backend."""
    custom_serializers()
def test_custom_serializers_with_pickle(shutdown_only):
    """Custom serializers work when ray itself is using the pickle backend."""
    ray.init(use_pickle=True)
    custom_serializers()
    class Foo:
        def __init__(self):
            self.x = 4
    # Test the pickle serialization backend without serializer.
    # NOTE: 'use_pickle' here is different from 'use_pickle' in
    # ray.init
    ray.register_custom_serializer(Foo, use_pickle=True)
    @ray.remote
    def f():
        return Foo()
    assert type(ray.get(f.remote())) == Foo
def test_serialization_final_fallback(ray_start_regular):
    """A third-party model object survives put/get via the fallback serializer."""
    pytest.importorskip("catboost")
    # This test will only run when "catboost" is installed.
    from catboost import CatBoostClassifier
    model = CatBoostClassifier(
        iterations=2,
        depth=2,
        learning_rate=1,
        loss_function="Logloss",
        logging_level="Verbose")
    reconstructed_model = ray.get(ray.put(model))
    assert set(model.get_params().items()) == set(
        reconstructed_model.get_params().items())
def test_register_class(ray_start_2_cpus):
    """Custom class definitions serialize between driver and workers."""
    # Check that putting an object of a class that has not been registered
    # throws an exception.
    class TempClass:
        pass
    ray.get(ray.put(TempClass()))
    # Test passing custom classes into remote functions from the driver.
    @ray.remote
    def f(x):
        return x
    class Foo:
        def __init__(self, value=0):
            self.value = value
        def __hash__(self):
            return hash(self.value)
        def __eq__(self, other):
            return other.value == self.value
    foo = ray.get(f.remote(Foo(7)))
    assert foo == Foo(7)
    regex = re.compile(r"\d+\.\d*")
    new_regex = ray.get(f.remote(regex))
    # This seems to fail on the system Python 3 that comes with
    # Ubuntu, so it is commented out for now:
    # assert regex == new_regex
    # Instead, we do this:
    assert regex.pattern == new_regex.pattern
    class TempClass1:
        def __init__(self):
            self.value = 1
    # Test returning custom classes created on workers.
    @ray.remote
    def g():
        class TempClass2:
            def __init__(self):
                self.value = 2
        return TempClass1(), TempClass2()
    object_1, object_2 = ray.get(g.remote())
    assert object_1.value == 1
    assert object_2.value == 2
    # Test exporting custom class definitions from one worker to another
    # when the worker is blocked in a get.
    class NewTempClass:
        def __init__(self, value):
            self.value = value
    @ray.remote
    def h1(x):
        return NewTempClass(x)
    @ray.remote
    def h2(x):
        return ray.get(h1.remote(x))
    assert ray.get(h2.remote(10)).value == 10
    # Test registering multiple classes with the same name.
    @ray.remote(num_return_vals=3)
    def j():
        class Class0:
            def method0(self):
                pass
        c0 = Class0()
        class Class0:
            def method1(self):
                pass
        c1 = Class0()
        class Class0:
            def method2(self):
                pass
        c2 = Class0()
        return c0, c1, c2
    results = []
    for _ in range(5):
        # num_return_vals=3, so each call yields a list of three ObjectIDs.
        results += j.remote()
    for i in range(len(results) // 3):
        c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
        c0.method0()
        c1.method1()
        c2.method2()
        assert not hasattr(c0, "method1")
        assert not hasattr(c0, "method2")
        assert not hasattr(c1, "method0")
        assert not hasattr(c1, "method2")
        assert not hasattr(c2, "method0")
        assert not hasattr(c2, "method1")
    @ray.remote
    def k():
        class Class0:
            def method0(self):
                pass
        c0 = Class0()
        class Class0:
            def method1(self):
                pass
        c1 = Class0()
        class Class0:
            def method2(self):
                pass
        c2 = Class0()
        return c0, c1, c2
    results = ray.get([k.remote() for _ in range(5)])
    for c0, c1, c2 in results:
        c0.method0()
        c1.method1()
        c2.method2()
        assert not hasattr(c0, "method1")
        assert not hasattr(c0, "method2")
        assert not hasattr(c1, "method0")
        assert not hasattr(c1, "method2")
        assert not hasattr(c2, "method0")
        assert not hasattr(c2, "method1")
def test_keyword_args(ray_start_regular):
    """Remote calls honor positional/keyword/default argument semantics."""
    @ray.remote
    def keyword_fct1(a, b="hello"):
        return "{} {}".format(a, b)
    @ray.remote
    def keyword_fct2(a="hello", b="world"):
        return "{} {}".format(a, b)
    @ray.remote
    def keyword_fct3(a, b, c="hello", d="world"):
        return "{} {} {} {}".format(a, b, c, d)
    x = keyword_fct1.remote(1)
    assert ray.get(x) == "1 hello"
    x = keyword_fct1.remote(1, "hi")
    assert ray.get(x) == "1 hi"
    x = keyword_fct1.remote(1, b="world")
    assert ray.get(x) == "1 world"
    x = keyword_fct1.remote(a=1, b="world")
    assert ray.get(x) == "1 world"
    x = keyword_fct2.remote(a="w", b="hi")
    assert ray.get(x) == "w hi"
    x = keyword_fct2.remote(b="hi", a="w")
    assert ray.get(x) == "w hi"
    x = keyword_fct2.remote(a="w")
    assert ray.get(x) == "w world"
    x = keyword_fct2.remote(b="hi")
    assert ray.get(x) == "hello hi"
    x = keyword_fct2.remote("w")
    assert ray.get(x) == "w world"
    x = keyword_fct2.remote("w", "hi")
    assert ray.get(x) == "w hi"
    x = keyword_fct3.remote(0, 1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, b=1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, 1, d="hi", c="w")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, 1, c="w")
    assert ray.get(x) == "0 1 w world"
    x = keyword_fct3.remote(0, 1, d="hi")
    assert ray.get(x) == "0 1 hello hi"
    x = keyword_fct3.remote(0, 1)
    assert ray.get(x) == "0 1 hello world"
    x = keyword_fct3.remote(a=0, b=1)
    assert ray.get(x) == "0 1 hello world"
    # Check that we cannot pass invalid keyword arguments to functions.
    @ray.remote
    def f1():
        return
    @ray.remote
    def f2(x, y=0, z=0):
        return
    # Make sure we get an exception if too many arguments are passed in.
    with pytest.raises(Exception):
        f1.remote(3)
    with pytest.raises(Exception):
        f1.remote(x=3)
    with pytest.raises(Exception):
        f2.remote(0, w=0)
    with pytest.raises(Exception):
        f2.remote(3, x=3)
    # Make sure we get an exception if too many arguments are passed in.
    with pytest.raises(Exception):
        f2.remote(1, 2, 3, 4)
    @ray.remote
    def f3(x):
        return x
    assert ray.get(f3.remote(4)) == 4
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "local_mode": True
    }, {
        "local_mode": False
    }],
    indirect=True)
def test_args_starkwargs(ray_start_regular):
    """**kwargs-accepting functions and actor methods behave like local calls."""
    def starkwargs(a, b, **kwargs):
        return a, b, kwargs
    class TestActor:
        def starkwargs(self, a, b, **kwargs):
            return a, b, kwargs
    def test_function(fn, remote_fn):
        # Remote result matches local; missing positional arg raises TypeError.
        assert fn(1, 2, x=3) == ray.get(remote_fn.remote(1, 2, x=3))
        with pytest.raises(TypeError):
            remote_fn.remote(3)
    remote_test_function = ray.remote(test_function)
    remote_starkwargs = ray.remote(starkwargs)
    test_function(starkwargs, remote_starkwargs)
    ray.get(remote_test_function.remote(starkwargs, remote_starkwargs))
    remote_actor_class = ray.remote(TestActor)
    remote_actor = remote_actor_class.remote()
    actor_method = remote_actor.starkwargs
    local_actor = TestActor()
    local_method = local_actor.starkwargs
    test_function(local_method, actor_method)
    ray.get(remote_test_function.remote(local_method, actor_method))
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "local_mode": True
    }, {
        "local_mode": False
    }],
    indirect=True)
def test_args_named_and_star(ray_start_regular):
    """Mixing named defaults with **kwargs works for tasks and actor methods."""
    def hello(a, x="hello", **kwargs):
        return a, x, kwargs
    class TestActor:
        def hello(self, a, x="hello", **kwargs):
            return a, x, kwargs
    def test_function(fn, remote_fn):
        assert fn(1, x=2, y=3) == ray.get(remote_fn.remote(1, x=2, y=3))
        assert fn(1, 2, y=3) == ray.get(remote_fn.remote(1, 2, y=3))
        assert fn(1, y=3) == ray.get(remote_fn.remote(1, y=3))
        assert fn(1, ) == ray.get(remote_fn.remote(1, ))
        assert fn(1) == ray.get(remote_fn.remote(1))
        # Passing x both positionally and by keyword must fail.
        with pytest.raises(TypeError):
            remote_fn.remote(1, 2, x=3)
    remote_test_function = ray.remote(test_function)
    remote_hello = ray.remote(hello)
    test_function(hello, remote_hello)
    ray.get(remote_test_function.remote(hello, remote_hello))
    remote_actor_class = ray.remote(TestActor)
    remote_actor = remote_actor_class.remote()
    actor_method = remote_actor.hello
    local_actor = TestActor()
    local_method = local_actor.hello
    test_function(local_method, actor_method)
    ray.get(remote_test_function.remote(local_method, actor_method))
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "local_mode": True
    }, {
        "local_mode": False
    }],
    indirect=True)
def test_args_stars_after(ray_start_regular):
    """Defaults followed by *args/**kwargs work for tasks and actor methods."""
    def star_args_after(a="hello", b="heo", *args, **kwargs):
        return a, b, args, kwargs
    class TestActor:
        def star_args_after(self, a="hello", b="heo", *args, **kwargs):
            return a, b, args, kwargs
    def test_function(fn, remote_fn):
        assert fn("hi", "hello", 2) == ray.get(
            remote_fn.remote("hi", "hello", 2))
        assert fn(
            "hi", "hello", 2, hi="hi") == ray.get(
                remote_fn.remote("hi", "hello", 2, hi="hi"))
        assert fn(hi="hi") == ray.get(remote_fn.remote(hi="hi"))
    remote_test_function = ray.remote(test_function)
    remote_star_args_after = ray.remote(star_args_after)
    test_function(star_args_after, remote_star_args_after)
    ray.get(
        remote_test_function.remote(star_args_after, remote_star_args_after))
    remote_actor_class = ray.remote(TestActor)
    remote_actor = remote_actor_class.remote()
    actor_method = remote_actor.star_args_after
    local_actor = TestActor()
    local_method = local_actor.star_args_after
    test_function(local_method, actor_method)
    ray.get(remote_test_function.remote(local_method, actor_method))
def test_variable_number_of_args(shutdown_only):
    """*args-only and mixed-signature remote functions accept variable arity."""
    @ray.remote
    def varargs_fct1(*a):
        return " ".join(map(str, a))
    @ray.remote
    def varargs_fct2(a, *b):
        return " ".join(map(str, b))
    ray.init(num_cpus=1)
    x = varargs_fct1.remote(0, 1, 2)
    assert ray.get(x) == "0 1 2"
    x = varargs_fct2.remote(0, 1, 2)
    assert ray.get(x) == "1 2"
    @ray.remote
    def f1(*args):
        return args
    @ray.remote
    def f2(x, y, *args):
        return x, y, args
    assert ray.get(f1.remote()) == ()
    assert ray.get(f1.remote(1)) == (1, )
    assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
    # Too few required positional arguments must raise.
    with pytest.raises(Exception):
        f2.remote()
    with pytest.raises(Exception):
        f2.remote(1)
    assert ray.get(f2.remote(1, 2)) == (1, 2, ())
    assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
    assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def testNoArgs(self):
    # NOTE(review): this looks like a leftover unittest-style method stranded
    # at module level — pytest's default `test_*` pattern never collects it,
    # and neither `self` nor `self.ray_start` is defined in this scope, so it
    # would fail if invoked. Confirm it is dead code from a unittest port and
    # remove it.
    @ray.remote
    def no_op():
        pass
    self.ray_start()
    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
    """Remote functions may close over data, modules, and other remote functions."""
    ray.init(num_cpus=3)
    # Test that we can close over plain old data.
    data = [
        np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
            "a": np.zeros(3)
        }
    ]
    @ray.remote
    def g():
        return data
    ray.get(g.remote())
    # Test that we can close over modules.
    @ray.remote
    def h():
        return np.zeros([3, 5])
    assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))
    @ray.remote
    def j():
        return time.time()
    ray.get(j.remote())
    # Test that we can define remote functions that call other remote
    # functions.
    @ray.remote
    def k(x):
        return x + 1
    @ray.remote
    def k2(x):
        return ray.get(k.remote(x))
    @ray.remote
    def m(x):
        return ray.get(k2.remote(x))
    assert ray.get(k.remote(1)) == 2
    assert ray.get(k2.remote(1)) == 2
    assert ray.get(m.remote(1)) == 2
def test_redefining_remote_functions(shutdown_only):
    """Redefining a remote function eventually takes effect, even if its source is unchanged."""
    ray.init(num_cpus=1)
    # Test that we can define a remote function in the shell.
    @ray.remote
    def f(x):
        return x + 1
    assert ray.get(f.remote(0)) == 1
    # Test that we can redefine the remote function.
    @ray.remote
    def f(x):
        return x + 10
    # The old definition may still be served briefly; loop until the new one
    # is picked up.
    while True:
        val = ray.get(f.remote(0))
        assert val in [1, 10]
        if val == 10:
            break
        else:
            logger.info("Still using old definition of f, trying again.")
    # Check that we can redefine functions even when the remote function source
    # doesn't change (see https://github.com/ray-project/ray/issues/6130).
    @ray.remote
    def g():
        return nonexistent()
    with pytest.raises(ray.exceptions.RayTaskError, match="nonexistent"):
        ray.get(g.remote())
    def nonexistent():
        return 1
    # Redefine the function and make sure it succeeds.
    @ray.remote
    def g():
        return nonexistent()
    assert ray.get(g.remote()) == 1
    # Check the same thing but when the redefined function is inside of another
    # task.
    @ray.remote
    def h(i):
        @ray.remote
        def j():
            return i
        return j.remote()
    for i in range(20):
        assert ray.get(ray.get(h.remote(i))) == i
def test_submit_api(shutdown_only):
    """The low-level _remote() API honors num_return_vals and resource kwargs."""
    ray.init(num_cpus=2, num_gpus=1, resources={"Custom": 1})
    @ray.remote
    def f(n):
        return list(range(n))
    @ray.remote
    def g():
        return ray.get_gpu_ids()
    # num_return_vals=0 yields no ObjectID at all.
    assert f._remote([0], num_return_vals=0) is None
    id1 = f._remote(args=[1], num_return_vals=1)
    assert ray.get(id1) == [0]
    id1, id2 = f._remote(args=[2], num_return_vals=2)
    assert ray.get([id1, id2]) == [0, 1]
    id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
    assert ray.get([id1, id2, id3]) == [0, 1, 2]
    assert ray.get(
        g._remote(args=[], num_cpus=1, num_gpus=1,
                  resources={"Custom": 1})) == [0]
    # A task requesting a nonexistent resource must never become ready.
    infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
    assert ray.get(g._remote()) == []
    ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
    assert len(ready_ids) == 0
    assert len(remaining_ids) == 1
    @ray.remote
    class Actor:
        def __init__(self, x, y=0):
            self.x = x
            self.y = y
        def method(self, a, b=0):
            return self.x, self.y, a, b
        def gpu_ids(self):
            return ray.get_gpu_ids()
    @ray.remote
    class Actor2:
        def __init__(self):
            pass
        def method(self):
            pass
    a = Actor._remote(
        args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
    a2 = Actor2._remote()
    ray.get(a2.method._remote())
    id1, id2, id3, id4 = a.method._remote(
        args=["test"], kwargs={"b": 2}, num_return_vals=4)
    assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_many_fractional_resources(shutdown_only):
    """Fractional CPU/GPU/custom resource requests are granted exactly.

    Submits many tasks with random fractional resource requirements, verifies
    each task sees exactly the resources it asked for, then waits for the
    pool to return to its initial capacity.
    """
    ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})
    @ray.remote
    def g():
        return 1
    @ray.remote
    def f(block, accepted_resources):
        true_resources = {
            resource: value[0][1]
            for resource, value in ray.get_resource_ids().items()
        }
        if block:
            ray.get(g.remote())
        return true_resources == accepted_resources
    # Check that the resources are assigned correctly.
    result_ids = []
    for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
        # Requests are truncated to 4 decimal places, matching Ray's
        # resource granularity.
        resource_set = {"CPU": int(rand1 * 10000) / 10000}
        result_ids.append(f._remote([False, resource_set], num_cpus=rand1))
        resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
        result_ids.append(f._remote([False, resource_set], num_gpus=rand1))
        resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
        result_ids.append(
            f._remote([False, resource_set], resources={"Custom": rand1}))
        resource_set = {
            "CPU": int(rand1 * 10000) / 10000,
            "GPU": int(rand2 * 10000) / 10000,
            "Custom": int(rand3 * 10000) / 10000
        }
        result_ids.append(
            f._remote(
                [False, resource_set],
                num_cpus=rand1,
                num_gpus=rand2,
                resources={"Custom": rand3}))
        result_ids.append(
            f._remote(
                [True, resource_set],
                num_cpus=rand1,
                num_gpus=rand2,
                resources={"Custom": rand3}))
    assert all(ray.get(result_ids))
    # Poll until all resources are returned to the pool. Use .get() because a
    # fully-consumed resource key may be temporarily absent from
    # available_resources(), and sleep briefly to avoid a hot busy-wait.
    stop_time = time.time() + 10
    correct_available_resources = False
    while time.time() < stop_time:
        available = ray.available_resources()
        if (available.get("CPU", 0) == 2.0
                and available.get("GPU", 0) == 2.0
                and available.get("Custom", 0) == 2.0):
            correct_available_resources = True
            break
        time.sleep(0.1)
    assert correct_available_resources, \
        "Did not get correct available resources."
def test_get_multiple(ray_start_regular):
    """ray.get on a list resolves every ID, including duplicates, in order."""
    object_ids = [ray.put(value) for value in range(10)]
    assert ray.get(object_ids) == list(range(10))
    # Pick a random selection of indices, then repeat it so every chosen ID
    # appears twice in the request.
    chosen = list(np.random.choice(range(10), 5))
    chosen = chosen + chosen
    fetched = ray.get([object_ids[idx] for idx in chosen])
    assert fetched == chosen
def test_get_multiple_experimental(ray_start_regular):
    """ray.experimental.get accepts tuples and numpy arrays of ObjectIDs."""
    object_ids = [ray.put(i) for i in range(10)]
    object_ids_tuple = tuple(object_ids)
    assert ray.experimental.get(object_ids_tuple) == list(range(10))
    object_ids_nparray = np.array(object_ids)
    assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(ray_start_regular):
    """ray.experimental.get resolves ObjectIDs in a dict, passing plain values through."""
    # Half the values are ObjectIDs, half are already-plain ints.
    mixed = {str(i): ray.put(i) for i in range(5)}
    mixed.update({str(i): i for i in range(5, 10)})
    resolved = ray.experimental.get(mixed)
    assert resolved == {str(i): i for i in range(10)}
def test_get_with_timeout(ray_start_regular):
    """ray.get raises RayTimeoutError when the timeout elapses before the result."""
    @ray.remote
    def f(a):
        time.sleep(a)
        return a
    assert ray.get(f.remote(3), timeout=10) == 3
    obj_id = f.remote(3)
    # First get times out (task sleeps 3s > 2s timeout) ...
    with pytest.raises(RayTimeoutError):
        ray.get(obj_id, timeout=2)
    # ... the second get succeeds once the task has finished.
    assert ray.get(obj_id, timeout=2) == 3
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_cpus": 1,
        "num_nodes": 1,
    }, {
        "num_cpus": 1,
        "num_nodes": 2,
    }],
    indirect=True)
def test_direct_call_simple(ray_start_cluster):
    """Direct-call tasks return correct results on 1- and 2-node clusters."""
    @ray.remote
    def f(x):
        return x + 1
    f_direct = f.options(is_direct_call=True)
    assert ray.get(f_direct.remote(2)) == 3
    for _ in range(10):
        assert ray.get([f_direct.remote(i) for i in range(100)]) == list(
            range(1, 101))
def test_call_actors_indirect_through_tasks(ray_start_regular):
    """Actor handles passed into tasks (directly or inside a list) stay usable.

    Regression test for https://github.com/ray-project/ray/issues/6329.
    """
    @ray.remote
    class Counter:
        def __init__(self, value):
            self.value = int(value)
        def increase(self, delta):
            self.value += int(delta)
            return self.value
    # Handle parameters renamed from `object` (which shadowed the builtin)
    # to `counter` / `counters`.
    @ray.remote
    def foo(counter):
        return ray.get(counter.increase.remote(1))
    @ray.remote
    def bar(counter):
        return ray.get(counter.increase.remote(1))
    @ray.remote
    def zoo(counters):
        # Handle is received inside a container, not as a bare argument.
        return ray.get(counters[0].increase.remote(1))
    c = Counter.remote(0)
    for _ in range(100):
        ray.get(foo.remote(c))
        ray.get(bar.remote(c))
        ray.get(zoo.remote([c]))
def test_direct_call_refcount(ray_start_regular):
    """Direct-call results survive repeated gets and chained temporaries."""
    @ray.remote
    def f(x):
        return x + 1
    @ray.remote
    def sleep():
        time.sleep(.1)
        return 1
    # Multiple gets should not hang with ref counting enabled.
    f_direct = f.options(is_direct_call=True)
    x = f_direct.remote(2)
    ray.get(x)
    ray.get(x)
    # Temporary objects should be retained for chained callers.
    y = f_direct.remote(sleep.options(is_direct_call=True).remote())
    assert ray.get(y) == 2
def test_direct_call_matrix(shutdown_only):
    """Exhaustively pass small/large objects between tasks and actors.

    Covers every combination of (task|actor) source x (task|actor) dest x
    (small|large) payload x (inline|out-of-band) argument passing.
    """
    ray.init(object_store_memory=1000 * 1024 * 1024)
    @ray.remote
    class Actor:
        def small_value(self):
            return 0
        def large_value(self):
            return np.zeros(10 * 1024 * 1024)
        def echo(self, x):
            if isinstance(x, list):
                x = ray.get(x[0])
            return x
    @ray.remote
    def small_value():
        return 0
    @ray.remote
    def large_value():
        return np.zeros(10 * 1024 * 1024)
    @ray.remote
    def echo(x):
        if isinstance(x, list):
            x = ray.get(x[0])
        return x
    def check(source_actor, dest_actor, is_large, out_of_band):
        # One cell of the combination matrix; prints the case being run.
        print("CHECKING", "actor" if source_actor else "task", "to", "actor"
              if dest_actor else "task", "large_object"
              if is_large else "small_object", "out_of_band"
              if out_of_band else "in_band")
        if source_actor:
            a = Actor.options(is_direct_call=True).remote()
            if is_large:
                x_id = a.large_value.remote()
            else:
                x_id = a.small_value.remote()
        else:
            if is_large:
                x_id = large_value.options(is_direct_call=True).remote()
            else:
                x_id = small_value.options(is_direct_call=True).remote()
        if out_of_band:
            # Wrapping the ID in a list prevents inline resolution, so the
            # receiver must ray.get it explicitly.
            x_id = [x_id]
        if dest_actor:
            b = Actor.options(is_direct_call=True).remote()
            x = ray.get(b.echo.remote(x_id))
        else:
            x = ray.get(echo.options(is_direct_call=True).remote(x_id))
        if is_large:
            assert isinstance(x, np.ndarray)
        else:
            assert isinstance(x, int)
    for is_large in [False, True]:
        for source_actor in [False, True]:
            for dest_actor in [False, True]:
                for out_of_band in [False, True]:
                    check(source_actor, dest_actor, is_large, out_of_band)
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_cpus": 1,
        "num_nodes": 1,
    }, {
        "num_cpus": 1,
        "num_nodes": 2,
    }],
    indirect=True)
def test_direct_call_chain(ray_start_cluster):
    """A 100-deep chain of dependent direct-call tasks resolves correctly."""
    @ray.remote
    def g(x):
        return x + 1
    g_direct = g.options(is_direct_call=True)
    x = 0
    for _ in range(100):
        # Each iteration feeds the previous ObjectID as the next argument.
        x = g_direct.remote(x)
    assert ray.get(x) == 100
def test_direct_inline_arg_memory_corruption(ray_start_regular):
    """Inlined direct-call arguments must not be corrupted after delivery."""
    @ray.remote
    def f():
        return np.zeros(1000, dtype=np.uint8)
    @ray.remote
    class Actor:
        def __init__(self):
            self.z = []
        def add(self, x):
            self.z.append(x)
            # If the inline buffer were reused/corrupted, a previously
            # received all-zero array would change under us.
            for prev in self.z:
                assert np.sum(prev) == 0, ("memory corruption detected", prev)
    a = Actor.options(is_direct_call=True).remote()
    f_direct = f.options(is_direct_call=True)
    for i in range(100):
        ray.get(a.add.remote(f_direct.remote()))
def test_direct_actor_enabled(ray_start_regular):
    """Direct-call actor results bypass plasma but are still gettable."""
    @ray.remote
    class Actor:
        def __init__(self):
            pass
        def f(self, x):
            return x * 2
    a = Actor._remote(is_direct_call=True)
    obj_id = a.f.remote(1)
    # it is not stored in plasma
    assert not ray.worker.global_worker.core_worker.object_exists(obj_id)
    assert ray.get(obj_id) == 2
def test_direct_actor_order(shutdown_only):
    """Direct actor calls execute in submission order despite jittered deps."""
    ray.init(num_cpus=4)
    @ray.remote
    def small_value():
        # Random delay so dependencies resolve out of submission order.
        time.sleep(0.01 * np.random.randint(0, 10))
        return 0
    @ray.remote
    class Actor:
        def __init__(self):
            self.count = 0
        def inc(self, count, dependency):
            # `dependency` is unused; it only forces an ordering hazard.
            assert count == self.count
            self.count += 1
            return count
    a = Actor._remote(is_direct_call=True)
    assert ray.get([
        a.inc.remote(i, small_value.options(is_direct_call=True).remote())
        for i in range(100)
    ]) == list(range(100))
def test_direct_actor_large_objects(ray_start_regular):
    """Large direct-actor results are promoted to plasma once ready."""
    @ray.remote
    class Actor:
        def __init__(self):
            pass
        def f(self):
            time.sleep(1)
            return np.zeros(10000000)
    a = Actor._remote(is_direct_call=True)
    obj_id = a.f.remote()
    # Not in plasma before the task completes ...
    assert not ray.worker.global_worker.core_worker.object_exists(obj_id)
    done, _ = ray.wait([obj_id])
    assert len(done) == 1
    # ... but the large result lands in plasma once ready.
    assert ray.worker.global_worker.core_worker.object_exists(obj_id)
    assert isinstance(ray.get(obj_id), np.ndarray)
def test_direct_actor_pass_by_ref(ray_start_regular):
    """ObjectIDs passed to direct actor methods resolve; task errors propagate."""
    @ray.remote
    class Actor:
        def __init__(self):
            pass
        def f(self, x):
            return x * 2
    @ray.remote
    def f(x):
        return x
    @ray.remote
    def error():
        # Worker process exits, producing a failed dependency.
        sys.exit(0)
    a = Actor._remote(is_direct_call=True)
    assert ray.get(a.f.remote(f.remote(1))) == 2
    fut = [a.f.remote(f.remote(i)) for i in range(100)]
    assert ray.get(fut) == [i * 2 for i in range(100)]
    # propagates errors for pass by ref
    with pytest.raises(Exception):
        ray.get(a.f.remote(error.remote()))
def test_direct_actor_pass_by_ref_order_optimization(shutdown_only):
    """An actor call with a fast dependency must not wait behind a slow one."""
    ray.init(num_cpus=4)
    @ray.remote
    class Actor:
        def __init__(self):
            pass
        def f(self, x):
            pass
    a = Actor._remote(is_direct_call=True)
    @ray.remote
    def fast_value():
        print("fast value")
        pass
    @ray.remote
    def slow_value():
        print("start sleep")
        time.sleep(30)
    @ray.remote
    def runner(f):
        print("runner", a, f)
        return ray.get(a.f.remote(f.remote()))
    # Submit the slow chain first; the fast chain must still finish quickly.
    runner.remote(slow_value)
    time.sleep(1)
    x2 = runner.remote(fast_value)
    start = time.time()
    ray.get(x2)
    delta = time.time() - start
    assert delta < 10, "did not skip slow value"
def test_direct_actor_recursive(ray_start_regular):
    """Direct actors may delegate calls through a chain of other actors."""
    @ray.remote
    class Actor:
        def __init__(self, delegate=None):
            self.delegate = delegate
        def f(self, x):
            if self.delegate:
                # Forward to the next actor in the chain.
                return ray.get(self.delegate.f.remote(x))
            return x * 2
    a = Actor._remote(is_direct_call=True)
    b = Actor._remote(args=[a], is_direct_call=True)
    c = Actor._remote(args=[b], is_direct_call=True)
    result = ray.get([c.f.remote(i) for i in range(100)])
    assert result == [x * 2 for x in range(100)]
    result, _ = ray.wait([c.f.remote(i) for i in range(100)], num_returns=100)
    result = ray.get(result)
    assert result == [x * 2 for x in range(100)]
def test_direct_actor_concurrent(ray_start_regular):
    """max_concurrency=3 lets three actor calls rendezvous via an Event."""
    @ray.remote
    class Batcher:
        def __init__(self):
            self.batch = []
            self.event = threading.Event()
        def add(self, x):
            self.batch.append(x)
            # The third caller releases the first two; with max_concurrency=1
            # this would deadlock on event.wait().
            if len(self.batch) >= 3:
                self.event.set()
            else:
                self.event.wait()
            return sorted(self.batch)
    a = Batcher.options(is_direct_call=True, max_concurrency=3).remote()
    x1 = a.add.remote(1)
    x2 = a.add.remote(2)
    x3 = a.add.remote(3)
    r1 = ray.get(x1)
    r2 = ray.get(x2)
    r3 = ray.get(x3)
    assert r1 == [1, 2, 3]
    assert r1 == r2 == r3
def test_wait(ray_start_regular):
    """Exercise ray.wait: readiness counts, num_returns, timeouts,
    duplicate-ID and bad-argument errors, and the empty-list edge case."""

    @ray.remote
    def f(delay):
        time.sleep(delay)
        return

    object_ids = [f.remote(0), f.remote(0), f.remote(0), f.remote(0)]
    ready_ids, remaining_ids = ray.wait(object_ids)
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3
    ready_ids, remaining_ids = ray.wait(object_ids, num_returns=4)
    assert set(ready_ids) == set(object_ids)
    assert remaining_ids == []

    # One fast task, one 5 s task: after the 0.5 s timeout only the fast
    # one is ready even though two were requested.
    object_ids = [f.remote(0), f.remote(5)]
    ready_ids, remaining_ids = ray.wait(object_ids, timeout=0.5, num_returns=2)
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 1

    # Verify that calling wait with duplicate object IDs throws an
    # exception.
    x = ray.put(1)
    with pytest.raises(Exception):
        ray.wait([x, x])

    # Make sure it is possible to call wait with an empty list.
    ready_ids, remaining_ids = ray.wait([])
    assert ready_ids == []
    assert remaining_ids == []

    # Test semantics of num_returns with no timeout.
    oids = [ray.put(i) for i in range(10)]
    (found, rest) = ray.wait(oids, num_returns=2)
    assert len(found) == 2
    assert len(rest) == 8

    # Verify that incorrect usage raises a TypeError.
    x = ray.put(1)
    with pytest.raises(TypeError):
        ray.wait(x)
    with pytest.raises(TypeError):
        ray.wait(1)
    with pytest.raises(TypeError):
        ray.wait([1])
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_component_failures.py
|
Python
|
import os
import signal
import sys
import time
import pytest
import ray
from ray.test_utils import run_string_as_driver_nonblocking
# This test checks that when a worker dies in the middle of a get, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Not working with new GCS API.")
def test_dying_worker_get(ray_start_2_cpus):
    """SIGKILL a worker blocked in ray.get, then fulfil the awaited
    object; the surviving Ray processes must all stay alive."""

    @ray.remote
    def sleep_forever():
        ray.experimental.signal.send("ready")
        time.sleep(10**6)

    @ray.remote
    def get_worker_pid():
        return os.getpid()

    x_id = sleep_forever.remote()
    ray.experimental.signal.receive([x_id])  # Block until it is scheduled.
    # Get the PID of the other worker.
    worker_pid = ray.get(get_worker_pid.remote())

    @ray.remote
    def f(id_in_a_list):
        ray.get(id_in_a_list[0])

    # Have the worker wait in a get call.
    result_id = f.remote([x_id])
    time.sleep(1)
    # Make sure the task hasn't finished.
    ready_ids, _ = ray.wait([result_id], timeout=0)
    assert len(ready_ids) == 0
    # Kill the worker.
    os.kill(worker_pid, signal.SIGKILL)
    time.sleep(0.1)
    # Make sure the sleep task hasn't finished.
    ready_ids, _ = ray.wait([x_id], timeout=0)
    assert len(ready_ids) == 0
    # Seal the object so the store attempts to notify the worker that the
    # get has been fulfilled.
    ray.worker.global_worker.put_object(1, x_id)
    time.sleep(0.1)
    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
# This test checks that when a driver dies in the middle of a get, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Not working with new GCS API.")
def test_dying_driver_get(ray_start_regular):
    """Kill a second driver process blocked in ray.get, then fulfil the
    object; the shared Ray processes must survive."""
    # Start the Ray processes.
    address_info = ray_start_regular

    @ray.remote
    def sleep_forever():
        time.sleep(10**6)

    x_id = sleep_forever.remote()

    # Second driver script: connects and blocks in ray.get on x_id.
    driver = """
import ray
ray.init("{}")
ray.get(ray.ObjectID(ray.utils.hex_to_binary("{}")))
""".format(address_info["redis_address"], x_id.hex())

    p = run_string_as_driver_nonblocking(driver)
    # Make sure the driver is running.
    time.sleep(1)
    assert p.poll() is None

    # Kill the driver process.
    p.kill()
    p.wait()
    time.sleep(0.1)

    # Make sure the original task hasn't finished.
    ready_ids, _ = ray.wait([x_id], timeout=0)
    assert len(ready_ids) == 0
    # Seal the object so the store attempts to notify the worker that the
    # get has been fulfilled.
    ray.worker.global_worker.put_object(1, x_id)
    time.sleep(0.1)
    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
# This test checks that when a worker dies in the middle of a wait, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Not working with new GCS API.")
def test_dying_worker_wait(ray_start_2_cpus):
    """SIGKILL a worker blocked in ray.wait, then create the awaited
    object; the surviving Ray processes must all stay alive."""

    @ray.remote
    def sleep_forever():
        time.sleep(10**6)

    @ray.remote
    def get_pid():
        return os.getpid()

    x_id = sleep_forever.remote()
    # Get the PID of the worker that block_in_wait will run on (sleep a little
    # to make sure that sleep_forever has already started).
    time.sleep(0.1)
    worker_pid = ray.get(get_pid.remote())

    @ray.remote
    def block_in_wait(object_id_in_list):
        ray.wait(object_id_in_list)

    # Have the worker wait in a wait call.
    block_in_wait.remote([x_id])
    time.sleep(0.1)

    # Kill the worker.
    os.kill(worker_pid, signal.SIGKILL)
    time.sleep(0.1)

    # Create the object.
    ray.worker.global_worker.put_object(1, x_id)
    time.sleep(0.1)

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
# This test checks that when a driver dies in the middle of a wait, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Not working with new GCS API.")
def test_dying_driver_wait(ray_start_regular):
    """Kill a second driver process blocked in ray.wait, then fulfil the
    object; the shared Ray processes must survive."""
    # Start the Ray processes.
    address_info = ray_start_regular

    @ray.remote
    def sleep_forever():
        time.sleep(10**6)

    x_id = sleep_forever.remote()

    # Second driver script: connects and blocks in ray.wait on x_id.
    driver = """
import ray
ray.init("{}")
ray.wait([ray.ObjectID(ray.utils.hex_to_binary("{}"))])
""".format(address_info["redis_address"], x_id.hex())

    p = run_string_as_driver_nonblocking(driver)
    # Make sure the driver is running.
    time.sleep(1)
    assert p.poll() is None

    # Kill the driver process.
    p.kill()
    p.wait()
    time.sleep(0.1)

    # Make sure the original task hasn't finished.
    ready_ids, _ = ray.wait([x_id], timeout=0)
    assert len(ready_ids) == 0
    # Seal the object so the store attempts to notify the worker that the
    # wait can return.
    ray.worker.global_worker.put_object(1, x_id)
    time.sleep(0.1)
    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
if __name__ == "__main__":
    # Support direct invocation of this test module.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_component_failures_2.py
|
Python
|
import json
import os
import signal
import sys
import time
import pytest
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray.test_utils import RayTestTimeoutException
@pytest.fixture(params=[(1, 4), (4, 4)])
def ray_start_workers_separate_multinode(request):
    """Yield ``(num_nodes, num_initial_workers)`` for a freshly started
    multi-node cluster; tear Ray and the cluster down afterwards."""
    num_nodes, num_initial_workers = request.param

    # Start the Ray processes.
    cluster = Cluster()
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_initial_workers)
    ray.init(address=cluster.address)

    yield num_nodes, num_initial_workers

    # Teardown: disconnect the driver, then stop every cluster process.
    ray.shutdown()
    cluster.shutdown()
def test_worker_failed(ray_start_workers_separate_multinode):
    """SIGKILL every worker while tasks run; each submitted result must
    either be retrievable or raise RayTaskError/RayWorkerError."""
    num_nodes, num_initial_workers = (ray_start_workers_separate_multinode)

    @ray.remote
    def get_pids():
        time.sleep(0.25)
        return os.getpid()

    # Collect the PID of every worker in the cluster, retrying for up to
    # 60 s until all of them have been observed.
    start_time = time.time()
    pids = set()
    while len(pids) < num_nodes * num_initial_workers:
        new_pids = ray.get([
            get_pids.remote()
            for _ in range(2 * num_nodes * num_initial_workers)
        ])
        for pid in new_pids:
            pids.add(pid)
        if time.time() - start_time > 60:
            raise RayTestTimeoutException(
                "Timed out while waiting to get worker PIDs.")

    @ray.remote
    def f(x):
        time.sleep(0.5)
        return x

    # Submit more tasks than there are workers so that all workers and
    # cores are utilized.
    object_ids = [f.remote(i) for i in range(num_initial_workers * num_nodes)]
    object_ids += [f.remote(object_id) for object_id in object_ids]
    # Allow the tasks some time to begin executing.
    time.sleep(0.1)
    # Kill the workers as the tasks execute.
    for pid in pids:
        os.kill(pid, signal.SIGKILL)
        time.sleep(0.1)
    # Make sure that we either get the object or we get an appropriate
    # exception.
    for object_id in object_ids:
        try:
            ray.get(object_id)
        except (ray.exceptions.RayTaskError, ray.exceptions.RayWorkerError):
            pass
def _test_component_failed(cluster, component_type):
    """Kill a component on all worker nodes and check workload succeeds."""

    # Submit many tasks with many dependencies.
    @ray.remote
    def f(x):
        return x

    @ray.remote
    def g(*xs):
        return 1

    # Kill the component on all nodes except the head node as the tasks
    # execute. Do this in a loop while submitting tasks between each
    # component failure.
    time.sleep(0.1)
    worker_nodes = cluster.list_all_nodes()[1:]
    assert len(worker_nodes) > 0
    for node in worker_nodes:
        process = node.all_processes[component_type][0].process
        # Submit a round of tasks with many dependencies.
        x = 1
        for _ in range(1000):
            # Long chain of dependent tasks.
            x = f.remote(x)

        xs = [g.remote(1)]
        for _ in range(100):
            # Wide fan-in: each g depends on all previous results.
            xs.append(g.remote(*xs))
            xs.append(g.remote(1))

        # Kill a component on one of the nodes.
        process.terminate()
        time.sleep(1)
        process.kill()
        process.wait()
        assert not process.poll() is None

        # Make sure that we can still get the objects after the
        # executing tasks died.
        ray.get(x)
        ray.get(xs)
def check_components_alive(cluster, component_type, check_component_alive):
    """Check that a given component type is alive on all worker nodes.

    Args:
        cluster: Cluster whose non-head nodes are inspected.
        component_type: Key into each node's ``all_processes`` dict.
        check_component_alive: If True, assert the component process is
            still running; if False, wait for it and assert it exited.
    """
    worker_nodes = cluster.list_all_nodes()[1:]
    assert len(worker_nodes) > 0
    for node in worker_nodes:
        process = node.all_processes[component_type][0].process
        if check_component_alive:
            # poll() returns None while the process is still running.
            assert process.poll() is None
        else:
            # Fixed log message: the original concatenation produced
            # "PID <pid>to terminate" with no separating space.
            print("waiting for " + component_type + " with PID " +
                  str(process.pid) + " to terminate")
            process.wait()
            print("done waiting for " + component_type + " with PID " +
                  str(process.pid) + " to terminate")
            assert process.poll() is not None
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_cpus": 8,
        "num_nodes": 4,
        "_internal_config": json.dumps({
            "num_heartbeats_timeout": 100
        }),
    }],
    indirect=True)
def test_raylet_failed(ray_start_cluster):
    """Killing every worker-node raylet must not lose the workload, and
    must leave the plasma stores on those nodes running."""
    cluster = ray_start_cluster
    # Kill all raylets on worker nodes.
    _test_component_failed(cluster, ray_constants.PROCESS_TYPE_RAYLET)

    # The plasma stores should still be alive on the worker nodes.
    check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE,
                           True)
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with new GCS API.")
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_cpus": 8,
        "num_nodes": 2,
        "_internal_config": json.dumps({
            "num_heartbeats_timeout": 100
        }),
    }],
    indirect=True)
def test_plasma_store_failed(ray_start_cluster):
    """Killing every worker-node plasma store must not lose the workload;
    both the stores and the raylets on those nodes are expected down."""
    cluster = ray_start_cluster
    # Kill all plasma stores on worker nodes.
    _test_component_failed(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE)

    # No processes should be left alive on the worker nodes.
    check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE,
                           False)
    check_components_alive(cluster, ray_constants.PROCESS_TYPE_RAYLET, False)
if __name__ == "__main__":
    # Support direct invocation of this test module.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_component_failures_3.py
|
Python
|
import os
import sys
import time
import numpy as np
import pytest
import ray
import ray.ray_constants as ray_constants
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_cpus": 4,
        "num_nodes": 3,
        "do_init": True
    }],
    indirect=True)
def test_actor_creation_node_failure(ray_start_cluster):
    """Repeatedly remove nodes while child actors randomly exit; actor
    creation tasks forwarded to removed nodes must be reconstructed."""
    # TODO(swang): Refactor test_raylet_failed, etc to reuse the below code.
    cluster = ray_start_cluster

    @ray.remote
    class Child:
        def __init__(self, death_probability):
            self.death_probability = death_probability

        def get_probability(self):
            return self.death_probability

        def ping(self):
            # Exit process with some probability.
            exit_chance = np.random.rand()
            if exit_chance < self.death_probability:
                sys.exit(-1)

    num_children = 25
    # Children actors will die about half the time.
    death_probability = 0.5

    children = [Child.remote(death_probability) for _ in range(num_children)]
    while len(cluster.list_all_nodes()) > 1:
        for j in range(2):
            # Submit some tasks on the actors. About half of the actors will
            # fail.
            children_out = [child.ping.remote() for child in children]
            # Wait a while for all the tasks to complete. This should trigger
            # reconstruction for any actor creation tasks that were forwarded
            # to nodes that then failed.
            ready, _ = ray.wait(
                children_out, num_returns=len(children_out), timeout=5 * 60.0)
            assert len(ready) == len(children_out)

            # Replace any actors that died.
            for i, out in enumerate(children_out):
                try:
                    ray.get(out)
                except ray.exceptions.RayActorError:
                    children[i] = Child.remote(death_probability)

            children_out = [
                child.get_probability.remote() for child in children
            ]
            # Wait for new created actors to finish creation before
            # removing a node. This is needed because right now we don't
            # support reconstructing actors that died in the process of
            # being created.
            ready, _ = ray.wait(
                children_out, num_returns=len(children_out), timeout=5 * 60.0)
            assert len(ready) == len(children_out)
        # Remove a node. Any actor creation tasks that were forwarded to this
        # node must be reconstructed.
        cluster.remove_node(cluster.list_all_nodes()[-1])
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with new GCS API.")
def test_driver_lives_sequential(ray_start_regular):
    """Kill every auxiliary Ray process one after another; reaching the
    end of the test proves the driver itself survived."""
    node = ray.worker._global_node
    for kill in (node.kill_raylet, node.kill_plasma_store,
                 node.kill_log_monitor, node.kill_monitor,
                 node.kill_raylet_monitor):
        kill()
    # If the driver can reach the tearDown method, then it is still alive.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with new GCS API.")
def test_driver_lives_parallel(ray_start_regular):
    """Terminate, kill, and reap all five auxiliary Ray processes in
    parallel; the driver must stay alive."""
    all_processes = ray.worker._global_node.all_processes
    process_infos = (all_processes[ray_constants.PROCESS_TYPE_PLASMA_STORE] +
                     all_processes[ray_constants.PROCESS_TYPE_RAYLET] +
                     all_processes[ray_constants.PROCESS_TYPE_LOG_MONITOR] +
                     all_processes[ray_constants.PROCESS_TYPE_MONITOR] +
                     all_processes[ray_constants.PROCESS_TYPE_RAYLET_MONITOR])
    assert len(process_infos) == 5

    # Kill all the components in parallel.
    for process_info in process_infos:
        process_info.process.terminate()

    time.sleep(0.1)
    for process_info in process_infos:
        process_info.process.kill()

    for process_info in process_infos:
        process_info.process.wait()

    # If the driver can reach the tearDown method, then it is still alive.
if __name__ == "__main__":
    # Support direct invocation of this test module.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_cython.py
|
Python
|
from __future__ import absolute_import
from __future__ import print_function
import math
import numpy as np
import unittest
import ray
import cython_examples as cyth
def get_ray_result(cython_func, *args):
    """Run ``cython_func`` as a Ray task with ``args`` and return its result."""
    return ray.get(ray.remote(cython_func).remote(*args))
class CythonTest(unittest.TestCase):
    """Checks that Cython functions and classes work as Ray remotes."""

    def setUp(self):
        ray.init(object_store_memory=int(150 * 1024 * 1024))

    def tearDown(self):
        ray.shutdown()

    def assertEqualHelper(self, cython_func, expected, *args):
        # Run the Cython function as a remote task and compare the result.
        assert get_ray_result(cython_func, *args) == expected

    def test_simple_func(self):
        self.assertEqualHelper(cyth.simple_func, 6, 1, 2, 3)
        self.assertEqualHelper(cyth.fib, 55, 10)
        self.assertEqualHelper(cyth.fib_int, 55, 10)
        self.assertEqualHelper(cyth.fib_cpdef, 55, 10)
        self.assertEqualHelper(cyth.fib_cdef, 55, 10)

    def test_simple_class(self):
        cls = ray.remote(cyth.simple_class)
        a1 = cls.remote()
        a2 = cls.remote()
        result1 = ray.get(a1.increment.remote())
        result2 = ray.get(a2.increment.remote())
        result3 = ray.get(a2.increment.remote())
        # Each actor keeps its own counter state.
        assert result1 == 1
        assert result2 == 1
        assert result3 == 2

    def test_numpy(self):
        array = np.array([-1.0, 0.0, 1.0, 2.0])
        answer = [float("-inf") if x <= 0 else math.log(x) for x in array]
        result = get_ray_result(cyth.masked_log, array)
        np.testing.assert_array_equal(answer, result)
if __name__ == "__main__":
    # Support direct invocation of this test module.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_debug_tools.py
|
Python
|
import os
import subprocess
import sys
import pytest
import ray
@pytest.fixture
def ray_gdb_start():
    """Set the RAY_*_GDB / RAY_*_TMUX env vars so the raylet and plasma
    store launch under gdb in tmux; restore the environment and shut Ray
    down afterwards."""
    # Setup environment and start ray
    _environ = os.environ.copy()
    for process_name in ["RAYLET", "PLASMA_STORE"]:
        os.environ["RAY_{}_GDB".format(process_name)] = "1"
        os.environ["RAY_{}_TMUX".format(process_name)] = "1"

    yield None

    # Restore original environment and stop ray
    os.environ.clear()
    os.environ.update(_environ)
    ray.shutdown()
@pytest.mark.skipif(
    sys.platform != "linux" and sys.platform != "linux2",
    reason="This test requires Linux.")
def test_raylet_gdb(ray_gdb_start):
    """Start Ray with the gdb env vars set and confirm that gdb processes
    wrapping the raylet and plasma store are found via pgrep."""
    # ray_gdb_start yields the expected process name
    ray.init(num_cpus=1)

    @ray.remote
    def f():
        return 42

    assert ray.get(f.remote()) == 42

    # Check process name in `ps aux | grep gdb`
    for process_name in ["raylet/raylet", "plasma/plasma_store_server"]:
        pgrep_command = subprocess.Popen(
            ["pgrep", "-f", "gdb.*{}".format(process_name)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        # Non-empty stdout means pgrep matched a gdb-wrapped process.
        assert pgrep_command.communicate()[0]
if __name__ == "__main__":
    # Make subprocess invocations locale-safe when run under bazel.
    for var in ("LC_ALL", "LANG"):
        os.environ[var] = "en_US.UTF-8"
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_dynres.py
|
Python
|
import logging
import time
import ray
import ray.cluster_utils
import ray.test_utils
# Module-level logger, following the one-logger-per-module convention.
logger = logging.getLogger(__name__)
def test_dynamic_res_creation(ray_start_regular):
    """Create a resource on the local node (no client_id) and verify it
    appears in both the available and cluster resource views."""
    name, capacity = "test_res", 1.0

    @ray.remote
    def set_res(resource_name, resource_capacity):
        ray.experimental.set_resource(resource_name, resource_capacity)

    ray.get(set_res.remote(name, capacity))

    assert ray.available_resources()[name] == capacity
    assert ray.cluster_resources()[name] == capacity
def test_dynamic_res_deletion(shutdown_only):
    """Setting a resource's capacity to 0 removes it from both the
    available and cluster resource views."""
    name, capacity = "test_res", 1.0
    ray.init(num_cpus=1, resources={name: capacity})

    @ray.remote
    def delete_res(resource_name):
        # Capacity 0 deletes the resource.
        ray.experimental.set_resource(resource_name, 0)

    ray.get(delete_res.remote(name))

    assert name not in ray.available_resources()
    assert name not in ray.cluster_resources()
def test_dynamic_res_infeasible_rescheduling(ray_start_regular):
    """Launch an infeasible task, then create the resource it needs; the
    scheduler must pick the queued task back up."""
    # This test launches an infeasible task and then creates a
    # resource to make the task feasible. This tests if the
    # infeasible tasks get rescheduled when resources are
    # created at runtime.
    res_name = "test_res"
    res_capacity = 1.0

    @ray.remote
    def set_res(resource_name, resource_capacity):
        ray.experimental.set_resource(resource_name, resource_capacity)

    def f():
        return 1

    remote_task = ray.remote(resources={res_name: res_capacity})(f)
    oid = remote_task.remote()  # This is infeasible
    ray.get(set_res.remote(res_name, res_capacity))  # Now should be feasible

    available_res = ray.available_resources()
    assert available_res[res_name] == res_capacity

    successful, unsuccessful = ray.wait([oid], timeout=1)
    assert successful  # The task completed
def test_dynamic_res_updation_clientid(ray_start_cluster):
    """Create a resource on a specific node, raise its capacity, and
    verify the node reports the new value."""
    # This test does a simple resource capacity update
    cluster = ray_start_cluster

    res_name = "test_res"
    res_capacity = 1.0
    num_nodes = 3
    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)

    target_node_id = ray.nodes()[1]["NodeID"]

    @ray.remote
    def set_res(resource_name, resource_capacity, client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=client_id)

    # Create resource
    ray.get(set_res.remote(res_name, res_capacity, target_node_id))

    # Update resource
    new_capacity = res_capacity + 1
    ray.get(set_res.remote(res_name, new_capacity, target_node_id))

    target_node = next(
        node for node in ray.nodes() if node["NodeID"] == target_node_id)
    resources = target_node["Resources"]
    assert res_name in resources
    assert resources[res_name] == new_capacity
def test_dynamic_res_creation_clientid(ray_start_cluster):
    """Create a resource on one specific node and verify that node
    reports it at the requested capacity."""
    # Creates a resource on a specific client and verifies creation.
    cluster = ray_start_cluster

    res_name = "test_res"
    res_capacity = 1.0
    num_nodes = 3
    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)

    target_node_id = ray.nodes()[1]["NodeID"]

    @ray.remote
    def set_res(resource_name, resource_capacity, res_client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=res_client_id)

    ray.get(set_res.remote(res_name, res_capacity, target_node_id))

    target_node = next(
        node for node in ray.nodes() if node["NodeID"] == target_node_id)
    resources = target_node["Resources"]
    assert res_name in resources
    assert resources[res_name] == res_capacity
def test_dynamic_res_creation_clientid_multiple(ray_start_cluster):
    """Create the same resource on every node by client id and poll until
    all nodes report it."""
    # This test creates resources on multiple clients using the clientid
    # specifier
    cluster = ray_start_cluster

    TIMEOUT = 5
    res_name = "test_res"
    res_capacity = 1.0
    num_nodes = 3
    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)

    target_node_ids = [node["NodeID"] for node in ray.nodes()]

    @ray.remote
    def set_res(resource_name, resource_capacity, res_client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=res_client_id)

    results = []
    for nid in target_node_ids:
        results.append(set_res.remote(res_name, res_capacity, nid))
    ray.get(results)

    # Poll (up to TIMEOUT seconds) until every node reports the resource.
    success = False
    start_time = time.time()
    while time.time() - start_time < TIMEOUT and not success:
        resources_created = []
        for nid in target_node_ids:
            target_node = next(
                node for node in ray.nodes() if node["NodeID"] == nid)
            resources = target_node["Resources"]
            resources_created.append(resources[res_name] == res_capacity)
        success = all(resources_created)
    assert success
def test_dynamic_res_deletion_clientid(ray_start_cluster):
    """Delete a resource from one target node (capacity 0) and verify
    that node no longer reports it."""
    # This test deletes a resource on a given client id
    cluster = ray_start_cluster

    res_name = "test_res"
    res_capacity = 1.0
    num_nodes = 5

    for i in range(num_nodes):
        # Create resource on all nodes, but later we'll delete it from a
        # target node
        cluster.add_node(resources={res_name: res_capacity})
    ray.init(address=cluster.address)

    target_node_id = ray.nodes()[1]["NodeID"]

    # Launch the delete task
    @ray.remote
    def delete_res(resource_name, res_client_id):
        ray.experimental.set_resource(
            resource_name, 0, client_id=res_client_id)

    ray.get(delete_res.remote(res_name, target_node_id))

    target_node = next(
        node for node in ray.nodes() if node["NodeID"] == target_node_id)
    resources = target_node["Resources"]
    print(ray.cluster_resources())
    assert res_name not in resources
def test_dynamic_res_creation_scheduler_consistency(ray_start_cluster):
    """After creating a resource, a task requiring it must be scheduled
    and complete — proving the scheduler saw the creation."""
    # This makes sure the resource is actually created and the state is
    # consistent in the scheduler
    # by launching a task which requests the created resource
    cluster = ray_start_cluster

    res_name = "test_res"
    res_capacity = 1.0
    num_nodes = 5

    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)

    node_ids = [node["NodeID"] for node in ray.nodes()]

    @ray.remote
    def set_res(resource_name, resource_capacity, res_client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=res_client_id)

    # Create the resource on node1
    target_node_id = node_ids[1]
    ray.get(set_res.remote(res_name, res_capacity, target_node_id))

    # Define a task which requires this resource
    @ray.remote(resources={res_name: res_capacity})
    def test_func():
        return 1

    result = test_func.remote()
    successful, unsuccessful = ray.wait([result], timeout=5)
    assert successful  # The task completed
def test_dynamic_res_deletion_scheduler_consistency(ray_start_cluster):
    """After deleting a resource, a task requiring it must stay pending
    (infeasible) — proving the scheduler saw the deletion."""
    # This makes sure the resource is actually deleted and the state is
    # consistent in the scheduler by launching an infeasible task which
    # requests the created resource
    cluster = ray_start_cluster

    res_name = "test_res"
    res_capacity = 1.0
    num_nodes = 5
    TIMEOUT_DURATION = 1

    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)

    node_ids = [node["NodeID"] for node in ray.nodes()]

    @ray.remote
    def delete_res(resource_name, res_client_id):
        ray.experimental.set_resource(
            resource_name, 0, client_id=res_client_id)

    @ray.remote
    def set_res(resource_name, resource_capacity, res_client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=res_client_id)

    # Create the resource on node1
    target_node_id = node_ids[1]
    ray.get(set_res.remote(res_name, res_capacity, target_node_id))
    assert ray.cluster_resources()[res_name] == res_capacity

    # Delete the resource
    ray.get(delete_res.remote(res_name, target_node_id))

    # Define a task which requires this resource. This should not run
    @ray.remote(resources={res_name: res_capacity})
    def test_func():
        return 1

    result = test_func.remote()
    successful, unsuccessful = ray.wait([result], timeout=TIMEOUT_DURATION)
    assert unsuccessful  # The task did not complete because it's infeasible
def test_dynamic_res_concurrent_res_increment(ray_start_cluster):
    """Raise a resource's capacity (5 -> 10) while a running task holds 4
    units; afterwards a task needing the full new capacity must run and
    one needing capacity + 1 must stay infeasible."""
    # This test makes sure resource capacity is updated (increment) correctly
    # when a task has already acquired some of the resource.
    cluster = ray_start_cluster

    res_name = "test_res"
    res_capacity = 5
    updated_capacity = 10
    num_nodes = 5
    TIMEOUT_DURATION = 1

    # Create a object ID to have the task wait on
    WAIT_OBJECT_ID_STR = ("a" * 20).encode("ascii")
    # Create a object ID to signal that the task is running
    TASK_RUNNING_OBJECT_ID_STR = ("b" * 20).encode("ascii")

    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)

    node_ids = [node["NodeID"] for node in ray.nodes()]
    target_node_id = node_ids[1]

    @ray.remote
    def set_res(resource_name, resource_capacity, res_client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=res_client_id)

    # Create the resource on node 1
    ray.get(set_res.remote(res_name, res_capacity, target_node_id))
    assert ray.cluster_resources()[res_name] == res_capacity

    # Task to hold the resource till the driver signals to finish
    @ray.remote
    def wait_func(running_oid, wait_oid):
        # Signal that the task is running
        ray.worker.global_worker.put_object(1, ray.ObjectID(running_oid))
        # Make the task wait till signalled by driver
        ray.get(ray.ObjectID(wait_oid))

    @ray.remote
    def test_func():
        return 1

    # Launch the task with resource requirement of 4, thus the new available
    # capacity becomes 1
    task = wait_func._remote(
        args=[TASK_RUNNING_OBJECT_ID_STR, WAIT_OBJECT_ID_STR],
        resources={res_name: 4})

    # Wait till wait_func is launched before updating resource
    ray.get(ray.ObjectID(TASK_RUNNING_OBJECT_ID_STR))

    # Update the resource capacity
    ray.get(set_res.remote(res_name, updated_capacity, target_node_id))

    # Signal task to complete
    ray.worker.global_worker.put_object(1, ray.ObjectID(WAIT_OBJECT_ID_STR))
    ray.get(task)

    # Check if scheduler state is consistent by launching a task requiring
    # updated capacity
    task_2 = test_func._remote(args=[], resources={res_name: updated_capacity})
    successful, unsuccessful = ray.wait([task_2], timeout=TIMEOUT_DURATION)
    assert successful  # The task completed

    # Check if scheduler state is consistent by launching a task requiring
    # updated capacity + 1. This should not execute
    task_3 = test_func._remote(
        args=[], resources={res_name: updated_capacity + 1
                            })  # This should be infeasible
    successful, unsuccessful = ray.wait([task_3], timeout=TIMEOUT_DURATION)
    assert unsuccessful  # The task did not complete because it's infeasible

    assert ray.available_resources()[res_name] == updated_capacity
def test_dynamic_res_concurrent_res_decrement(ray_start_cluster):
    """Lower a resource's capacity (5 -> 2) while a running task holds 4
    units; afterwards a task needing the new capacity must run and one
    needing capacity + 1 must stay infeasible."""
    # This test makes sure resource capacity is updated (decremented)
    # correctly when a task has already acquired some
    # of the resource.
    cluster = ray_start_cluster

    res_name = "test_res"
    res_capacity = 5
    updated_capacity = 2
    num_nodes = 5
    TIMEOUT_DURATION = 1

    # Create a object ID to have the task wait on
    WAIT_OBJECT_ID_STR = ("a" * 20).encode("ascii")
    # Create a object ID to signal that the task is running
    TASK_RUNNING_OBJECT_ID_STR = ("b" * 20).encode("ascii")

    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)

    node_ids = [node["NodeID"] for node in ray.nodes()]
    target_node_id = node_ids[1]

    @ray.remote
    def set_res(resource_name, resource_capacity, res_client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=res_client_id)

    # Create the resource on node 1
    ray.get(set_res.remote(res_name, res_capacity, target_node_id))
    assert ray.cluster_resources()[res_name] == res_capacity

    # Task to hold the resource till the driver signals to finish
    @ray.remote
    def wait_func(running_oid, wait_oid):
        # Signal that the task is running
        ray.worker.global_worker.put_object(1, ray.ObjectID(running_oid))
        # Make the task wait till signalled by driver
        ray.get(ray.ObjectID(wait_oid))

    @ray.remote
    def test_func():
        return 1

    # Launch the task with resource requirement of 4, thus the new available
    # capacity becomes 1
    task = wait_func._remote(
        args=[TASK_RUNNING_OBJECT_ID_STR, WAIT_OBJECT_ID_STR],
        resources={res_name: 4})

    # Wait till wait_func is launched before updating resource
    ray.get(ray.ObjectID(TASK_RUNNING_OBJECT_ID_STR))

    # Decrease the resource capacity
    ray.get(set_res.remote(res_name, updated_capacity, target_node_id))

    # Signal task to complete
    ray.worker.global_worker.put_object(1, ray.ObjectID(WAIT_OBJECT_ID_STR))
    ray.get(task)

    # Check if scheduler state is consistent by launching a task requiring
    # updated capacity
    task_2 = test_func._remote(args=[], resources={res_name: updated_capacity})
    successful, unsuccessful = ray.wait([task_2], timeout=TIMEOUT_DURATION)
    assert successful  # The task completed

    # Check if scheduler state is consistent by launching a task requiring
    # updated capacity + 1. This should not execute
    task_3 = test_func._remote(
        args=[], resources={res_name: updated_capacity + 1
                            })  # This should be infeasible
    successful, unsuccessful = ray.wait([task_3], timeout=TIMEOUT_DURATION)
    assert unsuccessful  # The task did not complete because it's infeasible

    assert ray.available_resources()[res_name] == updated_capacity
def test_dynamic_res_concurrent_res_delete(ray_start_cluster):
    """Delete a resource while a running task holds 4 of its 5 units;
    afterwards any task requiring the resource must stay infeasible."""
    # This test makes sure resource gets deleted correctly when a task has
    # already acquired the resource
    cluster = ray_start_cluster

    res_name = "test_res"
    res_capacity = 5
    num_nodes = 5
    TIMEOUT_DURATION = 1

    # Create a object ID to have the task wait on
    WAIT_OBJECT_ID_STR = ("a" * 20).encode("ascii")
    # Create a object ID to signal that the task is running
    TASK_RUNNING_OBJECT_ID_STR = ("b" * 20).encode("ascii")

    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)

    node_ids = [node["NodeID"] for node in ray.nodes()]
    target_node_id = node_ids[1]

    @ray.remote
    def set_res(resource_name, resource_capacity, res_client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=res_client_id)

    @ray.remote
    def delete_res(resource_name, res_client_id):
        ray.experimental.set_resource(
            resource_name, 0, client_id=res_client_id)

    # Create the resource on node 1
    ray.get(set_res.remote(res_name, res_capacity, target_node_id))
    assert ray.cluster_resources()[res_name] == res_capacity

    # Task to hold the resource till the driver signals to finish
    @ray.remote
    def wait_func(running_oid, wait_oid):
        # Signal that the task is running
        ray.worker.global_worker.put_object(1, ray.ObjectID(running_oid))
        # Make the task wait till signalled by driver
        ray.get(ray.ObjectID(wait_oid))

    @ray.remote
    def test_func():
        return 1

    # Launch the task with resource requirement of 4, thus the new available
    # capacity becomes 1
    task = wait_func._remote(
        args=[TASK_RUNNING_OBJECT_ID_STR, WAIT_OBJECT_ID_STR],
        resources={res_name: 4})

    # Wait till wait_func is launched before updating resource
    ray.get(ray.ObjectID(TASK_RUNNING_OBJECT_ID_STR))

    # Delete the resource
    ray.get(delete_res.remote(res_name, target_node_id))

    # Signal task to complete
    ray.worker.global_worker.put_object(1, ray.ObjectID(WAIT_OBJECT_ID_STR))
    ray.get(task)

    # Check if scheduler state is consistent by launching a task requiring
    # the deleted resource This should not execute
    task_2 = test_func._remote(
        args=[], resources={res_name: 1})  # This should be infeasible
    successful, unsuccessful = ray.wait([task_2], timeout=TIMEOUT_DURATION)
    assert unsuccessful  # The task did not complete because it's infeasible

    assert res_name not in ray.available_resources()
def test_dynamic_res_creation_stress(ray_start_cluster):
    """Stress test: create many dynamic resources concurrently on one node.

    Launches NUM_RES_TO_CREATE set_resource tasks targeting the same node and
    then verifies that every resource eventually shows up in the cluster-wide
    resource view, i.e. the GCS state stays consistent under load.
    """
    cluster = ray_start_cluster
    TIMEOUT = 5
    res_capacity = 1
    num_nodes = 5
    NUM_RES_TO_CREATE = 500
    for i in range(num_nodes):
        cluster.add_node()
    ray.init(address=cluster.address)
    node_ids = [node["NodeID"] for node in ray.nodes()]
    # All resources are created on the second node in the cluster.
    target_node_id = node_ids[1]

    @ray.remote
    def set_res(resource_name, resource_capacity, res_client_id):
        ray.experimental.set_resource(
            resource_name, resource_capacity, client_id=res_client_id)

    # Fire all creations concurrently, then wait for every task to finish.
    results = [
        set_res.remote(str(i), res_capacity, target_node_id)
        for i in range(0, NUM_RES_TO_CREATE)
    ]
    ray.get(results)

    # Poll until every created resource name is visible cluster-wide, or
    # until TIMEOUT seconds elapse.  Sleep briefly between polls instead of
    # busy-spinning while the GCS catches up.
    success = False
    start_time = time.time()
    while time.time() - start_time < TIMEOUT and not success:
        resources = ray.cluster_resources()
        success = all(
            str(i) in resources for i in range(0, NUM_RES_TO_CREATE))
        if not success:
            time.sleep(0.1)
    assert success
def test_release_cpus_when_actor_creation_task_blocking(shutdown_only):
ray.init(num_cpus=2)
@ray.remote(num_cpus=1)
def get_100():
time.sleep(1)
return 100
@ray.remote(num_cpus=1)
class A:
def __init__(self):
self.num = ray.get(get_100.remote())
def get_num(self):
return self.num
a = A.remote()
assert 100 == ray.get(a.get_num.remote())
def wait_until(condition, timeout_ms):
SLEEP_DURATION_MS = 100
time_elapsed = 0
while time_elapsed <= timeout_ms:
if condition():
return True
time.sleep(SLEEP_DURATION_MS)
time_elapsed += SLEEP_DURATION_MS
return False
def assert_available_resources():
return 1 == ray.available_resources()["CPU"]
result = wait_until(assert_available_resources, 1000)
assert result is True
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_failure.py
|
Python
|
import json
import logging
import os
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray.test_utils import (
relevant_errors,
wait_for_errors,
RayTestTimeoutException,
)
RAY_FORCE_DIRECT = ray_constants.direct_call_enabled()
def test_failed_task(ray_start_regular):
    """Exceptions raised inside remote tasks are pushed to the driver as
    TASK_PUSH errors and re-raised (with their original type) by ray.get."""

    @ray.remote
    def throw_exception_fct1():
        raise Exception("Test function 1 intentionally failed.")

    @ray.remote
    def throw_exception_fct2():
        raise Exception("Test function 2 intentionally failed.")

    @ray.remote(num_return_vals=3)
    def throw_exception_fct3(x):
        raise Exception("Test function 3 intentionally failed.")

    # Two failing invocations -> exactly two pushed errors.
    throw_exception_fct1.remote()
    throw_exception_fct1.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    assert len(relevant_errors(ray_constants.TASK_PUSH_ERROR)) == 2
    for task in relevant_errors(ray_constants.TASK_PUSH_ERROR):
        msg = task.get("message")
        assert "Test function 1 intentionally failed." in msg

    x = throw_exception_fct2.remote()
    try:
        ray.get(x)
    except Exception as e:
        assert "Test function 2 intentionally failed." in str(e)
    else:
        # ray.get should throw an exception.
        assert False

    # Every return value of a failed multi-return task carries the error.
    x, y, z = throw_exception_fct3.remote(1.0)
    for ref in [x, y, z]:
        try:
            ray.get(ref)
        except Exception as e:
            assert "Test function 3 intentionally failed." in str(e)
        else:
            # ray.get should throw an exception.
            assert False

    class CustomException(ValueError):
        pass

    @ray.remote
    def f():
        raise CustomException("This function failed.")

    try:
        ray.get(f.remote())
    except Exception as e:
        assert "This function failed." in str(e)
        # The re-raised error is both the user's type and a RayTaskError.
        assert isinstance(e, CustomException)
        assert isinstance(e, ray.exceptions.RayTaskError)
        assert "RayTaskError(CustomException)" in repr(e)
    else:
        # ray.get should throw an exception.
        assert False
def test_fail_importing_remote_function(ray_start_2_cpus):
    """A remote function whose closure cannot be unpickled on workers (it
    captures a module only importable on the driver) pushes import errors
    and raises on invocation instead of hanging."""
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)

    # Define a function that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    def g(x, y=3):
        try:
            module.temporary_python_file()
        except Exception:
            # This test is not concerned with the error from running this
            # function. Only from unpickling the remote function.
            pass

    # Invoke the function so that the definition is exported.
    g.remote(1, y=2)
    wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
    assert len(errors) >= 2, errors
    assert "No module named" in errors[0]["message"]
    assert "No module named" in errors[1]["message"]

    # Check that if we try to call the function it throws an exception and
    # does not hang.
    for _ in range(10):
        with pytest.raises(
                Exception, match="This function was not imported properly."):
            ray.get(g.remote(1, y=2))

    f.close()

    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus):
    """A run_function_on_all_workers callback that raises pushes one
    FUNCTION_TO_RUN error per worker (two workers here)."""

    def f(worker):
        # Only fail on actual workers, not on the driver process.
        if ray.worker.global_worker.mode == ray.WORKER_MODE:
            raise Exception("Function to run failed.")

    ray.worker.global_worker.run_function_on_all_workers(f)
    wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)
    # Check that the error message is in the task info.
    errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
    assert len(errors) == 2
    assert "Function to run failed." in errors[0]["message"]
    assert "Function to run failed." in errors[1]["message"]
def test_fail_importing_actor(ray_start_regular):
    """An actor class whose closure cannot be unpickled on workers pushes a
    registration error, and both __init__ and later method calls fail with
    informative errors instead of hanging."""
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)

    # Define an actor that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    class Foo:
        def __init__(self, arg1, arg2=3):
            self.x = module.temporary_python_file()

        def get_val(self, arg1, arg2=3):
            return 1

    # There should be no errors yet.
    assert len(ray.errors()) == 0

    # Create an actor.
    foo = Foo.remote(3, arg2=0)

    # Wait for the error to arrive.
    wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)
    assert "No module named" in errors[0]["message"]

    # Wait for the error from when the __init__ tries to run.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert ("failed to be imported, and so cannot execute this method" in
            errors[0]["message"])

    # Check that if we try to get the function it throws an exception and
    # does not hang.
    with pytest.raises(Exception, match="failed to be imported"):
        ray.get(foo.get_val.remote(1, arg2=2))

    # Wait for the error from when the call to get_val.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert ("failed to be imported, and so cannot execute this method" in
            errors[1]["message"])

    f.close()

    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular):
    """A failing actor constructor pushes an error, and later method calls on
    that actor surface the constructor's failure again."""
    error_message1 = "actor constructor failed"
    error_message2 = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            raise Exception(error_message1)

        def fail_method(self):
            raise Exception(error_message2)

    a = FailedActor.remote()

    # Make sure that we get errors from a failed constructor.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert error_message1 in errors[0]["message"]

    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 2
    # NOTE(review): the second error also carries the *constructor* message,
    # presumably because the method can never run on an uninitialized actor —
    # confirm against Ray's actor failure semantics.
    assert error_message1 in errors[1]["message"]
def test_failed_actor_method(ray_start_regular):
    """A method that raises should surface exactly one pushed task error."""
    expected_message = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            pass

        def fail_method(self):
            raise Exception(expected_message)

    actor = FailedActor.remote()

    # Fire the failing method and wait for its error to reach the driver.
    actor.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert expected_message in errors[0]["message"]
def test_incorrect_method_calls(ray_start_regular):
    """Wrong arities and nonexistent methods on actors raise immediately."""

    @ray.remote
    class Actor:
        def __init__(self, missing_variable_name):
            pass

        def get_val(self, x):
            pass

    # Make sure that we get errors if we call the constructor incorrectly.

    # Create an actor with too few arguments.
    with pytest.raises(Exception):
        a = Actor.remote()

    # Create an actor with too many arguments.
    with pytest.raises(Exception):
        a = Actor.remote(1, 2)

    # Create an actor the correct number of arguments.
    a = Actor.remote(1)

    # Call a method with too few arguments.
    with pytest.raises(Exception):
        a.get_val.remote()

    # Call a method with too many arguments.
    with pytest.raises(Exception):
        a.get_val.remote(1, 2)

    # Call a method that doesn't exist.
    with pytest.raises(AttributeError):
        a.nonexistent_method()
    with pytest.raises(AttributeError):
        a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular):
    """A crash in the worker's post-task machinery (not the task body itself)
    is reported as a WORKER_CRASH error."""

    @ray.remote
    def f():
        # This is the only reasonable variable we can set here that makes the
        # execute_task function fail after the task got executed.
        ray.experimental.signal.reset = None

    # Running this task should cause the worker to raise an exception after
    # the task has successfully completed.
    f.remote()

    wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)
def test_worker_dying(ray_start_regular):
    """A task whose worker process exits mid-task raises RayWorkerError and
    pushes a WORKER_DIED error."""

    # Define a remote function that will kill the worker that runs it.
    @ray.remote(max_retries=0)
    def f():
        # eval() hides the exit() call from static tooling.
        eval("exit()")

    with pytest.raises(ray.exceptions.RayWorkerError):
        ray.get(f.remote())

    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)

    errors = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert "died or was killed while executing" in errors[0]["message"]
def test_actor_worker_dying(ray_start_regular):
    """When an actor's worker dies, its pending result raises RayActorError
    and tasks depending on that result raise RayTaskError."""

    @ray.remote
    class Actor:
        def kill(self):
            eval("exit()")

    @ray.remote
    def consume(x):
        pass

    a = Actor.remote()
    [obj], _ = ray.wait([a.kill.remote()], timeout=5.0)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(obj)
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(consume.remote(obj))
    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_future_tasks(ray_start_regular):
    """Tasks submitted both before and after an actor's worker is SIGKILLed
    all fail (max_reconstructions=0 disables restarts)."""

    @ray.remote(max_reconstructions=0)
    class Actor:
        def getpid(self):
            return os.getpid()

        def sleep(self):
            time.sleep(1)

    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    # Queue work, then kill the worker process out from under it.
    tasks1 = [a.sleep.remote() for _ in range(10)]
    os.kill(pid, 9)
    time.sleep(0.1)
    # Work submitted after the death must fail too.
    tasks2 = [a.sleep.remote() for _ in range(10)]
    for obj in tasks1 + tasks2:
        with pytest.raises(Exception):
            ray.get(obj)

    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
    """Killing an idle actor's worker makes subsequent method calls fail."""

    @ray.remote(max_reconstructions=0)
    class Actor:
        def getpid(self):
            return os.getpid()

    actor = Actor.remote()
    worker_pid = ray.get(actor.getpid.remote())
    # SIGKILL the worker process, then give the raylet a moment to notice.
    os.kill(worker_pid, 9)
    time.sleep(0.1)
    with pytest.raises(Exception):
        ray.get(actor.getpid.remote())
def test_actor_scope_or_intentionally_killed_message(ray_start_regular):
    """Actors that go out of scope or are explicitly terminated must not be
    reported as errors."""

    @ray.remote
    class Actor:
        pass

    # The first actor goes out of scope when the name is rebound; the second
    # is killed explicitly.  Neither counts as an abnormal death.
    a = Actor.remote()
    a = Actor.remote()
    a.__ray_terminate__.remote()
    time.sleep(1)
    # Fixed typo in the failure message ("propogated" -> "propagated").
    assert len(
        ray.errors()) == 0, ("Should not have propagated an error - {}".format(
            ray.errors()))
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory):
    """(Skipped) Getting an evicted task-output object whose producer is still
    running should push a PUT_RECONSTRUCTION error rather than hang."""
    num_objects = 3
    object_size = 4 * 10**5

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    @ray.remote
    def put_arg_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = single_dependency.remote(0, np.zeros(
            object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)

        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i

        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])

    put_arg_task.remote()

    # Make sure we receive the correct error message.
    wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
    """(Skipped) Same as test_put_error1, but the first object comes from an
    explicit ray.put instead of a task output."""
    # This is the same as the previous test, but it calls ray.put directly.
    num_objects = 3
    object_size = 4 * 10**5

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    @ray.remote
    def put_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = ray.put(np.zeros(object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)

        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i

        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])

    put_task.remote()

    # Make sure we receive the correct error message.
    wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
def test_version_mismatch(shutdown_only):
    """Connecting with a mismatched Ray version pushes a warning.

    The version is restored in a ``finally`` block so a failure inside this
    test cannot leak the fake version into subsequent tests (the original
    code only reset it on the success path).
    """
    ray_version = ray.__version__
    ray.__version__ = "fake ray version"
    try:
        ray.init(num_cpus=1)
        wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)
    finally:
        # Reset the version.
        ray.__version__ = ray_version
def test_warning_monitor_died(ray_start_2_cpus):
    """Crashing the monitor (via a malformed Redis heartbeat message) should
    produce a MONITOR_DIED error on the driver."""

    @ray.remote
    def f():
        pass

    # Wait for the monitor process to start.
    ray.get(f.remote())
    time.sleep(1)

    # Cause the monitor to raise an exception by pushing a malformed message to
    # Redis. This will probably kill the raylet and the raylet_monitor in
    # addition to the monitor.
    fake_id = 20 * b"\x00"
    malformed_message = "asdf"
    redis_client = ray.worker.global_worker.redis_client
    redis_client.execute_command(
        "RAY.TABLE_ADD", ray.gcs_utils.TablePrefix.Value("HEARTBEAT_BATCH"),
        ray.gcs_utils.TablePubsub.Value("HEARTBEAT_BATCH_PUBSUB"), fake_id,
        malformed_message)

    wait_for_errors(ray_constants.MONITOR_DIED_ERROR, 1)
def test_export_large_objects(ray_start_regular):
    """Exporting a remote function or actor whose closure captures a very
    large object should push a PICKLING_LARGE_OBJECT warning.

    (The redundant in-function ``import ray.ray_constants`` was removed; the
    module is already imported as ``ray_constants`` at the top of this file.)
    """
    large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)

    @ray.remote
    def f():
        large_object

    # Invoke the function so that the definition is exported.
    f.remote()

    # Make sure that a warning is generated.
    wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)

    @ray.remote
    class Foo:
        def __init__(self):
            large_object

    Foo.remote()

    # Make sure that a warning is generated.
    wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
@pytest.mark.skipif(RAY_FORCE_DIRECT, reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(shutdown_only):
    """A blocked task holding the only CPU while waiting on unschedulable
    actors should trigger a RESOURCE_DEADLOCK warning."""
    # Check that we get warning messages for infeasible tasks.
    ray.init(num_cpus=1)

    @ray.remote(num_cpus=1)
    class Foo:
        def f(self):
            return 0

    @ray.remote
    def f():
        # Creating both actors is not possible.
        actors = [Foo.remote() for _ in range(2)]
        for a in actors:
            ray.get(a.f.remote())

    # Run in a task to check we handle the blocked task case correctly
    f.remote()
    wait_for_errors(ray_constants.RESOURCE_DEADLOCK_ERROR, 1, timeout=30)
def test_warning_for_infeasible_tasks(ray_start_regular):
    """Tasks/actors requesting resources the cluster can never satisfy
    produce INFEASIBLE_TASK warnings."""
    # Check that we get warning messages for infeasible tasks.

    @ray.remote(num_gpus=1)
    def f():
        pass

    @ray.remote(resources={"Custom": 1})
    class Foo:
        pass

    # This task is infeasible.
    f.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)

    # This actor placement task is infeasible.
    Foo.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
    """Placing a default actor on a zero-CPU cluster must warn as infeasible."""
    # Check that we cannot place an actor on a 0 CPU machine and that we get an
    # infeasibility warning (even though the actor creation task itself
    # requires no CPUs).
    ray.init(num_cpus=0)

    @ray.remote
    class Foo:
        pass

    # The actor creation should be infeasible.
    Foo.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
def test_warning_for_too_many_actors(shutdown_only):
    """Creating far more long-lived actors than CPUs triggers repeated
    WORKER_POOL_LARGE warnings."""
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)

    @ray.remote
    class Foo:
        def __init__(self):
            # Keep each actor worker alive (and its process around).
            time.sleep(1000)

    [Foo.remote() for _ in range(num_cpus * 3)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
    [Foo.remote() for _ in range(num_cpus)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)
def test_warning_for_too_many_nested_tasks(shutdown_only):
    """Deeply blocked nested tasks that force many extra workers to start
    should trigger a WORKER_POOL_LARGE warning."""
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)

    @ray.remote
    def f():
        time.sleep(1000)
        return 1

    @ray.remote
    def h():
        time.sleep(1)
        ray.get(f.remote())

    @ray.remote
    def g():
        # Sleep so that the f tasks all get submitted to the scheduler after
        # the g tasks.
        time.sleep(1)
        ray.get(h.remote())

    [g.remote() for _ in range(num_cpus * 4)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def create_remote_function():
@ray.remote
def g():
return 1
return ray.get(g.remote())
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_remote_function.remote())
import io
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): It's terrible to have to rely on this implementation detail,
# the fact that the warning comes from ray.import_thread.logger. However,
# I didn't find a good way to capture the output for all loggers
# simultaneously.
ray.import_thread.logger.addHandler(ch)
ray.get(create_remote_function.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "remote function" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Now test the same thing but for actors.
@ray.remote
def create_actor_class():
# Require a GPU so that the actor is never actually created and we
# don't spawn an unreasonable number of processes.
@ray.remote(num_gpus=1)
class Foo:
pass
Foo.remote()
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_actor_class.remote())
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): As mentioned above, it's terrible to have to rely on this
# implementation detail.
ray.import_thread.logger.addHandler(ch)
ray.get(create_actor_class.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
    """Exercise argument-validation error paths of the custom Redis module
    commands (RAY.TABLE_* / RAY.SET_*)."""
    address_info = ray_start_regular
    address = address_info["redis_address"]
    address = address.split(":")
    assert len(address) == 2

    def run_failure_test(expecting_message, *command):
        # The Redis module should reject the command with a matching error.
        with pytest.raises(
                Exception, match=".*{}.*".format(expecting_message)):
            client = redis.StrictRedis(
                host=address[0],
                port=int(address[1]),
                password=ray_constants.REDIS_DEFAULT_PASSWORD)
            client.execute_command(*command)

    def run_one_command(*command):
        # The command is expected to succeed.
        client = redis.StrictRedis(
            host=address[0],
            port=int(address[1]),
            password=ray_constants.REDIS_DEFAULT_PASSWORD)
        client.execute_command(*command)

    run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_ADD", 100000, 1, 1, 1)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
    run_failure_test("Prefix must be a valid TablePrefix integer",
                     "RAY.TABLE_ADD", b"a", 1, 1, 1)
    run_failure_test("Pubsub channel must be in the TablePubsub range",
                     "RAY.TABLE_ADD", 1, 10000, 1, 1)
    run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
                     1, b"a", 1, 1)
    # Change the key from 1 to 2, since the previous command should have
    # succeeded at writing the key, but not publishing it.
    run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     -1)
    run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     b"a")
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes):
    """Killing both raylets should produce REMOVED_NODE errors that name
    exactly the dead nodes."""
    cluster = ray_start_cluster_2_nodes
    cluster.wait_for_nodes()

    node_ids = {item["NodeID"] for item in ray.nodes()}

    # Try to make sure that the monitor has received at least one heartbeat
    # from the node.
    time.sleep(0.5)

    # Kill both raylets.
    cluster.list_all_nodes()[1].kill_raylet()
    cluster.list_all_nodes()[0].kill_raylet()

    # Check that we get warning messages for both raylets.
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)

    # Extract the client IDs from the error messages. This will need to be
    # changed if the error message changes.
    warning_node_ids = {
        item["message"].split(" ")[5]
        for item in relevant_errors(ray_constants.REMOVED_NODE_ERROR)
    }

    assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
    """A ray.get blocked on a nonexistent object should raise
    UnreconstructableError (not hang) when the raylet dies underneath it."""

    def sleep_to_kill_raylet():
        # Don't kill raylet before default workers get connected.
        time.sleep(2)
        ray.worker._global_node.kill_raylet()

    thread = threading.Thread(target=sleep_to_kill_raylet)
    thread.start()
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(ray.ObjectID.from_random())
    thread.join()
def test_connect_with_disconnected_node(shutdown_only):
    """SIGKILLed nodes are reported as REMOVED_NODE by the monitor; a node
    removed gracefully (SIGTERM) is not reported again."""
    config = json.dumps({
        "num_heartbeats_timeout": 50,
        "raylet_heartbeat_timeout_milliseconds": 10,
    })
    cluster = Cluster()
    cluster.add_node(num_cpus=0, _internal_config=config)
    ray.init(address=cluster.address)
    info = relevant_errors(ray_constants.REMOVED_NODE_ERROR)
    assert len(info) == 0
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(dead_node, allow_graceful=False)
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 1, timeout=2)
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(dead_node, allow_graceful=False)
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=2)
    # This node is killed by SIGTERM, ray_monitor will not mark it again.
    removing_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(removing_node, allow_graceful=True)
    with pytest.raises(RayTestTimeoutException):
        wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 3, timeout=2)
    # There is no connection error to a dead node.
    info = relevant_errors(ray_constants.RAYLET_CONNECTION_ERROR)
    assert len(info) == 0
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8
    }],
    indirect=True)
@pytest.mark.parametrize("num_actors", [1, 2, 5])
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head, num_actors):
    """Many actors concurrently producing results that (together) exceed the
    object store should still complete via eviction/retry, not deadlock."""

    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Each result is half the configured object store memory.
            return np.zeros(10**8 // 2, dtype=np.uint8)

    actors = [LargeMemoryActor.remote() for _ in range(num_actors)]
    for _ in range(10):
        pending = [a.some_expensive_task.remote() for a in actors]
        while pending:
            [done], pending = ray.wait(pending, num_returns=1)
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 2,
        "object_store_memory": 10**8
    }],
    indirect=True)
def test_fill_object_store_exception(ray_start_cluster_head):
    """Producing an object larger than the object store raises cleanly
    (RayTaskError from a task, ObjectStoreFullError from ray.put) and does
    not kill the actor."""

    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # One byte larger than the configured object store memory.
            return np.zeros(10**8 + 2, dtype=np.uint8)

        def test(self):
            return 1

    actor = LargeMemoryActor.remote()
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(actor.some_expensive_task.remote())
    # Make sure actor does not die
    ray.get(actor.test.remote())

    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_direct_call_eviction(ray_start_cluster):
    """Freed (evicted) objects raise UnreconstructableError whether fetched
    directly or passed by reference to another task."""

    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    obj = large_object.remote()
    assert (isinstance(ray.get(obj), np.ndarray))
    # Evict the object.
    ray.internal.free([obj])
    # Wait until the local object store no longer holds it.
    while ray.worker.global_worker.core_worker.object_exists(obj):
        time.sleep(1)
    # ray.get throws an exception.
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(obj)

    @ray.remote
    def dependent_task(x):
        return

    # If the object is passed by reference, the task throws an
    # exception.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_direct_call_serialized_id_eviction(ray_start_cluster):
    """An ObjectID serialized inside another task's arguments still observes
    eviction: getting it after free raises UnreconstructableError."""

    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    @ray.remote
    def get(obj_ids):
        # obj_ids is a list, so the contained ID is serialized, not inlined.
        obj_id = obj_ids[0]
        assert (isinstance(ray.get(obj_id), np.ndarray))
        # Wait for the object to be evicted.
        ray.internal.free(obj_id)
        while ray.worker.global_worker.core_worker.object_exists(obj_id):
            time.sleep(1)
        with pytest.raises(ray.exceptions.UnreconstructableError):
            ray.get(obj_id)
        print("get done", obj_ids)

    obj = large_object.remote()
    result = get.remote([obj])
    ray.internal.free(obj)
    ray.get(result)
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 2,
        "num_cpus": 1,
    }, {
        "num_nodes": 1,
        "num_cpus": 2,
    }],
    indirect=True)
def test_serialized_id(ray_start_cluster):
    """ObjectIDs serialized inside task arguments (wrapped in a list) can be
    fetched inside the receiving task, both for task outputs and ray.put
    objects, directly or via a dependent task."""

    @ray.remote
    def small_object():
        # Sleep a bit before creating the object to force a timeout
        # at the getter.
        time.sleep(1)
        return 1

    @ray.remote
    def dependent_task(x):
        return x

    @ray.remote
    def get(obj_ids, test_dependent_task):
        print("get", obj_ids)
        obj_id = obj_ids[0]
        if test_dependent_task:
            assert ray.get(dependent_task.remote(obj_id)) == 1
        else:
            assert ray.get(obj_id) == 1

    # Task output, fetched directly and via a dependent task.
    obj = small_object.remote()
    ray.get(get.remote([obj], False))

    obj = small_object.remote()
    ray.get(get.remote([obj], True))

    # ray.put object, fetched directly and via a dependent task.
    obj = ray.put(1)
    ray.get(get.remote([obj], False))

    obj = ray.put(1)
    ray.get(get.remote([obj], True))
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_global_state.py
|
Python
|
import pytest
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import time
import ray
# TODO(rliaw): The proper way to do this is to have the pytest config setup.
@pytest.mark.skipif(
    pytest_timeout is None,
    reason="Timeout package not installed; skipping test that may hang.")
@pytest.mark.timeout(10)
def test_replenish_resources(ray_start_regular):
    """Resources consumed by a finished task are eventually returned, so
    available_resources converges back to cluster_resources."""
    cluster_resources = ray.cluster_resources()
    available_resources = ray.available_resources()
    assert cluster_resources == available_resources

    @ray.remote
    def cpu_task():
        pass

    ray.get(cpu_task.remote())
    resources_reset = False

    # Busy-poll until the resources are replenished; the @timeout(10)
    # decorator bounds this loop.
    while not resources_reset:
        available_resources = ray.available_resources()
        resources_reset = (cluster_resources == available_resources)
    assert resources_reset
@pytest.mark.skipif(
    pytest_timeout is None,
    reason="Timeout package not installed; skipping test that may hang.")
@pytest.mark.timeout(10)
def test_uses_resources(ray_start_regular):
    """A running task is reflected in available_resources as one fewer CPU."""
    cluster_resources = ray.cluster_resources()

    @ray.remote
    def cpu_task():
        time.sleep(1)

    cpu_task.remote()
    resource_used = False

    # Busy-poll until the task's CPU shows up as in use; bounded by @timeout.
    while not resource_used:
        available_resources = ray.available_resources()
        resource_used = available_resources.get(
            "CPU", 0) == cluster_resources.get("CPU", 0) - 1

    assert resource_used
@pytest.mark.skipif(
    pytest_timeout is None,
    reason="Timeout package not installed; skipping test that may hang.")
@pytest.mark.timeout(20)
def test_add_remove_cluster_resources(ray_start_cluster_head):
    """Tests that Global State API is consistent with actual cluster."""
    cluster = ray_start_cluster_head
    assert ray.cluster_resources()["CPU"] == 1

    # Add a single node and confirm the CPU count doubles.
    nodes = [cluster.add_node(num_cpus=1)]
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 2

    # Remove it again; the count should fall back to the head node's CPU.
    cluster.remove_node(nodes.pop())
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 1

    # Add five more nodes and verify the aggregate.
    nodes.extend(cluster.add_node(num_cpus=1) for _ in range(5))
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 6
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_iter.py
|
Python
|
import time
import ray
from ray.experimental.iter import from_items, from_iterators, from_range, \
from_actors, ParallelIteratorWorker
def test_from_items(ray_start_regular_shared):
    """from_items shards a list; gather_sync replays all items in order."""
    it = from_items([1, 2, 3, 4])
    assert repr(it) == "ParallelIterator[from_items[int, 4, shards=2]]"
    assert list(it.gather_sync()) == [1, 2, 3, 4]
    assert next(it.gather_sync()) == 1
def test_from_items_repeat(ray_start_regular_shared):
    """repeat=True makes from_items cycle through the items indefinitely."""
    it = from_items([1, 2, 3, 4], repeat=True)
    assert repr(
        it) == "ParallelIterator[from_items[int, 4, shards=2, repeat=True]]"
    assert it.take(8) == [1, 2, 3, 4, 1, 2, 3, 4]
def test_from_iterators(ray_start_regular_shared):
    """from_iterators maps each source iterable to one shard; gather_sync
    interleaves across shards round-robin."""
    it = from_iterators([range(2), range(2)])
    assert repr(it) == "ParallelIterator[from_iterators[shards=2]]"
    assert list(it.gather_sync()) == [0, 0, 1, 1]
def test_from_range(ray_start_regular_shared):
    """from_range splits [0, n) across shards; interleaving yields 0,2,1,3."""
    it = from_range(4)
    assert repr(it) == "ParallelIterator[from_range[4, shards=2]]"
    assert list(it.gather_sync()) == [0, 2, 1, 3]
def test_from_actors(ray_start_regular_shared):
    """from_actors builds a parallel iterator over pre-created worker actors."""

    @ray.remote
    class CustomWorker(ParallelIteratorWorker):
        def __init__(self, data):
            ParallelIteratorWorker.__init__(self, data, False)

    a = CustomWorker.remote([1, 2])
    b = CustomWorker.remote([3, 4])
    it = from_actors([a, b])
    assert repr(it) == "ParallelIterator[from_actors[shards=2]]"
    assert list(it.gather_sync()) == [1, 3, 2, 4]
def test_for_each(ray_start_regular_shared):
    """for_each lazily maps a transform over every element of each shard."""
    doubled = from_range(4).for_each(lambda v: v * 2)
    assert repr(doubled) == "ParallelIterator[from_range[4, shards=2].for_each()]"
    # Same interleaved order as from_range, with every element doubled.
    assert list(doubled.gather_sync()) == [0, 4, 2, 6]
def test_combine(ray_start_regular_shared):
    """combine maps one element to many and flattens the result in place."""
    duplicated = from_range(4, 1).combine(lambda v: [v, v])
    assert repr(duplicated) == "ParallelIterator[from_range[4, shards=1].combine()]"
    assert list(duplicated.gather_sync()) == [0, 0, 1, 1, 2, 2, 3, 3]
def test_chain(ray_start_regular_shared):
    """Chained for_each calls compose; here each element is quadrupled."""
    quadrupled = from_range(4).for_each(lambda v: v * 2).for_each(
        lambda v: v * 2)
    expected_repr = (
        "ParallelIterator[from_range[4, shards=2].for_each().for_each()]")
    assert repr(quadrupled) == expected_repr
    assert list(quadrupled.gather_sync()) == [0, 8, 4, 12]
def test_filter(ray_start_regular_shared):
    """filter drops elements that fail the predicate on each shard."""
    kept = from_range(4).filter(lambda v: v < 3)
    assert repr(kept) == "ParallelIterator[from_range[4, shards=2].filter()]"
    # 3 is dropped from shard 1; the survivors still interleave by shard.
    assert list(kept.gather_sync()) == [0, 2, 1]
def test_batch(ray_start_regular_shared):
    """batch groups consecutive elements of a shard into fixed-size lists."""
    batched = from_range(4, 1).batch(2)
    assert repr(batched) == "ParallelIterator[from_range[4, shards=1].batch(2)]"
    assert list(batched.gather_sync()) == [[0, 1], [2, 3]]
def test_flatten(ray_start_regular_shared):
    """flatten unrolls list-valued elements back into a flat stream."""
    nested = from_items([[1, 2], [3, 4]], 1)
    flat = nested.flatten()
    expected_repr = "ParallelIterator[from_items[list, 2, shards=1].flatten()]"
    assert repr(flat) == expected_repr
    assert list(flat.gather_sync()) == [1, 2, 3, 4]
def test_gather_sync(ray_start_regular_shared):
    """gather_sync converts a ParallelIterator into a LocalIterator."""
    local_it = from_range(4).gather_sync()
    expected_repr = (
        "LocalIterator[ParallelIterator[from_range[4, shards=2]]"
        ".gather_sync()]")
    assert repr(local_it) == expected_repr
    # Only the element set is guaranteed, not the interleaving order.
    assert sorted(local_it) == [0, 1, 2, 3]
def test_gather_async(ray_start_regular_shared):
    """gather_async also yields all elements, in completion order."""
    local_it = from_range(4).gather_async()
    expected_repr = (
        "LocalIterator[ParallelIterator[from_range[4, shards=2]]"
        ".gather_async()]")
    assert repr(local_it) == expected_repr
    # Async arrival order is nondeterministic, so compare sorted output.
    assert sorted(local_it) == [0, 1, 2, 3]
def test_batch_across_shards(ray_start_regular_shared):
    """batch_across_shards emits one list holding an element from each shard."""
    local_it = from_iterators([[0, 1], [2, 3]]).batch_across_shards()
    expected_repr = (
        "LocalIterator[ParallelIterator[from_iterators[shards=2]]"
        ".batch_across_shards()]")
    assert repr(local_it) == expected_repr
    # Each batch pairs the i-th element of shard 0 with that of shard 1.
    assert sorted(local_it) == [[0, 2], [1, 3]]
def test_remote(ray_start_regular_shared):
    """Individual shards can be fetched from inside remote tasks.

    Checks that a ParallelIterator can be serialized into a task, and that
    get_shard(i) works both one level deep and from a nested remote task.
    """
    it = from_iterators([[0, 1], [3, 4], [5, 6, 7]])
    assert it.num_shards() == 3

    @ray.remote
    def get_shard(it, i):
        # Materialize shard i inside the worker.
        return list(it.get_shard(i))

    assert ray.get(get_shard.remote(it, 0)) == [0, 1]
    assert ray.get(get_shard.remote(it, 1)) == [3, 4]
    assert ray.get(get_shard.remote(it, 2)) == [5, 6, 7]

    @ray.remote
    def check_remote(it):
        # Same checks, but launched from within another remote task.
        assert ray.get(get_shard.remote(it, 0)) == [0, 1]
        assert ray.get(get_shard.remote(it, 1)) == [3, 4]
        assert ray.get(get_shard.remote(it, 2)) == [5, 6, 7]

    ray.get(check_remote.remote(it))
def test_union(ray_start_regular_shared):
    """Union of two parallel iterators keeps both sources' shards."""
    it1 = from_items(["a", "b", "c"], 1)
    it2 = from_items(["x", "y", "z"], 1)
    it = it1.union(it2)
    assert (repr(it) == "ParallelIterator[ParallelUnion[ParallelIterator["
            "from_items[str, 3, shards=1]], ParallelIterator["
            "from_items[str, 3, shards=1]]]]")
    # Sync gather alternates between the two underlying single-shard sources.
    assert list(it.gather_sync()) == ["a", "x", "b", "y", "c", "z"]
def test_union_local(ray_start_regular_shared):
    """Union of two local (async) iterators yields every element of both."""
    letters = from_items(["a", "b", "c"], 1).gather_async()
    digits = from_range(5, 2).for_each(str).gather_async()
    combined = letters.union(digits)
    # Async interleaving is nondeterministic, so only the element set is
    # checked.
    assert sorted(combined) == ["0", "1", "2", "3", "4", "a", "b", "c"]
def test_union_async(ray_start_regular_shared):
    """An async union does not block the fast source on the slow one.

    Timing-based: the fast generator (0.05s/item) should drain well before
    the slow one (0.3s/item), so the tail of the gathered stream must come
    entirely from the slow source.
    """

    def gen_fast():
        for i in range(10):
            time.sleep(0.05)
            print("PRODUCE FAST", i)
            yield i

    def gen_slow():
        for i in range(10):
            time.sleep(0.3)
            print("PRODUCE SLOW", i)
            yield i

    it1 = from_iterators([gen_fast]).for_each(lambda x: ("fast", x))
    it2 = from_iterators([gen_slow]).for_each(lambda x: ("slow", x))
    it = it1.union(it2)
    results = list(it.gather_async())
    # The last few results must all be from the slow producer.
    assert all(x[0] == "slow" for x in results[-3:]), results
def test_union_local_async(ray_start_regular_shared):
    """Same fast/slow producer check for a union of two LocalIterators."""

    def gen_fast():
        for i in range(10):
            time.sleep(0.05)
            print("PRODUCE FAST", i)
            yield i

    def gen_slow():
        for i in range(10):
            time.sleep(0.3)
            print("PRODUCE SLOW", i)
            yield i

    it1 = from_iterators([gen_fast]).for_each(lambda x: ("fast", x))
    it2 = from_iterators([gen_slow]).for_each(lambda x: ("slow", x))
    # Union is applied after gather_async(), i.e. on local iterators.
    it = it1.gather_async().union(it2.gather_async())
    assert (repr(it) == "LocalIterator[LocalUnion[LocalIterator["
            "ParallelIterator[from_iterators[shards=1].for_each()]"
            ".gather_async()], LocalIterator[ParallelIterator["
            "from_iterators[shards=1].for_each()].gather_async()]]]")
    results = list(it)
    # The slow producer should supply the final elements of the stream.
    assert all(x[0] == "slow" for x in results[-3:]), results
def test_serialization(ray_start_regular_shared):
    """A fully chained LocalIterator survives pickling into a remote task."""
    it = (from_items([1, 2, 3, 4]).gather_sync().for_each(lambda x: x)
          .filter(lambda x: True).batch(2).flatten())
    assert (repr(it) == "LocalIterator[ParallelIterator["
            "from_items[int, 4, shards=2]].gather_sync()."
            "for_each().filter().batch(2).flatten()]")

    @ray.remote
    def get(it):
        return list(it)

    # The iterator pipeline is serialized, shipped to a worker, and consumed
    # there; the no-op transforms must round-trip the original items.
    assert ray.get(get.remote(it)) == [1, 2, 3, 4]
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest/bazel wrapper).
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_memory_limits.py
|
Python
|
import numpy as np
import unittest
import ray
MB = 1024 * 1024
OBJECT_EVICTED = ray.exceptions.UnreconstructableError
OBJECT_TOO_LARGE = ray.exceptions.ObjectStoreFullError
@ray.remote
class LightActor:
    """Actor producing small (5 MB) arrays; the light object-store user."""

    def __init__(self):
        pass

    def sample(self):
        return np.zeros(5 * MB, dtype=np.uint8)
@ray.remote
class GreedyActor:
    """Actor producing large (20 MB) arrays; used to pressure the store."""

    def __init__(self):
        pass

    def sample(self):
        return np.zeros(20 * MB, dtype=np.uint8)
class TestMemoryLimits(unittest.TestCase):
    """Object-store memory quota behavior.

    `_run` starts a 300 MB object store, then has a GreedyActor churn large
    objects while a LightActor and the driver hold smaller ones.  Without a
    quota the greedy churn evicts the others' objects (UnreconstructableError
    on ray.get); with a quota each client's objects are protected.
    """

    def testWithoutQuota(self):
        # Driver quota alone protects the driver's object...
        self._run(100 * MB, None, None)
        # ...but with no quota (or only the light actor protected), the
        # driver's weakref'd object gets evicted.
        self.assertRaises(OBJECT_EVICTED, lambda: self._run(None, None, None))
        self.assertRaises(OBJECT_EVICTED,
                          lambda: self._run(None, 100 * MB, None))

    def testQuotasProtectSelf(self):
        # Quotas on both the driver and the light actor: nothing is evicted.
        self._run(100 * MB, 100 * MB, None)

    def testQuotasProtectOthers(self):
        # Capping the greedy actor also protects everyone else.
        self._run(None, None, 100 * MB)

    def testQuotaTooLarge(self):
        # A driver quota bigger than the store itself must fail fast.
        self.assertRaisesRegexp(ray.memory_monitor.RayOutOfMemoryError,
                                ".*Failed to set object_store_memory.*",
                                lambda: self._run(300 * MB, None, None))

    def testTooLargeAllocation(self):
        # A single put larger than the store raises ObjectStoreFullError.
        try:
            ray.init(num_cpus=1, driver_object_store_memory=100 * MB)
            ray.put(np.zeros(50 * MB, dtype=np.uint8), weakref=True)
            self.assertRaises(
                OBJECT_TOO_LARGE,
                lambda: ray.put(np.zeros(200 * MB, dtype=np.uint8)))
        finally:
            ray.shutdown()

    def _run(self, driver_quota, a_quota, b_quota):
        """Run the eviction scenario with the given per-client quotas.

        Args:
            driver_quota: driver_object_store_memory for ray.init, or None.
            a_quota: object_store_memory for the LightActor, or None.
            b_quota: object_store_memory for the GreedyActor, or None.
        """
        print("*** Testing ***", driver_quota, a_quota, b_quota)
        try:
            ray.init(
                num_cpus=1,
                object_store_memory=300 * MB,
                driver_object_store_memory=driver_quota)
            # weakref=True so the driver does not pin the object itself;
            # only a quota can keep it alive under store pressure.
            z = ray.put("hi", weakref=True)
            a = LightActor._remote(object_store_memory=a_quota)
            b = GreedyActor._remote(object_store_memory=b_quota)
            for _ in range(5):
                r_a = a.sample.remote()
                # The greedy actor churns 20 x 20 MB objects to force
                # evictions in the 300 MB store.
                for _ in range(20):
                    new_oid = b.sample.remote()
                    ray.get(new_oid)
                ray.get(r_a)
            # Raises UnreconstructableError if z was evicted.
            ray.get(z)
        except Exception as e:
            print("Raised exception", type(e), e)
            raise e
        finally:
            print(ray.worker.global_worker.core_worker.
                  dump_object_store_memory_usage())
            ray.shutdown()
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest/bazel wrapper).
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_memory_scheduling.py
|
Python
|
import numpy as np
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
MB = 1024 * 1024
@ray.remote(memory=100 * MB)
class Actor:
    """Actor requesting 100 MB of heap memory as a scheduling resource."""

    def __init__(self):
        pass

    def ping(self):
        return "ok"
@ray.remote(object_store_memory=100 * MB)
class Actor2:
    """Actor requesting 100 MB of object-store memory as a resource."""

    def __init__(self):
        pass

    def ping(self):
        return "ok"
def train_oom(config, reporter):
    """Tune trainable that puts ~1.6 GB (200M float64 zeros) into the store.

    Used to verify that a per-trial object_store_memory cap makes the put
    fail with ObjectStoreFullError.
    """
    ray.put(np.zeros(200 * 1024 * 1024))
    reporter(result=123)
class TestMemoryScheduling(unittest.TestCase):
    """Memory-based scheduling: actors and Tune trials that request more
    memory / object-store memory than the cluster has must not be scheduled
    (or must error out), while requests that fit must run."""

    def testMemoryRequest(self):
        # Two 100 MB actors fit in a 200 MB cluster; a third does not.
        try:
            ray.init(num_cpus=1, memory=200 * MB)
            # fits first 2
            a = Actor.remote()
            b = Actor.remote()
            ok, _ = ray.wait(
                [a.ping.remote(), b.ping.remote()],
                timeout=60.0,
                num_returns=2)
            self.assertEqual(len(ok), 2)
            # does not fit
            c = Actor.remote()
            ok, _ = ray.wait([c.ping.remote()], timeout=5.0)
            self.assertEqual(len(ok), 0)
        finally:
            ray.shutdown()

    def testObjectStoreMemoryRequest(self):
        # Same shape as above, but for object_store_memory requests.
        try:
            ray.init(num_cpus=1, object_store_memory=300 * MB)
            # fits first 2 (70% allowed)
            a = Actor2.remote()
            b = Actor2.remote()
            ok, _ = ray.wait(
                [a.ping.remote(), b.ping.remote()],
                timeout=60.0,
                num_returns=2)
            self.assertEqual(len(ok), 2)
            # does not fit
            c = Actor2.remote()
            ok, _ = ray.wait([c.ping.remote()], timeout=5.0)
            self.assertEqual(len(ok), 0)
        finally:
            ray.shutdown()

    def testTuneDriverHeapLimit(self):
        # A 100 MB heap cap for the PG trainer driver is too small: the
        # trial must fail with a RayOutOfMemoryError in its error message.
        try:
            _register_all()
            result = tune.run(
                "PG",
                stop={"timesteps_total": 10000},
                config={
                    "env": "CartPole-v0",
                    "memory": 100 * 1024 * 1024,  # too little
                },
                raise_on_failed_trial=False)
            self.assertEqual(result.trials[0].status, "ERROR")
            self.assertTrue(
                "RayOutOfMemoryError: Heap memory usage for ray_PG_" in
                result.trials[0].error_msg)
        finally:
            ray.shutdown()

    def testTuneDriverStoreLimit(self):
        # Requesting more object-store memory than the cluster has should
        # be rejected up front with an "Insufficient cluster resources"
        # TuneError, not scheduled.
        try:
            _register_all()
            self.assertRaisesRegexp(
                ray.tune.error.TuneError,
                ".*Insufficient cluster resources.*",
                lambda: tune.run(
                    "PG",
                    stop={"timesteps_total": 10000},
                    config={
                        "env": "CartPole-v0",
                        # too large
                        "object_store_memory": 10000 * 1024 * 1024,
                    }))
        finally:
            ray.shutdown()

    def testTuneWorkerHeapLimit(self):
        # Same heap-cap check, applied to rollout workers instead of the
        # trainer driver.
        try:
            _register_all()
            result = tune.run(
                "PG",
                stop={"timesteps_total": 10000},
                config={
                    "env": "CartPole-v0",
                    "num_workers": 1,
                    "memory_per_worker": 100 * 1024 * 1024,  # too little
                },
                raise_on_failed_trial=False)
            self.assertEqual(result.trials[0].status, "ERROR")
            self.assertTrue(
                "RayOutOfMemoryError: Heap memory usage for ray_Rollout" in
                result.trials[0].error_msg)
        finally:
            ray.shutdown()

    def testTuneWorkerStoreLimit(self):
        # Oversized per-worker object-store request is rejected up front.
        try:
            _register_all()
            self.assertRaisesRegexp(
                ray.tune.error.TuneError,
                ".*Insufficient cluster resources.*",
                lambda:
                tune.run("PG", stop={"timesteps_total": 0}, config={
                    "env": "CartPole-v0",
                    "num_workers": 1,
                    # too large
                    "object_store_memory_per_worker": 10000 * 1024 * 1024,
                }))
        finally:
            ray.shutdown()

    def testTuneObjectLimitApplied(self):
        # The per-trial object-store cap must actually be enforced: a trial
        # that puts more than its 150 MB quota fails with
        # ObjectStoreFullError.
        try:
            result = tune.run(
                train_oom,
                resources_per_trial={"object_store_memory": 150 * 1024 * 1024},
                raise_on_failed_trial=False)
            self.assertTrue(result.trials[0].status, "ERROR")
            self.assertTrue("ObjectStoreFullError: Failed to put" in
                            result.trials[0].error_msg)
        finally:
            ray.shutdown()
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest/bazel wrapper).
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_metrics.py
|
Python
|
import os
import grpc
import psutil
import requests
import time
import ray
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
from ray.test_utils import RayTestTimeoutException
def test_worker_stats(shutdown_only):
    """Query the raylet's GetNodeStats gRPC endpoint and sanity-check it.

    Verifies: exactly one connected driver (this process), that
    ray.show_in_webui text shows up in the reporting worker's stats (for
    both tasks and actors), and that the expected number of worker
    processes and stat views eventually appear.
    """
    ray.init(num_cpus=1, include_webui=False)
    raylet = ray.nodes()[0]
    num_cpus = raylet["Resources"]["CPU"]
    raylet_address = "{}:{}".format(raylet["NodeManagerAddress"],
                                    ray.nodes()[0]["NodeManagerPort"])
    channel = grpc.insecure_channel(raylet_address)
    stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)

    def try_get_node_stats(num_retry=5, timeout=2):
        # The raylet may not be serving yet; retry transient RPC errors.
        reply = None
        for _ in range(num_retry):
            try:
                reply = stub.GetNodeStats(
                    node_manager_pb2.GetNodeStatsRequest(), timeout=timeout)
                break
            except grpc.RpcError:
                continue
        assert reply is not None
        return reply

    reply = try_get_node_stats()
    # Check that there is one connected driver.
    drivers = [worker for worker in reply.workers_stats if worker.is_driver]
    assert len(drivers) == 1
    assert os.getpid() == drivers[0].pid

    @ray.remote
    def f():
        ray.show_in_webui("test")
        return os.getpid()

    @ray.remote
    class Actor:
        def __init__(self):
            pass

        def f(self):
            ray.show_in_webui("test")
            return os.getpid()

    # Test show_in_webui for remote functions.
    worker_pid = ray.get(f.remote())
    reply = try_get_node_stats()
    target_worker_present = False
    for worker in reply.workers_stats:
        stats = worker.core_worker_stats
        if stats.webui_display == "test":
            target_worker_present = True
            # Only the worker that ran f() carries the display string.
            assert worker.pid == worker_pid
        else:
            assert stats.webui_display == ""
    assert target_worker_present

    # Test show_in_webui for remote actors.
    a = Actor.remote()
    worker_pid = ray.get(a.f.remote())
    reply = try_get_node_stats()
    target_worker_present = False
    for worker in reply.workers_stats:
        stats = worker.core_worker_stats
        if stats.webui_display == "test":
            target_worker_present = True
            assert worker.pid == worker_pid
        else:
            assert stats.webui_display == ""
    assert target_worker_present

    timeout_seconds = 20
    start_time = time.time()
    while True:
        if time.time() - start_time > timeout_seconds:
            raise RayTestTimeoutException(
                "Timed out while waiting for worker processes")
        # Wait for the workers to start.
        if len(reply.workers_stats) < num_cpus + 1:
            time.sleep(1)
            reply = try_get_node_stats()
            continue
        # Check that the rest of the processes are workers, 1 for each CPU.
        assert len(reply.workers_stats) == num_cpus + 1
        views = [view.view_name for view in reply.view_data]
        assert "redis_latency" in views
        assert "local_available_resource" in views
        # Check that all processes are Python.
        pids = [worker.pid for worker in reply.workers_stats]
        processes = [
            p.info["name"] for p in psutil.process_iter(attrs=["pid", "name"])
            if p.info["pid"] in pids
        ]
        for process in processes:
            # TODO(ekl) why does travis/mi end up in the process list
            assert ("python" in process or "ray" in process
                    or "travis" in process)
        break
def test_raylet_info_endpoint(shutdown_only):
    """End-to-end check of the dashboard's /api/raylet_info endpoint.

    Builds a small actor tree (ActorC owning ActorA and ActorB) plus some
    stored objects, then polls the dashboard until the reported actor info
    matches, and finally checks resources/children of each reported actor.
    """
    addresses = ray.init(include_webui=True, num_cpus=6)

    @ray.remote
    def f():
        return "test"

    @ray.remote(num_cpus=1)
    class ActorA:
        def __init__(self):
            pass

    @ray.remote(resources={"CustomResource": 1})
    class ActorB:
        def __init__(self):
            pass

    @ray.remote(num_cpus=2)
    class ActorC:
        def __init__(self):
            self.children = [ActorA.remote(), ActorB.remote()]

        def local_store(self):
            # 10 task outputs kept in scope by this actor.
            self.local_storage = [f.remote() for _ in range(10)]

        def remote_store(self):
            # Plus one explicit ray.put, for 11 IDs in scope total.
            self.remote_storage = ray.put("test")

    c = ActorC.remote()
    c.local_store.remote()
    c.remote_store.remote()
    start_time = time.time()
    while True:
        time.sleep(1)
        try:
            webui_url = addresses["webui_url"]
            webui_url = webui_url.replace("localhost", "http://127.0.0.1")
            raylet_info = requests.get(webui_url + "/api/raylet_info").json()
            actor_info = raylet_info["result"]["actors"]
            try:
                assert len(actor_info) == 1
                _, parent_actor_info = actor_info.popitem()
                assert parent_actor_info["numObjectIdsInScope"] == 11
                assert parent_actor_info["numLocalObjects"] == 10
                children = parent_actor_info["children"]
                assert len(children) == 2
                # Info is consistent; fall through to the checks below.
                break
            except AssertionError:
                # Dashboard data is eventually consistent; retry up to 30s.
                if time.time() > start_time + 30:
                    raise Exception("Timed out while waiting for actor info \
or object store info update.")
        except requests.exceptions.ConnectionError:
            if time.time() > start_time + 30:
                raise Exception(
                    "Timed out while waiting for dashboard to start.")
    # NOTE: these rely on `parent_actor_info`/`children` bound in the loop
    # above (the loop only exits via break after binding them, or by raising).
    assert parent_actor_info["usedResources"]["CPU"] == 2
    assert parent_actor_info["numExecutedTasks"] == 3
    for _, child_actor_info in children.items():
        if child_actor_info["state"] == -1:
            # Unschedulable child (ActorB): no CustomResource in the cluster.
            assert child_actor_info["requiredResources"]["CustomResource"] == 1
        else:
            assert child_actor_info["state"] == 0
            assert len(child_actor_info["children"]) == 0
            assert child_actor_info["usedResources"]["CPU"] == 1
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest/bazel wrapper).
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_microbenchmarks.py
|
Python
|
import pytest
import time
import numpy as np
import ray
def test_timing(ray_start_regular):
    """Micro-benchmark of basic Ray operations.

    Times four operations 1000 times each and prints latency statistics:
    submitting a no-op task, submitting a task with a return value,
    submitting a task and fetching its result, and ``ray.put`` of an int.
    There are no assertions; the printed numbers are inspected manually.

    The original version copy-pasted the same timing/reporting loop four
    times; it is factored into the `_benchmark` helper below.
    """

    @ray.remote
    def empty_function():
        pass

    @ray.remote
    def trivial_function():
        return 1

    def _benchmark(description, operation, trials=1000):
        # Time `operation()` `trials` times, then print the mean and the
        # 90th/99th/worst-case latencies.
        elapsed_times = []
        for _ in range(trials):
            start_time = time.time()
            operation()
            end_time = time.time()
            elapsed_times.append(end_time - start_time)
        elapsed_times = np.sort(elapsed_times)
        average_elapsed_time = sum(elapsed_times) / trials
        print(description)
        print("    Average: {}".format(average_elapsed_time))
        print("    90th percentile: {}".format(
            elapsed_times[(trials * 9) // 10]))
        print("    99th percentile: {}".format(
            elapsed_times[(trials * 99) // 100]))
        print("    worst: {}".format(elapsed_times[trials - 1]))

    # Measure the time required to submit a remote task to the scheduler.
    # average_elapsed_time should be about 0.00038.
    _benchmark("Time required to submit an empty function call:",
               lambda: empty_function.remote())

    # Measure the time required to submit a remote task to the scheduler
    # (where the remote task returns one value).
    # average_elapsed_time should be about 0.001.
    _benchmark("Time required to submit a trivial function call:",
               lambda: trivial_function.remote())

    # Measure the time required to submit a remote task to the scheduler
    # and get the result.
    # average_elapsed_time should be about 0.0013.
    _benchmark(
        "Time required to submit a trivial function call and get the "
        "result:", lambda: ray.get(trivial_function.remote()))

    # Measure the time required to do a put.
    # average_elapsed_time should be about 0.00087.
    _benchmark("Time required to put an int:", lambda: ray.put(1))
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True)
def test_cache(ray_start_regular):
    """Fetching the same objects repeatedly should be roughly as fast as
    using local copies, thanks to the object-store/get cache.

    Only warns (prints) when the ray.get path is more than 1.5x slower than
    the purely local baseline; it never fails the test on timing.
    """
    A = np.random.rand(1, 1000000)
    v = np.random.rand(1000000)
    A_id = ray.put(A)
    v_id = ray.put(v)
    # Baseline: 100 local matrix-vector products.
    a = time.time()
    for i in range(100):
        A.dot(v)
    b = time.time() - a
    # Same work, but re-fetching both operands through ray.get each time.
    c = time.time()
    for i in range(100):
        ray.get(A_id).dot(ray.get(v_id))
    d = time.time() - c
    if d > 1.5 * b:
        print("WARNING: The caching test was too slow. "
              "d = {}, b = {}".format(d, b))
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest/bazel wrapper).
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_mini.py
|
Python
|
import ray
# Sample values covering the basic serializable types (int, float, str,
# bytes, tuple, list, dict) used by the round-trip tests below.
test_values = [1, 1.0, "test", b"test", (0, 1), [0, 1], {0: 1}]
def test_basic_task_api(ray_start_regular):
    """Smoke-test the core task API: simple calls, multiple return values,
    arguments passed by value and by object ID, and keyword arguments.

    The original block had trailing comments for the by-ID and keyword
    cases with no code under them; those tests are filled in here.
    """

    # Test a simple function.
    @ray.remote
    def f_simple():
        return 1

    assert ray.get(f_simple.remote()) == 1

    # Test multiple return values.
    @ray.remote(num_return_vals=3)
    def f_multiple_returns():
        return 1, 2, 3

    x_id1, x_id2, x_id3 = f_multiple_returns.remote()
    assert ray.get([x_id1, x_id2, x_id3]) == [1, 2, 3]

    # Test arguments passed by value.
    @ray.remote
    def f_args_by_value(x):
        return x

    for arg in test_values:
        assert ray.get(f_args_by_value.remote(arg)) == arg

    # Test arguments passed by ID: an ObjectID passed as a task argument is
    # dereferenced to its value before the task body runs.
    for arg in test_values:
        assert ray.get(f_args_by_value.remote(ray.put(arg))) == arg

    # Test keyword arguments.
    @ray.remote
    def f_kwargs(x=0, y=1):
        return x, y

    assert ray.get(f_kwargs.remote()) == (0, 1)
    assert ray.get(f_kwargs.remote(y=3)) == (0, 3)
    assert ray.get(f_kwargs.remote(2, y=3)) == (2, 3)
def test_put_api(ray_start_regular):
    """ray.put followed by ray.get round-trips plain values and object IDs."""
    for value in test_values:
        assert ray.get(ray.put(value)) == value
    # Test putting object IDs.
    oid = ray.put(0)
    for container in [[oid], (oid, ), {oid: oid}]:
        # Object IDs are themselves serializable inside containers.
        assert ray.get(ray.put(container)) == container
def test_actor_api(ray_start_regular):
    """Minimal actor round-trip: construct with a value, read it back."""

    @ray.remote
    class Foo:
        def __init__(self, val):
            self.x = val

        def get(self):
            return self.x

    expected = 1
    actor = Foo.remote(expected)
    assert ray.get(actor.get.remote()) == expected
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_monitors.py
|
Python
|
import multiprocessing
import os
import pytest
import subprocess
import time
import ray
def _test_cleanup_on_driver_exit(num_redis_shards):
    """Start a cluster with `ray start`, run a driver in a subprocess that
    creates objects/tasks, then verify the monitor cleans up its global
    state after the driver exits.

    State is summarized as (num objects, num tasks) tuples; (0, 1) is the
    baseline for a freshly connected driver (its single driver task).
    """
    output = ray.utils.decode(
        subprocess.check_output(
            [
                "ray",
                "start",
                "--head",
                "--num-redis-shards",
                str(num_redis_shards),
            ],
            stderr=subprocess.STDOUT))
    # Scrape the redis address out of the `ray.init(...)` hint that
    # `ray start` prints.
    lines = [m.strip() for m in output.split("\n")]
    init_cmd = [m for m in lines if m.startswith("ray.init")]
    assert 1 == len(init_cmd)
    address = init_cmd[0].split("address=\"")[-1][:-2]
    max_attempts_before_failing = 100
    # Wait for monitor.py to start working.
    time.sleep(2)

    def StateSummary():
        obj_tbl_len = len(ray.objects())
        task_tbl_len = len(ray.tasks())
        return obj_tbl_len, task_tbl_len

    def Driver(success):
        # Runs in a child process; reports pass/fail via the shared flag.
        success.value = True
        # Start driver.
        ray.init(address=address)
        summary_start = StateSummary()
        if (0, 1) != summary_start:
            success.value = False
        # Two new objects.
        ray.get(ray.put(1111))
        ray.get(ray.put(1111))

        @ray.remote
        def f():
            ray.put(1111)  # Yet another object.
            return 1111  # A returned object as well.

        # 1 new function.
        attempts = 0
        while (2, 1) != StateSummary():
            time.sleep(0.1)
            attempts += 1
            if attempts == max_attempts_before_failing:
                success.value = False
                break
        ray.get(f.remote())
        attempts = 0
        while (4, 2) != StateSummary():
            time.sleep(0.1)
            attempts += 1
            if attempts == max_attempts_before_failing:
                success.value = False
                break
        ray.shutdown()

    success = multiprocessing.Value("b", False)
    driver = multiprocessing.Process(target=Driver, args=(success, ))
    driver.start()
    # Wait for client to exit.
    driver.join()
    # Just make sure Driver() is run and succeeded.
    assert success.value
    # Check that objects, tasks, and functions are cleaned up.
    ray.init(address=address)
    attempts = 0
    while (0, 1) != StateSummary():
        time.sleep(0.1)
        attempts += 1
        if attempts == max_attempts_before_failing:
            break
    assert (0, 1) == StateSummary()
    ray.shutdown()
    subprocess.check_output(["ray", "stop"])
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with the new GCS API.")
def test_cleanup_on_driver_exit_single_redis_shard():
    """Driver-exit cleanup with a single Redis shard."""
    _test_cleanup_on_driver_exit(num_redis_shards=1)
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with the new GCS API.")
def test_cleanup_on_driver_exit_many_redis_shards():
    """Driver-exit cleanup when state is spread across many Redis shards."""
    _test_cleanup_on_driver_exit(num_redis_shards=5)
    _test_cleanup_on_driver_exit(num_redis_shards=31)
if __name__ == "__main__":
    import pytest
    import sys
    # Make subprocess happy in bazel.
    os.environ["LC_ALL"] = "en_US.UTF-8"
    os.environ["LANG"] = "en_US.UTF-8"
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_multi_node.py
|
Python
|
import os
import pytest
import subprocess
import time
import ray
from ray import ray_constants
from ray.test_utils import (
RayTestTimeoutException,
run_string_as_driver,
run_string_as_driver_nonblocking,
wait_for_children_of_pid,
wait_for_children_of_pid_to_exit,
kill_process_by_name,
)
def test_error_isolation(call_ray_start):
    """Errors raised in one driver must not be visible to other drivers."""
    address = call_ray_start
    # Connect a driver to the Ray cluster.
    ray.init(address=address)
    # There shouldn't be any errors yet.
    assert len(ray.errors()) == 0
    error_string1 = "error_string1"
    error_string2 = "error_string2"

    @ray.remote
    def f():
        raise Exception(error_string1)

    # Run a remote function that throws an error.
    with pytest.raises(Exception):
        ray.get(f.remote())
    # Wait for the error to appear in Redis.
    while len(ray.errors()) != 1:
        time.sleep(0.1)
        print("Waiting for error to appear.")
    # Make sure we got the error.
    assert len(ray.errors()) == 1
    assert error_string1 in ray.errors()[0]["message"]
    # Start another driver and make sure that it does not receive this
    # error. Make the other driver throw an error, and make sure it
    # receives that error.
    driver_script = """
import ray
import time
ray.init(address="{}")
time.sleep(1)
assert len(ray.errors()) == 0

@ray.remote
def f():
    raise Exception("{}")

try:
    ray.get(f.remote())
except Exception as e:
    pass

while len(ray.errors()) != 1:
    print(len(ray.errors()))
    time.sleep(0.1)
assert len(ray.errors()) == 1
assert "{}" in ray.errors()[0]["message"]
print("success")
""".format(address, error_string2, error_string2)
    out = run_string_as_driver(driver_script)
    # Make sure the other driver succeeded.
    assert "success" in out
    # Make sure that the other error message doesn't show up for this
    # driver.
    assert len(ray.errors()) == 1
    assert error_string1 in ray.errors()[0]["message"]
def test_remote_function_isolation(call_ray_start):
    """Two drivers can define remote functions with the same names without
    interfering with each other's definitions."""
    # This test will run multiple remote functions with the same names in
    # two different drivers. Connect a driver to the Ray cluster.
    address = call_ray_start
    ray.init(address=address)
    # Start another driver and make sure that it can define and call its
    # own commands with the same names.
    driver_script = """
import ray
import time
ray.init(address="{}")

@ray.remote
def f():
    return 3

@ray.remote
def g(x, y):
    return 4

for _ in range(10000):
    result = ray.get([f.remote(), g.remote(0, 0)])
    assert result == [3, 4]
print("success")
""".format(address)
    out = run_string_as_driver(driver_script)

    @ray.remote
    def f():
        return 1

    @ray.remote
    def g(x):
        return 2

    # Our f/g (different bodies AND arities) must keep returning our values
    # even while the other driver hammers its own f/g.
    for _ in range(10000):
        result = ray.get([f.remote(), g.remote(0)])
        assert result == [1, 2]
    # Make sure the other driver succeeded.
    assert "success" in out
def test_driver_exiting_quickly(call_ray_start):
    """Drivers that exit immediately after submitting work must not wedge
    the cluster for subsequent drivers."""
    # This test will create some drivers that submit some tasks and then
    # exit without waiting for the tasks to complete.
    address = call_ray_start
    ray.init(address=address)
    # Define a driver that creates an actor and exits.
    driver_script1 = """
import ray
ray.init(address="{}")

@ray.remote
class Foo:
    def __init__(self):
        pass

Foo.remote()
print("success")
""".format(address)
    # Define a driver that creates some tasks and exits.
    driver_script2 = """
import ray
ray.init(address="{}")

@ray.remote
def f():
    return 1

f.remote()
print("success")
""".format(address)
    # Create some drivers and let them exit and make sure everything is
    # still alive.
    for _ in range(3):
        out = run_string_as_driver(driver_script1)
        # Make sure the first driver ran to completion.
        assert "success" in out
        out = run_string_as_driver(driver_script2)
        # Make sure the first driver ran to completion.
        assert "success" in out
def test_drivers_named_actors(call_ray_start):
    """Multiple drivers can share one named actor: each sees the counter
    state left by the previous driver."""
    # This test will create some drivers that submit some tasks to the same
    # named actor.
    address = call_ray_start
    ray.init(address=address)
    # Define a driver that creates a named actor then sleeps for a while.
    driver_script1 = """
import ray
import time
ray.init(address="{}")

@ray.remote
class Counter:
    def __init__(self):
        self.count = 0

    def increment(self):
        self.count += 1
        return self.count

counter = Counter.remote()
ray.experimental.register_actor("Counter", counter)
time.sleep(100)
""".format(address)
    # Define a driver that submits to the named actor and exits.
    # The literal "{}" placeholder is re-escaped so it survives the first
    # .format and can be filled with the expected count per iteration below.
    driver_script2 = """
import ray
import time
ray.init(address="{}")

while True:
    try:
        counter = ray.experimental.get_actor("Counter")
        break
    except ValueError:
        time.sleep(1)

assert ray.get(counter.increment.remote()) == {}
print("success")
""".format(address, "{}")
    process_handle = run_string_as_driver_nonblocking(driver_script1)
    for i in range(3):
        driver_script = driver_script2.format(i + 1)
        out = run_string_as_driver(driver_script)
        assert "success" in out
    # Kill the long-sleeping owner driver so it does not outlive the test.
    process_handle.kill()
def test_receive_late_worker_logs():
    """Worker log lines must reach the driver's stdout even when the driver
    script exits right after its tasks finish."""
    # Make sure that log messages from tasks appear in the stdout even if the
    # script exits quickly.
    log_message = "some helpful debugging message"
    # Define a driver that creates a task that prints something, ensures that
    # the task runs, and then exits.
    driver_script = """
import ray
import random
import time

log_message = "{}"

@ray.remote
class Actor:
    def log(self):
        print(log_message)

@ray.remote
def f():
    print(log_message)

ray.init(num_cpus=2)

a = Actor.remote()
ray.get([a.log.remote(), f.remote()])
ray.get([a.log.remote(), f.remote()])
""".format(log_message)
    for _ in range(2):
        out = run_string_as_driver(driver_script)
        # Two rounds of (actor log + task log) => four occurrences.
        assert out.count(log_message) == 4
@pytest.mark.parametrize(
    "call_ray_start", ["ray start --head --num-cpus=1 --num-gpus=1"],
    indirect=True)
def test_drivers_release_resources(call_ray_start):
    """CPU/GPU resources held by a driver's actors and pending tasks must be
    released when the driver exits, gracefully or not."""
    address = call_ray_start
    # Define a driver that creates an actor and exits.
    driver_script1 = """
import time
import ray
ray.init(address="{}")

@ray.remote
def f(duration):
    time.sleep(duration)

@ray.remote(num_gpus=1)
def g(duration):
    time.sleep(duration)

@ray.remote(num_gpus=1)
class Foo:
    def __init__(self):
        pass

# Make sure some resources are available for us to run tasks.
ray.get(f.remote(0))
ray.get(g.remote(0))

# Start a bunch of actors and tasks that use resources. These should all be
# cleaned up when this driver exits.
foos = [Foo.remote() for _ in range(100)]
[f.remote(10 ** 6) for _ in range(100)]

print("success")
""".format(address)
    # Variant of the driver that announces success and then sleeps forever,
    # so the test can kill it mid-run (ungraceful exit).
    driver_script2 = (driver_script1 +
                      "import sys\nsys.stdout.flush()\ntime.sleep(10 ** 6)\n")

    def wait_for_success_output(process_handle, timeout=10):
        # Wait until the process prints "success" and then return.
        start_time = time.time()
        while time.time() - start_time < timeout:
            output_line = ray.utils.decode(
                process_handle.stdout.readline()).strip()
            print(output_line)
            if output_line == "success":
                return
        raise RayTestTimeoutException(
            "Timed out waiting for process to print success.")

    # Make sure we can run this driver repeatedly, which means that resources
    # are getting released in between.
    for _ in range(5):
        out = run_string_as_driver(driver_script1)
        # Make sure the first driver ran to completion.
        assert "success" in out
        # Also make sure that this works when the driver exits ungracefully.
        process_handle = run_string_as_driver_nonblocking(driver_script2)
        wait_for_success_output(process_handle)
        # Kill the process ungracefully.
        process_handle.kill()
def test_calling_start_ray_head(call_ray_stop_only):
    """Exercise `ray start --head` with many CLI flag combinations, plus the
    --block process-supervision behavior."""
    # Test that we can call ray start with various command line
    # parameters. TODO(rkn): This test only tests the --head code path. We
    # should also test the non-head node code path.
    # Test starting Ray with no arguments.
    subprocess.check_output(["ray", "start", "--head"])
    subprocess.check_output(["ray", "stop"])
    # Test starting Ray with a redis port specified.
    subprocess.check_output(["ray", "start", "--head", "--redis-port", "6379"])
    subprocess.check_output(["ray", "stop"])
    # Test starting Ray with a node IP address specified.
    subprocess.check_output(
        ["ray", "start", "--head", "--node-ip-address", "127.0.0.1"])
    subprocess.check_output(["ray", "stop"])
    # Test starting Ray with the object manager and node manager ports
    # specified.
    subprocess.check_output([
        "ray", "start", "--head", "--object-manager-port", "12345",
        "--node-manager-port", "54321"
    ])
    subprocess.check_output(["ray", "stop"])
    # Test starting Ray with the number of CPUs specified.
    subprocess.check_output(["ray", "start", "--head", "--num-cpus", "2"])
    subprocess.check_output(["ray", "stop"])
    # Test starting Ray with the number of GPUs specified.
    subprocess.check_output(["ray", "start", "--head", "--num-gpus", "100"])
    subprocess.check_output(["ray", "stop"])
    # Test starting Ray with the max redis clients specified.
    subprocess.check_output(
        ["ray", "start", "--head", "--redis-max-clients", "100"])
    subprocess.check_output(["ray", "stop"])
    if "RAY_USE_NEW_GCS" not in os.environ:
        # Test starting Ray with redis shard ports specified.
        subprocess.check_output([
            "ray", "start", "--head", "--redis-shard-ports", "6380,6381,6382"
        ])
        subprocess.check_output(["ray", "stop"])
        # Test starting Ray with all arguments specified.
        subprocess.check_output([
            "ray", "start", "--head", "--redis-port", "6379",
            "--redis-shard-ports", "6380,6381,6382", "--object-manager-port",
            "12345", "--num-cpus", "2", "--num-gpus", "0",
            "--redis-max-clients", "100", "--resources", "{\"Custom\": 1}"
        ])
        subprocess.check_output(["ray", "stop"])
    # Test starting Ray with invalid arguments.
    # (--head and --address are mutually exclusive.)
    with pytest.raises(subprocess.CalledProcessError):
        subprocess.check_output(
            ["ray", "start", "--head", "--address", "127.0.0.1:6379"])
    subprocess.check_output(["ray", "stop"])
    # Test --block. Killing a child process should cause the command to exit.
    blocked = subprocess.Popen(["ray", "start", "--head", "--block"])
    wait_for_children_of_pid(blocked.pid, num_children=7, timeout=30)
    blocked.poll()
    assert blocked.returncode is None
    kill_process_by_name("raylet")
    wait_for_children_of_pid_to_exit(blocked.pid, timeout=120)
    blocked.wait()
    assert blocked.returncode != 0, "ray start shouldn't return 0 on bad exit"
    # Test --block. Killing the command should clean up all child processes.
    blocked = subprocess.Popen(["ray", "start", "--head", "--block"])
    blocked.poll()
    assert blocked.returncode is None
    wait_for_children_of_pid(blocked.pid, num_children=7, timeout=30)
    blocked.terminate()
    wait_for_children_of_pid_to_exit(blocked.pid, timeout=120)
    blocked.wait()
    assert blocked.returncode != 0, "ray start shouldn't return 0 on bad exit"
@pytest.mark.parametrize(
"call_ray_start", [
"ray start --head --num-cpus=1 " +
"--node-ip-address=localhost --redis-port=6379"
],
indirect=True)
def test_using_hostnames(call_ray_start):
ray.init(node_ip_address="localhost", address="localhost:6379")
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_connecting_in_local_case(ray_start_regular):
address_info = ray_start_regular
# Define a driver that just connects to Redis.
driver_script = """
import ray
ray.init(address="{}")
print("success")
""".format(address_info["redis_address"])
out = run_string_as_driver(driver_script)
# Make sure the other driver succeeded.
assert "success" in out
def test_run_driver_twice(ray_start_regular):
# We used to have issue 2165 and 2288:
# https://github.com/ray-project/ray/issues/2165
# https://github.com/ray-project/ray/issues/2288
# both complain that driver will hang when run for the second time.
# This test is used to verify the fix for above issue, it will run the
# same driver for twice and verify whether both of them succeed.
address_info = ray_start_regular
driver_script = """
import ray
import ray.tune as tune
import os
import time
def train_func(config, reporter): # add a reporter arg
for i in range(2):
time.sleep(0.1)
reporter(timesteps_total=i, mean_accuracy=i+97) # report metrics
os.environ["TUNE_RESUME_PROMPT_OFF"] = "True"
ray.init(address="{}")
ray.tune.register_trainable("train_func", train_func)
tune.run_experiments({{
"my_experiment": {{
"run": "train_func",
"stop": {{"mean_accuracy": 99}},
"config": {{
"layer1": {{
"class_name": tune.grid_search(["a"]),
"config": {{"lr": tune.grid_search([1, 2])}}
}},
}},
"local_dir": os.path.expanduser("~/tmp")
}}
}})
print("success")
""".format(address_info["redis_address"])
for i in range(2):
out = run_string_as_driver(driver_script)
assert "success" in out
@pytest.mark.skipif(
ray_constants.direct_call_enabled(),
reason="fate sharing not implemented yet")
def test_driver_exiting_when_worker_blocked(call_ray_start):
# This test will create some drivers that submit some tasks and then
# exit without waiting for the tasks to complete.
address = call_ray_start
ray.init(address=address)
# Define a driver that creates two tasks, one that runs forever and the
# other blocked on the first in a `ray.get`.
driver_script = """
import time
import ray
ray.init(address="{}")
@ray.remote
def f():
time.sleep(10**6)
@ray.remote
def g():
ray.get(f.remote())
g.remote()
time.sleep(1)
print("success")
""".format(address)
# Create some drivers and let them exit and make sure everything is
# still alive.
for _ in range(3):
out = run_string_as_driver(driver_script)
# Make sure the first driver ran to completion.
assert "success" in out
# Define a driver that creates two tasks, one that runs forever and the
# other blocked on the first in a `ray.wait`.
driver_script = """
import time
import ray
ray.init(address="{}")
@ray.remote
def f():
time.sleep(10**6)
@ray.remote
def g():
ray.wait([f.remote()])
g.remote()
time.sleep(1)
print("success")
""".format(address)
# Create some drivers and let them exit and make sure everything is
# still alive.
for _ in range(3):
out = run_string_as_driver(driver_script)
# Make sure the first driver ran to completion.
assert "success" in out
# Define a driver that creates one task that depends on a nonexistent
# object. This task will be queued as waiting to execute.
driver_script_template = """
import time
import ray
ray.init(address="{}")
@ray.remote
def g(x):
return
g.remote(ray.ObjectID(ray.utils.hex_to_binary("{}")))
time.sleep(1)
print("success")
"""
# Create some drivers and let them exit and make sure everything is
# still alive.
for _ in range(3):
nonexistent_id = ray.ObjectID.from_random()
driver_script = driver_script_template.format(address,
nonexistent_id.hex())
out = run_string_as_driver(driver_script)
# Simulate the nonexistent dependency becoming available.
ray.worker.global_worker.put_object(None, nonexistent_id)
# Make sure the first driver ran to completion.
assert "success" in out
# Define a driver that calls `ray.wait` on a nonexistent object.
driver_script_template = """
import time
import ray
ray.init(address="{}")
@ray.remote
def g():
ray.wait(ray.ObjectID(ray.utils.hex_to_binary("{}")))
g.remote()
time.sleep(1)
print("success")
"""
# Create some drivers and let them exit and make sure everything is
# still alive.
for _ in range(3):
nonexistent_id = ray.ObjectID.from_random()
driver_script = driver_script_template.format(address,
nonexistent_id.hex())
out = run_string_as_driver(driver_script)
# Simulate the nonexistent dependency becoming available.
ray.worker.global_worker.put_object(None, nonexistent_id)
# Make sure the first driver ran to completion.
assert "success" in out
@ray.remote
def f():
return 1
# Make sure we can still talk with the raylet.
ray.get(f.remote())
@pytest.mark.parametrize(
"call_ray_start", ["ray start --head --num-cpus=1 --use-pickle"],
indirect=True)
def test_use_pickle(call_ray_start):
address = call_ray_start
ray.init(address=address, use_pickle=True)
assert ray.worker.global_worker.use_pickle
x = (2, "hello")
@ray.remote
def f(x):
assert x == (2, "hello")
assert ray.worker.global_worker.use_pickle
return (3, "world")
assert ray.get(f.remote(x)) == (3, "world")
if __name__ == "__main__":
import pytest
import sys
# Make subprocess happy in bazel.
os.environ["LC_ALL"] = "en_US.UTF-8"
os.environ["LANG"] = "en_US.UTF-8"
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_multi_node_2.py
|
Python
|
import logging
import pytest
import time
import ray
import ray.ray_constants as ray_constants
from ray.monitor import Monitor
from ray.cluster_utils import Cluster
from ray.test_utils import generate_internal_config_map
logger = logging.getLogger(__name__)
def test_cluster():
"""Basic test for adding and removing nodes in cluster."""
g = Cluster(initialize_head=False)
node = g.add_node()
node2 = g.add_node()
assert node.remaining_processes_alive()
assert node2.remaining_processes_alive()
g.remove_node(node2)
g.remove_node(node)
assert not any(n.any_processes_alive() for n in [node, node2])
def test_shutdown():
g = Cluster(initialize_head=False)
node = g.add_node()
node2 = g.add_node()
g.shutdown()
assert not any(n.any_processes_alive() for n in [node, node2])
@pytest.mark.parametrize(
"ray_start_cluster_head",
[generate_internal_config_map(num_heartbeats_timeout=20)],
indirect=True)
def test_internal_config(ray_start_cluster_head):
"""Checks that the internal configuration setting works.
We set the cluster to timeout nodes after 2 seconds of no timeouts. We
then remove a node, wait for 1 second to check that the cluster is out
of sync, then wait another 2 seconds (giving 1 second of leeway) to check
that the client has timed out.
"""
cluster = ray_start_cluster_head
worker = cluster.add_node()
cluster.wait_for_nodes()
cluster.remove_node(worker, allow_graceful=False)
time.sleep(1)
assert ray.cluster_resources()["CPU"] == 2
time.sleep(2)
assert ray.cluster_resources()["CPU"] == 1
def setup_monitor(address):
monitor = Monitor(
address, None, redis_password=ray_constants.REDIS_DEFAULT_PASSWORD)
monitor.subscribe(ray.gcs_utils.XRAY_HEARTBEAT_BATCH_CHANNEL)
monitor.subscribe(ray.gcs_utils.XRAY_JOB_CHANNEL) # TODO: Remove?
monitor.update_raylet_map(_append_port=True)
monitor._maybe_flush_gcs()
return monitor
def verify_load_metrics(monitor, expected_resource_usage=None, timeout=10):
while True:
monitor.process_messages()
resource_usage = monitor.load_metrics.get_resource_usage()
if "memory" in resource_usage[1]:
del resource_usage[1]["memory"]
if "object_store_memory" in resource_usage[2]:
del resource_usage[1]["object_store_memory"]
if "memory" in resource_usage[2]:
del resource_usage[2]["memory"]
if "object_store_memory" in resource_usage[2]:
del resource_usage[2]["object_store_memory"]
for key in list(resource_usage[1].keys()):
if key.startswith("node:"):
del resource_usage[1][key]
for key in list(resource_usage[2].keys()):
if key.startswith("node:"):
del resource_usage[2][key]
if expected_resource_usage is None:
if all(x for x in resource_usage[1:]):
break
elif all(x == y
for x, y in zip(resource_usage, expected_resource_usage)):
break
else:
timeout -= 1
time.sleep(1)
if timeout <= 0:
raise ValueError("Timeout. {} != {}".format(
resource_usage, expected_resource_usage))
return resource_usage
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 1,
}, {
"num_cpus": 2,
}],
indirect=True)
def test_heartbeats_single(ray_start_cluster_head):
"""Unit test for `Cluster.wait_for_nodes`.
Test proper metrics.
"""
cluster = ray_start_cluster_head
timeout = 5
monitor = setup_monitor(cluster.address)
total_cpus = ray.state.cluster_resources()["CPU"]
verify_load_metrics(monitor, (0.0, {"CPU": 0.0}, {"CPU": total_cpus}))
@ray.remote
def work(timeout):
time.sleep(timeout)
return True
work_handle = work.remote(timeout * 2)
verify_load_metrics(monitor, (1.0 / total_cpus, {
"CPU": 1.0
}, {
"CPU": total_cpus
}))
ray.get(work_handle)
@ray.remote
class Actor:
def work(self, timeout):
time.sleep(timeout)
return True
test_actor = Actor.remote()
work_handle = test_actor.work.remote(timeout * 2)
verify_load_metrics(monitor, (1.0 / total_cpus, {
"CPU": 1.0
}, {
"CPU": total_cpus
}))
ray.get(work_handle)
def test_wait_for_nodes(ray_start_cluster_head):
"""Unit test for `Cluster.wait_for_nodes`.
Adds 4 workers, waits, then removes 4 workers, waits,
then adds 1 worker, waits, and removes 1 worker, waits.
"""
cluster = ray_start_cluster_head
workers = [cluster.add_node() for i in range(4)]
cluster.wait_for_nodes()
[cluster.remove_node(w) for w in workers]
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 1
worker2 = cluster.add_node()
cluster.wait_for_nodes()
cluster.remove_node(worker2)
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 1
def test_worker_plasma_store_failure(ray_start_cluster_head):
cluster = ray_start_cluster_head
worker = cluster.add_node()
cluster.wait_for_nodes()
worker.kill_reporter()
worker.kill_plasma_store()
worker.kill_reaper()
worker.all_processes[ray_constants.PROCESS_TYPE_RAYLET][0].process.wait()
assert not worker.any_processes_alive(), worker.live_processes()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_multinode_failures.py
|
Python
|
import json
import os
import signal
import sys
import time
import pytest
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray.test_utils import RayTestTimeoutException
RAY_FORCE_DIRECT = ray_constants.direct_call_enabled()
@pytest.fixture(params=[(1, 4), (4, 4)])
def ray_start_workers_separate_multinode(request):
num_nodes = request.param[0]
num_initial_workers = request.param[1]
# Start the Ray processes.
cluster = Cluster()
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_initial_workers)
ray.init(address=cluster.address)
yield num_nodes, num_initial_workers
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_worker_failed(ray_start_workers_separate_multinode):
num_nodes, num_initial_workers = (ray_start_workers_separate_multinode)
@ray.remote
def get_pids():
time.sleep(0.25)
return os.getpid()
start_time = time.time()
pids = set()
while len(pids) < num_nodes * num_initial_workers:
new_pids = ray.get([
get_pids.remote()
for _ in range(2 * num_nodes * num_initial_workers)
])
for pid in new_pids:
pids.add(pid)
if time.time() - start_time > 60:
raise RayTestTimeoutException(
"Timed out while waiting to get worker PIDs.")
@ray.remote
def f(x):
time.sleep(0.5)
return x
# Submit more tasks than there are workers so that all workers and
# cores are utilized.
object_ids = [f.remote(i) for i in range(num_initial_workers * num_nodes)]
object_ids += [f.remote(object_id) for object_id in object_ids]
# Allow the tasks some time to begin executing.
time.sleep(0.1)
# Kill the workers as the tasks execute.
for pid in pids:
os.kill(pid, signal.SIGKILL)
time.sleep(0.1)
# Make sure that we either get the object or we get an appropriate
# exception.
for object_id in object_ids:
try:
ray.get(object_id)
except (ray.exceptions.RayTaskError, ray.exceptions.RayWorkerError):
pass
def _test_component_failed(cluster, component_type):
"""Kill a component on all worker nodes and check workload succeeds."""
# Submit many tasks with many dependencies.
@ray.remote
def f(x):
if RAY_FORCE_DIRECT:
# Sleep to make sure that tasks actually fail mid-execution. We
# only use it for direct calls because the test already takes a
# long time to run with the raylet codepath.
time.sleep(0.01)
return x
@ray.remote
def g(*xs):
if RAY_FORCE_DIRECT:
# Sleep to make sure that tasks actually fail mid-execution. We
# only use it for direct calls because the test already takes a
# long time to run with the raylet codepath.
time.sleep(0.01)
return 1
# Kill the component on all nodes except the head node as the tasks
# execute. Do this in a loop while submitting tasks between each
# component failure.
time.sleep(0.1)
worker_nodes = cluster.list_all_nodes()[1:]
assert len(worker_nodes) > 0
for node in worker_nodes:
process = node.all_processes[component_type][0].process
# Submit a round of tasks with many dependencies.
x = 1
for _ in range(1000):
x = f.remote(x)
xs = [g.remote(1)]
for _ in range(100):
xs.append(g.remote(*xs))
xs.append(g.remote(1))
# Kill a component on one of the nodes.
process.terminate()
time.sleep(1)
process.kill()
process.wait()
assert not process.poll() is None
# Make sure that we can still get the objects after the
# executing tasks died.
ray.get(x)
ray.get(xs)
def check_components_alive(cluster, component_type, check_component_alive):
"""Check that a given component type is alive on all worker nodes."""
worker_nodes = cluster.list_all_nodes()[1:]
assert len(worker_nodes) > 0
for node in worker_nodes:
process = node.all_processes[component_type][0].process
if check_component_alive:
assert process.poll() is None
else:
print("waiting for " + component_type + " with PID " +
str(process.pid) + "to terminate")
process.wait()
print("done waiting for " + component_type + " with PID " +
str(process.pid) + "to terminate")
assert not process.poll() is None
@pytest.mark.parametrize(
"ray_start_cluster",
[{
"num_cpus": 8,
"num_nodes": 4,
"_internal_config": json.dumps({
# Raylet codepath is not stable with a shorter timeout.
"num_heartbeats_timeout": 10 if RAY_FORCE_DIRECT else 100
}),
}],
indirect=True)
def test_raylet_failed(ray_start_cluster):
cluster = ray_start_cluster
# Kill all raylets on worker nodes.
_test_component_failed(cluster, ray_constants.PROCESS_TYPE_RAYLET)
# The plasma stores should still be alive on the worker nodes.
check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE,
True)
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with new GCS API.")
@pytest.mark.parametrize(
"ray_start_cluster",
[{
"num_cpus": 8,
"num_nodes": 2,
"_internal_config": json.dumps({
# Raylet codepath is not stable with a shorter timeout.
"num_heartbeats_timeout": 10 if RAY_FORCE_DIRECT else 100
}),
}],
indirect=True)
def test_plasma_store_failed(ray_start_cluster):
cluster = ray_start_cluster
# Kill all plasma stores on worker nodes.
_test_component_failed(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE)
# No processes should be left alive on the worker nodes.
check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE,
False)
check_components_alive(cluster, ray_constants.PROCESS_TYPE_RAYLET, False)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_multinode_failures_2.py
|
Python
|
import json
import os
import sys
import time
import numpy as np
import pytest
import ray
import ray.ray_constants as ray_constants
RAY_FORCE_DIRECT = ray_constants.direct_call_enabled()
@pytest.mark.skipif(
RAY_FORCE_DIRECT,
reason="No reconstruction for objects placed in plasma yet")
@pytest.mark.parametrize(
"ray_start_cluster",
[{
# Force at least one task per node.
"num_cpus": 1,
"num_nodes": 4,
"object_store_memory": 1000 * 1024 * 1024,
"_internal_config": json.dumps({
# Raylet codepath is not stable with a shorter timeout.
"num_heartbeats_timeout": 10 if RAY_FORCE_DIRECT else 100,
"object_manager_pull_timeout_ms": 1000,
"object_manager_push_timeout_ms": 1000,
"object_manager_repeated_push_delay_ms": 1000,
}),
}],
indirect=True)
def test_object_reconstruction(ray_start_cluster):
cluster = ray_start_cluster
# Submit tasks with dependencies in plasma.
@ray.remote
def large_value():
# Sleep for a bit to force tasks onto different nodes.
time.sleep(0.1)
return np.zeros(10 * 1024 * 1024)
@ray.remote
def g(x):
return
# Kill the component on all nodes except the head node as the tasks
# execute. Do this in a loop while submitting tasks between each
# component failure.
time.sleep(0.1)
worker_nodes = cluster.list_all_nodes()[1:]
assert len(worker_nodes) > 0
component_type = ray_constants.PROCESS_TYPE_RAYLET
for node in worker_nodes:
process = node.all_processes[component_type][0].process
# Submit a round of tasks with many dependencies.
num_tasks = len(worker_nodes)
xs = [large_value.remote() for _ in range(num_tasks)]
# Wait for the tasks to complete, then evict the objects from the local
# node.
for x in xs:
ray.get(x)
ray.internal.free([x], local_only=True)
# Kill a component on one of the nodes.
process.terminate()
time.sleep(1)
process.kill()
process.wait()
assert not process.poll() is None
# Make sure that we can still get the objects after the
# executing tasks died.
print("F", xs)
xs = [g.remote(x) for x in xs]
print("G", xs)
ray.get(xs)
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 4,
"num_nodes": 3,
"do_init": True
}],
indirect=True)
def test_actor_creation_node_failure(ray_start_cluster):
# TODO(swang): Refactor test_raylet_failed, etc to reuse the below code.
cluster = ray_start_cluster
@ray.remote
class Child:
def __init__(self, death_probability):
self.death_probability = death_probability
def ping(self):
# Exit process with some probability.
exit_chance = np.random.rand()
if exit_chance < self.death_probability:
sys.exit(-1)
num_children = 25
# Children actors will die about half the time.
death_probability = 0.5
children = [Child.remote(death_probability) for _ in range(num_children)]
while len(cluster.list_all_nodes()) > 1:
for j in range(2):
# Submit some tasks on the actors. About half of the actors will
# fail.
children_out = [child.ping.remote() for child in children]
# Wait a while for all the tasks to complete. This should trigger
# reconstruction for any actor creation tasks that were forwarded
# to nodes that then failed.
ready, _ = ray.wait(
children_out, num_returns=len(children_out), timeout=5 * 60.0)
assert len(ready) == len(children_out)
# Replace any actors that died.
for i, out in enumerate(children_out):
try:
ray.get(out)
except ray.exceptions.RayActorError:
children[i] = Child.remote(death_probability)
# Remove a node. Any actor creation tasks that were forwarded to this
# node must be reconstructed.
cluster.remove_node(cluster.list_all_nodes()[-1])
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with new GCS API.")
def test_driver_lives_sequential(ray_start_regular):
ray.worker._global_node.kill_raylet()
ray.worker._global_node.kill_plasma_store()
ray.worker._global_node.kill_log_monitor()
ray.worker._global_node.kill_monitor()
ray.worker._global_node.kill_raylet_monitor()
# If the driver can reach the tearDown method, then it is still alive.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with new GCS API.")
def test_driver_lives_parallel(ray_start_regular):
all_processes = ray.worker._global_node.all_processes
process_infos = (all_processes[ray_constants.PROCESS_TYPE_PLASMA_STORE] +
all_processes[ray_constants.PROCESS_TYPE_RAYLET] +
all_processes[ray_constants.PROCESS_TYPE_LOG_MONITOR] +
all_processes[ray_constants.PROCESS_TYPE_MONITOR] +
all_processes[ray_constants.PROCESS_TYPE_RAYLET_MONITOR])
assert len(process_infos) == 5
# Kill all the components in parallel.
for process_info in process_infos:
process_info.process.terminate()
time.sleep(0.1)
for process_info in process_infos:
process_info.process.kill()
for process_info in process_infos:
process_info.process.wait()
# If the driver can reach the tearDown method, then it is still alive.
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_multiprocessing.py
|
Python
|
import os
import pytest
import tempfile
import time
import random
import subprocess
from collections import defaultdict
import queue
import ray
from ray.experimental.multiprocessing import Pool, TimeoutError
@pytest.fixture
def cleanup_only():
yield None
ray.shutdown()
subprocess.check_output(["ray", "stop"])
if "RAY_ADDRESS" in os.environ:
del os.environ["RAY_ADDRESS"]
@pytest.fixture
def pool():
pool = Pool(processes=1)
yield pool
pool.terminate()
ray.shutdown()
@pytest.fixture
def pool_4_processes():
pool = Pool(processes=4)
yield pool
pool.terminate()
ray.shutdown()
def test_initialize_ray(cleanup_only):
def getpid(args):
return os.getpid()
def check_pool_size(pool, size):
args = [tuple() for _ in range(size)]
assert len(set(pool.map(getpid, args))) == size
# Check that starting a pool starts ray if not initialized.
pool = Pool(processes=2)
assert ray.is_initialized()
assert int(ray.state.cluster_resources()["CPU"]) == 2
check_pool_size(pool, 2)
ray.shutdown()
# Check that starting a pool doesn't affect ray if there is a local
# ray cluster running.
ray.init(num_cpus=3)
assert ray.is_initialized()
pool = Pool(processes=2)
assert int(ray.state.cluster_resources()["CPU"]) == 3
check_pool_size(pool, 2)
ray.shutdown()
# Check that trying to start a pool on an existing ray cluster throws an
# error if there aren't enough CPUs for the number of processes.
ray.init(num_cpus=1)
assert ray.is_initialized()
with pytest.raises(ValueError):
Pool(processes=2)
assert int(ray.state.cluster_resources()["CPU"]) == 1
ray.shutdown()
# Use different numbers of CPUs to distinguish between starting a local
# ray cluster and connecting to an existing one.
init_cpus = 2
start_cpus = 3
# Start a ray cluster in the background.
subprocess.check_output(
["ray", "start", "--head", "--num-cpus={}".format(start_cpus)])
# Check that starting a pool still starts ray if RAY_ADDRESS not set.
pool = Pool(processes=init_cpus)
assert ray.is_initialized()
assert int(ray.state.cluster_resources()["CPU"]) == init_cpus
check_pool_size(pool, init_cpus)
ray.shutdown()
# Check that starting a pool connects to a running ray cluster if
# ray_address is passed in.
pool = Pool(ray_address="auto")
assert ray.is_initialized()
assert int(ray.state.cluster_resources()["CPU"]) == start_cpus
check_pool_size(pool, start_cpus)
ray.shutdown()
# Set RAY_ADDRESS, so pools should connect to the running ray cluster.
os.environ["RAY_ADDRESS"] = "auto"
# Check that starting a pool connects to a running ray cluster if
# RAY_ADDRESS is set.
pool = Pool()
assert ray.is_initialized()
assert int(ray.state.cluster_resources()["CPU"]) == start_cpus
check_pool_size(pool, start_cpus)
ray.shutdown()
# Check that trying to start a pool on an existing ray cluster throws an
# error if there aren't enough CPUs for the number of processes.
with pytest.raises(Exception):
Pool(processes=start_cpus + 1)
assert int(ray.state.cluster_resources()["CPU"]) == start_cpus
ray.shutdown()
# Clean up the background ray cluster.
subprocess.check_output(["ray", "stop"])
def test_initializer(cleanup_only):
def init(dirname):
with open(os.path.join(dirname, str(os.getpid())), "w") as f:
print("hello", file=f)
with tempfile.TemporaryDirectory() as dirname:
num_processes = 4
pool = Pool(
processes=num_processes, initializer=init, initargs=(dirname, ))
assert len(os.listdir(dirname)) == 4
pool.terminate()
def test_close(pool_4_processes):
def f(object_id):
return ray.get(object_id)
object_id = ray.ObjectID.from_random()
result = pool_4_processes.map_async(f, [object_id for _ in range(4)])
assert not result.ready()
pool_4_processes.close()
assert not result.ready()
# Fulfill the object_id, causing the head of line tasks to finish.
ray.worker.global_worker.put_object("hello", object_id=object_id)
pool_4_processes.join()
# close() shouldn't interrupt pending tasks, so check that they succeeded.
assert result.ready()
assert result.successful()
assert result.get() == ["hello"] * 4
def test_terminate(pool_4_processes):
def f(object_id):
return ray.get(object_id)
object_id = ray.ObjectID.from_random()
result = pool_4_processes.map_async(f, [object_id for _ in range(4)])
assert not result.ready()
pool_4_processes.terminate()
# terminate() should interrupt pending tasks, so check that join() returns
# even though the tasks should be blocked forever.
pool_4_processes.join()
result.wait(timeout=10)
assert result.ready()
assert not result.successful()
def test_apply(pool):
def f(arg1, arg2, kwarg1=None, kwarg2=None):
assert arg1 == 1
assert arg2 == 2
assert kwarg1 is None
assert kwarg2 == 3
return 1
assert pool.apply(f, (1, 2), {"kwarg2": 3}) == 1
with pytest.raises(AssertionError):
pool.apply(f, (
2,
2,
), {"kwarg2": 3})
with pytest.raises(Exception):
pool.apply(f, (1, ))
with pytest.raises(Exception):
pool.apply(f, (1, 2), {"kwarg1": 3})
def test_apply_async(pool):
def f(arg1, arg2, kwarg1=None, kwarg2=None):
assert arg1 == 1
assert arg2 == 2
assert kwarg1 is None
assert kwarg2 == 3
return 1
assert pool.apply_async(f, (1, 2), {"kwarg2": 3}).get() == 1
with pytest.raises(AssertionError):
pool.apply_async(f, (
2,
2,
), {
"kwarg2": 3
}).get()
with pytest.raises(Exception):
pool.apply_async(f, (1, )).get()
with pytest.raises(Exception):
pool.apply_async(f, (1, 2), {"kwarg1": 3}).get()
# Won't return until the input ObjectID is fulfilled.
def ten_over(input):
return 10 / ray.get(input[0])
# Generate a random ObjectID that will be fulfilled later.
object_id = ray.ObjectID.from_random()
result = pool.apply_async(ten_over, ([object_id], ))
result.wait(timeout=0.01)
assert not result.ready()
with pytest.raises(TimeoutError):
result.get(timeout=0.01)
# Fulfill the ObjectID.
ray.worker.global_worker.put_object(10, object_id=object_id)
result.wait(timeout=10)
assert result.ready()
assert result.successful()
assert result.get() == 1
# Generate a random ObjectID that will be fulfilled later.
object_id = ray.ObjectID.from_random()
result = pool.apply_async(ten_over, ([object_id], ))
with pytest.raises(ValueError, match="not ready"):
result.successful()
# Fulfill the ObjectID with 0, causing the task to fail (divide by zero).
ray.worker.global_worker.put_object(0, object_id=object_id)
result.wait(timeout=10)
assert result.ready()
assert not result.successful()
with pytest.raises(ZeroDivisionError):
result.get()
def test_map(pool_4_processes):
def f(index):
return index, os.getpid()
results = pool_4_processes.map(f, range(1000))
assert len(results) == 1000
pid_counts = defaultdict(int)
for i, (index, pid) in enumerate(results):
assert i == index
pid_counts[pid] += 1
# Check that the functions are spread somewhat evenly.
for count in pid_counts.values():
assert count > 100
def bad_func(args):
raise Exception("test_map failure")
with pytest.raises(Exception, match="test_map failure"):
pool_4_processes.map(bad_func, range(100))
def test_map_async(pool_4_processes):
def f(args):
index = args[0]
ray.get(args[1])
return index, os.getpid()
# Generate a random ObjectID that will be fulfilled later.
object_id = ray.ObjectID.from_random()
async_result = pool_4_processes.map_async(
f, [(i, object_id) for i in range(1000)])
assert not async_result.ready()
with pytest.raises(TimeoutError):
async_result.get(timeout=0.01)
async_result.wait(timeout=0.01)
# Fulfill the object ID, finishing the tasks.
ray.worker.global_worker.put_object(0, object_id=object_id)
async_result.wait(timeout=10)
assert async_result.ready()
assert async_result.successful()
results = async_result.get()
assert len(results) == 1000
pid_counts = defaultdict(int)
for i, (index, pid) in enumerate(results):
assert i == index
pid_counts[pid] += 1
# Check that the functions are spread somewhat evenly.
for count in pid_counts.values():
assert count > 100
def bad_func(index):
if index == 50:
raise Exception("test_map_async failure")
async_result = pool_4_processes.map_async(bad_func, range(100))
async_result.wait(10)
assert async_result.ready()
assert not async_result.successful()
with pytest.raises(Exception, match="test_map_async failure"):
async_result.get()
def test_starmap(pool):
def f(*args):
return args
args = [tuple(range(i)) for i in range(100)]
assert pool.starmap(f, args) == args
def test_callbacks(pool_4_processes):
def f(args):
time.sleep(0.1 * random.random())
index = args[0]
err_indices = args[1]
if index in err_indices:
raise Exception("intentional failure")
return index
callback_queue = queue.Queue()
def callback(result):
callback_queue.put(result)
def error_callback(error):
callback_queue.put(error)
# Will not error, check that callback is called.
result = pool_4_processes.apply_async(f, ((0, [1]), ), callback=callback)
assert callback_queue.get() == 0
result.get()
# Will error, check that error_callback is called.
result = pool_4_processes.apply_async(
f, ((0, [0]), ), error_callback=error_callback)
assert isinstance(callback_queue.get(), Exception)
with pytest.raises(Exception, match="intentional failure"):
result.get()
# Test callbacks for map_async.
error_indices = [2, 50, 98]
result = pool_4_processes.map_async(
f, [(index, error_indices) for index in range(100)],
callback=callback,
error_callback=error_callback)
callback_results = []
while len(callback_results) < 100:
callback_results.append(callback_queue.get())
assert result.ready()
assert not result.successful()
# Check that callbacks were called on every result, error or not.
assert len(callback_results) == 100
# Check that callbacks were processed in the order that the tasks finished.
# NOTE: this could be flaky if the calls happened to finish in order due
# to the random sleeps, but it's very unlikely.
assert not all(i in error_indices or i == result
for i, result in enumerate(callback_results))
# Check that the correct callbacks were called on errors/successes.
assert all(index not in callback_results for index in error_indices)
assert [isinstance(result, Exception)
for result in callback_results].count(True) == len(error_indices)
def test_imap(pool_4_processes):
def f(args):
time.sleep(0.1 * random.random())
index = args[0]
err_indices = args[1]
if index in err_indices:
raise Exception("intentional failure")
return index
error_indices = [2, 50, 98]
result_iter = pool_4_processes.imap(
f, [(index, error_indices) for index in range(100)], chunksize=11)
for i in range(100):
result = result_iter.next()
if i in error_indices:
assert isinstance(result, Exception)
else:
assert result == i
with pytest.raises(StopIteration):
result_iter.next()
def test_imap_unordered(pool_4_processes):
def f(args):
time.sleep(0.1 * random.random())
index = args[0]
err_indices = args[1]
if index in err_indices:
raise Exception("intentional failure")
return index
error_indices = [2, 50, 98]
in_order = []
num_errors = 0
result_iter = pool_4_processes.imap_unordered(
f, [(index, error_indices) for index in range(100)], chunksize=11)
for i in range(100):
result = result_iter.next()
if isinstance(result, Exception):
in_order.append(True)
num_errors += 1
else:
in_order.append(result == i)
# Check that the results didn't come back all in order.
# NOTE: this could be flaky if the calls happened to finish in order due
# to the random sleeps, but it's very unlikely.
assert not all(in_order)
assert num_errors == len(error_indices)
with pytest.raises(StopIteration):
result_iter.next()
def test_imap_timeout(pool_4_processes):
    """next(timeout=...) on (un)ordered imap iterators raises TimeoutError
    for a result that is not ready yet, without losing the eventual result.

    One task (at ``wait_index``) blocks on ``ray.get(object_id)`` until the
    driver puts a value for that id, giving the test precise control over
    when that task is allowed to finish.
    """

    def f(args):
        # Small random sleep so tasks complete at staggered times.
        time.sleep(0.1 * random.random())
        index = args[0]
        wait_index = args[1]
        object_id = args[2]
        if index == wait_index:
            # Block until the driver puts a value for this object id below.
            ray.get(object_id)
        return index

    # Ordered case: result i must come from the i-th next() call, so the
    # blocked task stalls the iterator exactly at wait_index.
    wait_index = 23
    object_id = ray.ObjectID.from_random()
    result_iter = pool_4_processes.imap(
        f, [(index, wait_index, object_id) for index in range(100)])
    for i in range(100):
        if i == wait_index:
            # The blocked task cannot finish within the timeout.
            with pytest.raises(TimeoutError):
                result = result_iter.next(timeout=0.1)
            # Unblock the task; the unconditional next() below then succeeds.
            ray.worker.global_worker.put_object(None, object_id=object_id)
        result = result_iter.next()
        assert result == i
    with pytest.raises(StopIteration):
        result_iter.next()

    # Unordered case: finished tasks are delivered as they complete; only
    # whichever next() call runs dry hits the timeout and triggers unblocking.
    wait_index = 23
    object_id = ray.ObjectID.from_random()
    result_iter = pool_4_processes.imap_unordered(
        f, [(index, wait_index, object_id) for index in range(100)],
        chunksize=11)
    in_order = []
    for i in range(100):
        try:
            result = result_iter.next(timeout=1)
        except TimeoutError:
            ray.worker.global_worker.put_object(None, object_id=object_id)
            result = result_iter.next()
        in_order.append(result == i)
    # Check that the results didn't come back all in order.
    # NOTE: this could be flaky if the calls happened to finish in order due
    # to the random sleeps, but it's very unlikely.
    assert not all(in_order)
    with pytest.raises(StopIteration):
        result_iter.next()
def test_maxtasksperchild(cleanup_only):
    """With maxtasksperchild=1 every task must run in a fresh worker
    process, so 20 tasks report 20 distinct PIDs."""

    def report_pid(args):
        return os.getpid()

    pool = Pool(5, maxtasksperchild=1)
    worker_pids = pool.map(report_pid, range(20))
    # Each worker handled exactly one task, hence all PIDs are unique.
    assert len(set(worker_pids)) == 20
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_node_manager.py
|
Python
|
import ray
from ray.test_utils import run_string_as_driver
# This tests the queue transitions for infeasible tasks. This has been an issue
# in the past, e.g., https://github.com/ray-project/ray/issues/3275.
def test_infeasible_tasks(ray_start_cluster):
    """Infeasible tasks (resource demands no current node can satisfy) must
    stay queued — not dropped — and run once a matching node joins, even if
    the submitting driver has already exited."""
    cluster = ray_start_cluster

    @ray.remote
    def f():
        return

    cluster.add_node(resources={str(0): 100})
    ray.init(address=cluster.address)

    # Submit an infeasible task: no node has custom resource "1" yet.
    x_id = f._submit(args=[], kwargs={}, resources={str(1): 1})

    # Add a node that makes the task feasible and make sure we can get the
    # result.
    cluster.add_node(resources={str(1): 100})
    ray.get(x_id)

    # Start a driver that submits an infeasible task and then let it exit.
    # The three "{}" placeholders are filled by .format() below with: the
    # cluster address, the literal text of the resources dict (passed as a
    # string so it is not interpreted as a format field), and the function
    # body's indentation whitespace.
    driver_script = """
import ray
ray.init(address="{}")
@ray.remote(resources={})
def f():
{}pass # This is a weird hack to insert some blank space.
f.remote()
""".format(cluster.address, "{str(2): 1}", " ")
    run_string_as_driver(driver_script)

    # Now add a new node that makes the task feasible.
    cluster.add_node(resources={str(2): 100})

    # Make sure we can still run tasks on all nodes.
    ray.get([
        f._submit(args=[], kwargs={}, resources={str(i): 1}) for i in range(3)
    ])
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`;
    # delegates to pytest and propagates its exit code.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_object_manager.py
|
Python
|
from collections import defaultdict
import json
import multiprocessing
import numpy as np
import pytest
import time
import warnings
import ray
from ray import ray_constants
from ray.cluster_utils import Cluster
# TODO(yuhguo): This test file requires a lot of CPU/memory, and
# better be put in Jenkins. However, it fails frequently in Jenkins, but
# works well in Travis. We should consider moving it back to Jenkins once
# we figure out the reason.
# Warn (rather than fail) on small hosts: the tests below assume at least
# 40 CPUs and ~50 GB of system memory.
if (multiprocessing.cpu_count() < 40
        or ray.utils.get_system_memory() < 50 * 10**9):
    warnings.warn("This test must be run on large machines.")
def create_cluster(num_nodes):
    """Start a local multi-node cluster and connect the driver to it.

    Each node is tagged with a unique custom resource (its index as a
    string, quantity 100) so tests can pin tasks to specific nodes, and
    gets a 1 GB object store.
    """
    cluster = Cluster()
    for node_index in range(num_nodes):
        cluster.add_node(
            resources={str(node_index): 100}, object_store_memory=10**9)
    ray.init(address=cluster.address)
    return cluster
@pytest.fixture()
def ray_start_cluster_with_resource():
    """Yield a 5-node cluster together with its node count, tearing down
    Ray and the cluster after the test finishes."""
    node_count = 5
    cluster = create_cluster(node_count)
    yield cluster, node_count
    # Teardown: runs once the test using this fixture has returned.
    ray.shutdown()
    cluster.shutdown()
# This test is here to make sure that when we broadcast an object to a bunch of
# machines, we don't have too many excess object transfers.
@pytest.mark.skipif(ray_constants.direct_call_enabled(), reason="TODO(ekl)")
def test_object_broadcast(ray_start_cluster_with_resource):
    """Broadcast 10 MiB objects (created both via ray.put and via a remote
    task) to all nodes, then verify from the object transfer timeline that
    each object was pushed a sensible number of times: at least once per
    remote node, at most once per unordered node pair, and never twice
    between the same sender/receiver pair of object managers."""
    cluster, num_nodes = ray_start_cluster_with_resource

    @ray.remote
    def f(x):
        return

    # 10 MiB payload, large enough for transfers to appear in the timeline.
    x = np.zeros(10 * 1024 * 1024, dtype=np.uint8)

    @ray.remote
    def create_object():
        return np.zeros(10 * 1024 * 1024, dtype=np.uint8)

    object_ids = []

    for _ in range(3):
        # Broadcast an object to all machines.
        x_id = ray.put(x)
        object_ids.append(x_id)
        # 10 tasks per node, each pinned by the node's custom resource, all
        # taking x_id as an argument so every node must fetch the object.
        ray.get([
            f._remote(args=[x_id], resources={str(i % num_nodes): 1})
            for i in range(10 * num_nodes)
        ])

    for _ in range(3):
        # Broadcast an object to all machines.
        x_id = create_object.remote()
        object_ids.append(x_id)
        ray.get([
            f._remote(args=[x_id], resources={str(i % num_nodes): 1})
            for i in range(10 * num_nodes)
        ])

    # Wait for profiling information to be pushed to the profile table.
    time.sleep(1)
    transfer_events = ray.object_transfer_timeline()

    # Make sure that each object was transferred a reasonable number of times.
    for x_id in object_ids:
        # NOTE(review): args[0] is the object id hex; args[2] == 1 presumably
        # selects one chunk per transfer so each push is counted once —
        # confirm against the timeline event schema.
        relevant_events = [
            event for event in transfer_events
            if event["cat"] == "transfer_send"
            and event["args"][0] == x_id.hex() and event["args"][2] == 1
        ]

        # NOTE: Each event currently appears twice because we duplicate the
        # send and receive boxes to underline them with a box (black if it is a
        # send and gray if it is a receive). So we need to remove these extra
        # boxes here.
        deduplicated_relevant_events = [
            event for event in relevant_events if event["cname"] != "black"
        ]
        assert len(deduplicated_relevant_events) * 2 == len(relevant_events)
        relevant_events = deduplicated_relevant_events

        # Each object must have been broadcast to each remote machine.
        assert len(relevant_events) >= num_nodes - 1
        # If more object transfers than necessary have been done, print a
        # warning.
        if len(relevant_events) > num_nodes - 1:
            warnings.warn("This object was transferred {} times, when only {} "
                          "transfers were required.".format(
                              len(relevant_events), num_nodes - 1))
        # Each object should not have been broadcast more than once from every
        # machine to every other machine. Also, a pair of machines should not
        # both have sent the object to each other.
        assert len(relevant_events) <= (num_nodes - 1) * num_nodes / 2

        # Make sure that no object was sent multiple times between the same
        # pair of object managers.
        send_counts = defaultdict(int)
        for event in relevant_events:
            # The pid identifies the sender and the tid identifies the
            # receiver.
            send_counts[(event["pid"], event["tid"])] += 1
        assert all(value == 1 for value in send_counts.values())
# When submitting an actor method, we try to pre-emptively push its arguments
# to the actor's object manager. However, in the past we did not deduplicate
# the pushes and so the same object could get shipped to the same object
# manager many times. This test checks that that isn't happening.
def test_actor_broadcast(ray_start_cluster_with_resource):
    """Pass the same large object to actor methods on every node and verify
    from the transfer timeline that the argument pushes are deduplicated:
    at least one push per remote node, none repeated between the same pair
    of object managers."""
    cluster, num_nodes = ray_start_cluster_with_resource

    @ray.remote
    class Actor:
        def ready(self):
            pass

        def set_weights(self, x):
            pass

    # 30 actors spread round-robin across the nodes via custom resources.
    actors = [
        Actor._remote(
            args=[],
            kwargs={},
            num_cpus=0.01,
            resources={str(i % num_nodes): 1}) for i in range(30)
    ]

    # Wait for the actors to start up.
    ray.get([a.ready.remote() for a in actors])

    object_ids = []

    # Broadcast a large object to all actors.
    for _ in range(5):
        x_id = ray.put(np.zeros(10**7, dtype=np.uint8))
        object_ids.append(x_id)
        # Pass the object into a method for every actor.
        ray.get([a.set_weights.remote(x_id) for a in actors])

    # Wait for profiling information to be pushed to the profile table.
    time.sleep(1)
    transfer_events = ray.object_transfer_timeline()

    # Make sure that each object was transferred a reasonable number of times.
    for x_id in object_ids:
        # NOTE(review): args[0] is the object id hex; args[2] == 1 presumably
        # selects one chunk per transfer so each push is counted once —
        # confirm against the timeline event schema.
        relevant_events = [
            event for event in transfer_events
            if event["cat"] == "transfer_send"
            and event["args"][0] == x_id.hex() and event["args"][2] == 1
        ]

        # NOTE: Each event currently appears twice because we duplicate the
        # send and receive boxes to underline them with a box (black if it is a
        # send and gray if it is a receive). So we need to remove these extra
        # boxes here.
        deduplicated_relevant_events = [
            event for event in relevant_events if event["cname"] != "black"
        ]
        assert len(deduplicated_relevant_events) * 2 == len(relevant_events)
        relevant_events = deduplicated_relevant_events

        # Each object must have been broadcast to each remote machine.
        assert len(relevant_events) >= num_nodes - 1
        # If more object transfers than necessary have been done, print a
        # warning.
        if len(relevant_events) > num_nodes - 1:
            warnings.warn("This object was transferred {} times, when only {} "
                          "transfers were required.".format(
                              len(relevant_events), num_nodes - 1))
        # Each object should not have been broadcast more than once from every
        # machine to every other machine. Also, a pair of machines should not
        # both have sent the object to each other.
        assert len(relevant_events) <= (num_nodes - 1) * num_nodes / 2

        # Make sure that no object was sent multiple times between the same
        # pair of object managers.
        send_counts = defaultdict(int)
        for event in relevant_events:
            # The pid identifies the sender and the tid identifies the
            # receiver.
            send_counts[(event["pid"], event["tid"])] += 1
        assert all(value == 1 for value in send_counts.values())
# The purpose of this test is to make sure that an object that was already been
# transferred to a node can be transferred again.
def test_object_transfer_retry(ray_start_cluster):
    """After an object has been transferred to a node and then evicted
    there, a later ray.get must transfer it again — but only once the
    sender's repeated-push delay has elapsed.

    Timing assumption: transfers themselves complete much faster than
    ``repeated_push_delay`` seconds; the test warns if that doesn't hold.
    """
    cluster = ray_start_cluster

    repeated_push_delay = 4

    # Force the sending object manager to allow duplicate pushes again sooner.
    # Also, force the receiving object manager to retry the Pull sooner. We
    # make the chunk size smaller in order to make it easier to test objects
    # with multiple chunks.
    config = json.dumps({
        "object_manager_repeated_push_delay_ms": repeated_push_delay * 1000,
        "object_manager_pull_timeout_ms": repeated_push_delay * 1000 / 4,
        "object_manager_default_chunk_size": 1000
    })
    object_store_memory = 150 * 1024 * 1024
    cluster.add_node(
        object_store_memory=object_store_memory, _internal_config=config)
    # The second node owns the GPU, so f's results live remotely from the
    # driver on the first node.
    cluster.add_node(
        num_gpus=1,
        object_store_memory=object_store_memory,
        _internal_config=config)
    ray.init(address=cluster.address)

    @ray.remote(num_gpus=1)
    def f(size):
        return np.zeros(size, dtype=np.uint8)

    # Transfer an object to warm up the object manager.
    ray.get(f.remote(10**6))

    x_ids = [f.remote(10**i) for i in [6]]
    # The results must not be local yet — they were created on the GPU node.
    assert not any(
        ray.worker.global_worker.core_worker.object_exists(x_id)
        for x_id in x_ids)

    # Get the objects locally to cause them to be transferred. This is the
    # first time the objects are getting transferred, so it should happen
    # quickly.
    start_time = time.time()
    xs = ray.get(x_ids)
    end_time = time.time()
    if end_time - start_time > repeated_push_delay:
        warnings.warn("The initial transfer took longer than the repeated "
                      "push delay, so this test may not be testing the thing "
                      "it's supposed to test.")

    # Cause all objects to be flushed.
    del xs
    # Evict the local copies by filling the store with throwaway objects.
    x = np.zeros(object_store_memory // 10, dtype=np.uint8)
    for _ in range(15):
        ray.put(x)
    assert not any(
        ray.worker.global_worker.core_worker.object_exists(x_id)
        for x_id in x_ids)
    end_time = time.time()
    # Make sure that the first time the objects get transferred, it happens
    # quickly.
    assert end_time - start_time < repeated_push_delay

    # Get the objects again and make sure they get transferred.
    xs = ray.get(x_ids)
    end_transfer_time = time.time()

    # We should have had to wait for the repeated push delay.
    assert end_transfer_time - start_time >= repeated_push_delay

    # Flush the objects again and wait longer than the repeated push delay and
    # make sure that the objects are transferred again.
    del xs
    for _ in range(15):
        ray.put(x)
    assert not any(
        ray.worker.global_worker.core_worker.object_exists(x_id)
        for x_id in x_ids)
    time.sleep(repeated_push_delay)

    # Get the objects locally to cause them to be transferred. This should
    # happen quickly.
    start_time = time.time()
    ray.get(x_ids)
    end_time = time.time()
    assert end_time - start_time < repeated_push_delay
# The purpose of this test is to make sure we can transfer many objects. In the
# past, this has caused failures in which object managers create too many open
# files and run out of resources.
def test_many_small_transfers(ray_start_cluster_with_resource):
    """Transfer thousands of small objects between every pair of nodes,
    several times, to make sure object managers don't exhaust resources
    (e.g. file descriptors)."""
    cluster, num_nodes = ray_start_cluster_with_resource

    @ray.remote
    def f(*args):
        pass

    def run_round():
        # Create 1000 small objects pinned to each node via its resource.
        per_node_ids = [[
            f._remote(args=[], kwargs={}, resources={str(src): 1})
            for _ in range(1000)
        ] for src in range(num_nodes)]
        # On every node, run a task taking every other node's whole batch as
        # arguments, forcing all those objects to be transferred over.
        pending = [
            f._remote(
                args=per_node_ids[src], kwargs={}, resources={str(dst): 1})
            for dst in range(num_nodes)
            for src in range(num_nodes)
            if src != dst
        ]
        # Wait for all of the transfers to finish.
        ray.get(pending)

    for _ in range(4):
        run_round()
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`;
    # delegates to pytest and propagates its exit code.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.