file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
java/runtime/src/main/java/org/ray/runtime/object/NativeRayObject.java | Java | package org.ray.runtime.object;
import com.google.common.base.Preconditions;
/**
* Binary representation of a ray object. See `RayObject` class in C++ for details.
*/
public class NativeRayObject {

  /** Serialized object data. May be null when only metadata is present. */
  public byte[] data;

  /** Object metadata (e.g. an error-type marker or raw-type marker). May be null. */
  public byte[] metadata;

  /**
   * Creates a binary ray object.
   *
   * @param data Serialized object data, may be null.
   * @param metadata Object metadata, may be null.
   * @throws IllegalStateException if both data and metadata are null or empty.
   */
  public NativeRayObject(byte[] data, byte[] metadata) {
    // At least one of data/metadata must be non-empty; otherwise the object carries
    // no information at all. The message makes the failure diagnosable.
    Preconditions.checkState(bufferLength(data) > 0 || bufferLength(metadata) > 0,
        "Data and metadata of a NativeRayObject cannot both be empty.");
    this.data = data;
    this.metadata = metadata;
  }

  /** Returns the length of the buffer, treating null as 0. */
  private static int bufferLength(byte[] buffer) {
    if (buffer == null) {
      return 0;
    }
    return buffer.length;
  }

  @Override
  public String toString() {
    return "<data>: " + bufferLength(data) + ", <metadata>: " + bufferLength(metadata);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/object/ObjectSerializer.java | Java | package org.ray.runtime.object;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.ray.api.exception.RayActorException;
import org.ray.api.exception.RayTaskException;
import org.ray.api.exception.RayWorkerException;
import org.ray.api.exception.UnreconstructableException;
import org.ray.api.id.ObjectId;
import org.ray.runtime.generated.Gcs.ErrorType;
import org.ray.runtime.util.Serializer;
/**
* Serialize to and deserialize from {@link NativeRayObject}. Metadata is generated during
* serialization and respected during deserialization.
*/
public class ObjectSerializer {

  // Metadata markers. Each error marker is the decimal string form of the corresponding
  // `ErrorType` enum number so it matches the metadata written by the C++/Python workers.
  // UTF-8 is specified explicitly so the bytes do not depend on the platform default
  // charset (these are ASCII strings, so the encoded bytes are unchanged).
  private static final byte[] WORKER_EXCEPTION_META =
      String.valueOf(ErrorType.WORKER_DIED.getNumber()).getBytes(StandardCharsets.UTF_8);

  private static final byte[] ACTOR_EXCEPTION_META =
      String.valueOf(ErrorType.ACTOR_DIED.getNumber()).getBytes(StandardCharsets.UTF_8);

  private static final byte[] UNRECONSTRUCTABLE_EXCEPTION_META =
      String.valueOf(ErrorType.OBJECT_UNRECONSTRUCTABLE.getNumber())
          .getBytes(StandardCharsets.UTF_8);

  private static final byte[] TASK_EXECUTION_EXCEPTION_META =
      String.valueOf(ErrorType.TASK_EXECUTION_EXCEPTION.getNumber())
          .getBytes(StandardCharsets.UTF_8);

  // Marker indicating the data is a raw byte array (also readable from Python).
  private static final byte[] RAW_TYPE_META = "RAW".getBytes(StandardCharsets.UTF_8);

  /**
   * Deserialize an object from an {@link NativeRayObject} instance.
   *
   * @param nativeRayObject The object to deserialize.
   * @param objectId The associated object ID of the object.
   * @param classLoader The classLoader of the object.
   * @return The deserialized object.
   * @throws IllegalArgumentException If the metadata is non-empty but not recognized.
   */
  public static Object deserialize(NativeRayObject nativeRayObject, ObjectId objectId,
      ClassLoader classLoader) {
    byte[] meta = nativeRayObject.metadata;
    byte[] data = nativeRayObject.data;
    if (meta != null && meta.length > 0) {
      // Non-empty metadata determines how to interpret the data.
      if (Arrays.equals(meta, RAW_TYPE_META)) {
        // Raw binary: return the bytes as-is.
        return data;
      } else if (Arrays.equals(meta, WORKER_EXCEPTION_META)) {
        return new RayWorkerException();
      } else if (Arrays.equals(meta, ACTOR_EXCEPTION_META)) {
        return new RayActorException();
      } else if (Arrays.equals(meta, UNRECONSTRUCTABLE_EXCEPTION_META)) {
        return new UnreconstructableException(objectId);
      } else if (Arrays.equals(meta, TASK_EXECUTION_EXCEPTION_META)) {
        // The data holds a serialized RayTaskException.
        return Serializer.decode(data, classLoader);
      }
      throw new IllegalArgumentException("Unrecognized metadata " + Arrays.toString(meta));
    } else {
      // No metadata: the data is a serialized Java object.
      return Serializer.decode(data, classLoader);
    }
  }

  /**
   * Serialize an Java object to an {@link NativeRayObject} instance.
   *
   * @param object The object to serialize.
   * @return The serialized object.
   */
  public static NativeRayObject serialize(Object object) {
    if (object instanceof NativeRayObject) {
      // Already in binary form; pass through unchanged.
      return (NativeRayObject) object;
    } else if (object instanceof byte[]) {
      // If the object is a byte array, skip serializing it and use a special metadata to
      // indicate it's raw binary. So that this object can also be read by Python.
      return new NativeRayObject((byte[]) object, RAW_TYPE_META);
    } else if (object instanceof RayTaskException) {
      // Tag task-execution errors so the receiver re-raises them on `get`.
      return new NativeRayObject(Serializer.encode(object),
          TASK_EXECUTION_EXCEPTION_META);
    } else {
      return new NativeRayObject(Serializer.encode(object), null);
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/object/ObjectStore.java | Java | package org.ray.runtime.object;
import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import org.ray.api.RayObject;
import org.ray.api.WaitResult;
import org.ray.api.exception.RayException;
import org.ray.api.id.ObjectId;
import org.ray.runtime.context.WorkerContext;
/**
* A class that is used to put/get objects to/from the object store.
*/
public abstract class ObjectStore {
// Supplies the current class loader used when deserializing objects in `get`.
private final WorkerContext workerContext;
public ObjectStore(WorkerContext workerContext) {
this.workerContext = workerContext;
}
/**
* Put a raw object into object store.
*
* @param obj The ray object.
* @return Generated ID of the object.
*/
public abstract ObjectId putRaw(NativeRayObject obj);
/**
* Put a raw object with specified ID into object store.
*
* @param obj The ray object.
* @param objectId Object ID specified by user.
*/
public abstract void putRaw(NativeRayObject obj, ObjectId objectId);
/**
* Serialize and put an object to the object store.
*
* @param object The object to put.
* @return Id of the object.
* @throws IllegalArgumentException If the object is already a {@link NativeRayObject}.
*/
public ObjectId put(Object object) {
if (object instanceof NativeRayObject) {
// Serializing an already-serialized object would double-wrap it; callers must use putRaw.
throw new IllegalArgumentException(
"Trying to put a NativeRayObject. Please use putRaw instead.");
}
return putRaw(ObjectSerializer.serialize(object));
}
/**
* Serialize and put an object to the object store, with the given object id.
*
* This method is only used for testing.
*
* @param object The object to put.
* @param objectId Object id.
* @throws IllegalArgumentException If the object is already a {@link NativeRayObject}.
*/
public void put(Object object, ObjectId objectId) {
if (object instanceof NativeRayObject) {
throw new IllegalArgumentException(
"Trying to put a NativeRayObject. Please use putRaw instead.");
}
putRaw(ObjectSerializer.serialize(object), objectId);
}
/**
* Get a list of raw objects from the object store.
*
* @param objectIds IDs of the objects to get.
* @param timeoutMs Timeout in milliseconds, wait infinitely if it's negative.
* @return Result list of objects data.
*/
public abstract List<NativeRayObject> getRaw(List<ObjectId> objectIds, long timeoutMs);
/**
* Get a list of objects from the object store.
*
* @param ids List of the object ids.
* @param <T> Type of these objects.
* @return A list of GetResult objects.
*/
@SuppressWarnings("unchecked")
public <T> List<T> get(List<ObjectId> ids) {
// Pass -1 as timeout to wait until all objects are available in object store.
List<NativeRayObject> dataAndMetaList = getRaw(ids, -1);
List<T> results = new ArrayList<>();
for (int i = 0; i < dataAndMetaList.size(); i++) {
NativeRayObject dataAndMeta = dataAndMetaList.get(i);
Object object = null;
if (dataAndMeta != null) {
object = ObjectSerializer
.deserialize(dataAndMeta, ids.get(i), workerContext.getCurrentClassLoader());
}
if (object instanceof RayException) {
// If the object is a `RayException`, it means that an error occurred during task
// execution.
throw (RayException) object;
}
// Unchecked cast: the caller is responsible for requesting the correct element type T.
results.add((T) object);
}
// This check must be placed after the throw exception statement.
// Because if there was any exception, The get operation would return early
// and wouldn't wait until all objects exist.
Preconditions.checkState(dataAndMetaList.stream().allMatch(Objects::nonNull));
return results;
}
/**
* Wait for a list of objects to appear in the object store.
*
* @param objectIds IDs of the objects to wait for.
* @param numObjects Number of objects that should appear.
* @param timeoutMs Timeout in milliseconds, wait infinitely if it's negative.
* @return A bitset that indicates each object has appeared or not.
*/
public abstract List<Boolean> wait(List<ObjectId> objectIds, int numObjects, long timeoutMs);
/**
* Wait for a list of RayObjects to be locally available, until specified number of objects are
* ready, or specified timeout has passed.
*
* @param waitList A list of RayObject to wait for.
* @param numReturns The number of objects that should be returned.
* @param timeoutMs The maximum time in milliseconds to wait before returning.
* @return Two lists, one containing locally available objects, one containing the rest.
*/
public <T> WaitResult<T> wait(List<RayObject<T>> waitList, int numReturns, int timeoutMs) {
Preconditions.checkNotNull(waitList);
if (waitList.isEmpty()) {
// Nothing to wait for.
return new WaitResult<>(Collections.emptyList(), Collections.emptyList());
}
List<ObjectId> ids = waitList.stream().map(RayObject::getId).collect(Collectors.toList());
// Delegate to the abstract ID-based wait; `ready` is parallel to `ids`/`waitList`.
List<Boolean> ready = wait(ids, numReturns, timeoutMs);
List<RayObject<T>> readyList = new ArrayList<>();
List<RayObject<T>> unreadyList = new ArrayList<>();
for (int i = 0; i < ready.size(); i++) {
if (ready.get(i)) {
readyList.add(waitList.get(i));
} else {
unreadyList.add(waitList.get(i));
}
}
return new WaitResult<>(readyList, unreadyList);
}
/**
* Delete a list of objects from the object store.
*
* @param objectIds IDs of the objects to delete.
* @param localOnly Whether only delete the objects in local node, or all nodes in the
* cluster.
* @param deleteCreatingTasks Whether also delete the tasks that created these objects.
*/
public abstract void delete(List<ObjectId> objectIds, boolean localOnly,
boolean deleteCreatingTasks);
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/object/RayObjectImpl.java | Java | package org.ray.runtime.object;
import java.io.Serializable;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.id.ObjectId;
/**
* Implementation of {@link RayObject}.
*/
public final class RayObjectImpl<T> implements RayObject<T>, Serializable {

  /** ID of the object in the object store. */
  private final ObjectId id;

  /**
   * Cached result of `Ray.get()`.
   *
   * Note, this is necessary for direct calls, in which case, it's not allowed to call `Ray.get` on
   * the same object twice.
   */
  private T object;

  /** True once the value has been fetched from the object store. */
  private boolean objectGotten;

  public RayObjectImpl(ObjectId id) {
    this.id = id;
    this.object = null;
    this.objectGotten = false;
  }

  @Override
  public ObjectId getId() {
    return id;
  }

  @Override
  public synchronized T get() {
    // Fetch at most once; subsequent calls return the cached value.
    if (objectGotten) {
      return object;
    }
    object = Ray.get(id);
    objectGotten = true;
    return object;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/runner/RunManager.java | Java | package org.ray.runtime.runner;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import java.io.File;
import java.io.IOException;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.ray.runtime.config.RayConfig;
import org.ray.runtime.util.FileUtil;
import org.ray.runtime.util.ResourceUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
/**
* Ray service management on one box.
*/
public class RunManager {

  private static final Logger LOGGER = LoggerFactory.getLogger(RunManager.class);

  private static final DateTimeFormatter DATE_TIME_FORMATTER =
      DateTimeFormatter.ofPattern("Y-M-d_H-m-s");

  private static final String WORKER_CLASS = "org.ray.runtime.runner.worker.DefaultWorker";

  /** How long to wait for a process to exit after each terminate attempt. */
  private static final int KILL_PROCESS_WAIT_TIMEOUT_SECONDS = 1;

  private RayConfig rayConfig;

  /** Generates random suffixes for log file names. */
  private Random random;

  /** All started processes as (name, process) pairs, in creation order. */
  private List<Pair<String, Process>> processes;

  public RunManager(RayConfig rayConfig) {
    this.rayConfig = rayConfig;
    processes = new ArrayList<>();
    random = new Random();
  }

  /**
   * Terminate all processes started by this manager.
   */
  public void cleanup() {
    // Terminate the processes in the reversed order of creating them.
    // Because raylet needs to exit before object store, otherwise it
    // cannot exit gracefully.
    for (int i = processes.size() - 1; i >= 0; --i) {
      Pair<String, Process> pair = processes.get(i);
      terminateProcess(pair.getLeft(), pair.getRight());
    }
  }

  /**
   * Terminate a process: gracefully on the first attempt, forcibly on subsequent
   * attempts, looping until the process is no longer alive.
   *
   * @param name Process name, for logging.
   * @param p The process to terminate.
   */
  public void terminateProcess(String name, Process p) {
    int numAttempts = 0;
    while (p.isAlive()) {
      if (numAttempts == 0) {
        LOGGER.debug("Terminating process {}.", name);
        p.destroy();
      } else {
        LOGGER.debug("Terminating process {} forcibly.", name);
        p.destroyForcibly();
      }
      try {
        p.waitFor(KILL_PROCESS_WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        LOGGER.warn("Got InterruptedException while waiting for process {}" +
            " to be terminated.", name);
      }
      numAttempts++;
    }
    LOGGER.info("Process {} is now terminated.", name);
  }

  /**
   * Get processes by name. For test purposes only.
   */
  public List<Process> getProcesses(String name) {
    return processes.stream().filter(pair -> pair.getLeft().equals(name)).map(Pair::getRight)
        .collect(Collectors.toList());
  }

  /** Create the log directory and the socket parent directories. */
  private void createTempDirs() {
    try {
      FileUtils.forceMkdir(new File(rayConfig.logDir));
      FileUtils.forceMkdir(new File(rayConfig.rayletSocketName).getParentFile());
      FileUtils.forceMkdir(new File(rayConfig.objectStoreSocketName).getParentFile());
    } catch (IOException e) {
      LOGGER.error("Couldn't create temp directories.", e);
      throw new RuntimeException(e);
    }
  }

  /**
   * Start a process.
   *
   * @param command The command to start the process with.
   * @param env Environment variables.
   * @param name Process name.
   */
  private void startProcess(List<String> command, Map<String, String> env, String name) {
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug("Starting process {} with command: {}", name,
          Joiner.on(" ").join(command));
    }
    ProcessBuilder builder = new ProcessBuilder(command);
    String stdout = "";
    String stderr = "";
    if (rayConfig.redirectOutput) {
      // Set stdout and stderr paths.
      int logId = random.nextInt(10000);
      String date = DATE_TIME_FORMATTER.format(LocalDateTime.now());
      stdout = String.format("%s/%s-%s-%05d.out", rayConfig.logDir, name, date, logId);
      stderr = String.format("%s/%s-%s-%05d.err", rayConfig.logDir, name, date, logId);
      builder.redirectOutput(new File(stdout));
      builder.redirectError(new File(stderr));
    }
    // Set environment variables.
    if (env != null && !env.isEmpty()) {
      builder.environment().putAll(env);
    }
    Process p;
    try {
      p = builder.start();
    } catch (IOException e) {
      LOGGER.error("Failed to start process " + name, e);
      throw new RuntimeException("Failed to start process " + name, e);
    }
    // Wait 200ms and check whether the process is alive.
    try {
      TimeUnit.MILLISECONDS.sleep(200);
    } catch (InterruptedException e) {
      // Restore the interrupt flag instead of printing the stack trace.
      Thread.currentThread().interrupt();
    }
    if (!p.isAlive()) {
      throw new RuntimeException(
          String.format("Failed to start %s. Exit code: %d.", name, p.exitValue()));
    }
    processes.add(Pair.of(name, p));
    if (LOGGER.isInfoEnabled()) {
      String message = String.format("%s process started.", name);
      if (rayConfig.redirectOutput) {
        message += String.format(" Logs are redirected to %s and %s.", stdout, stderr);
      }
      LOGGER.info(message);
    }
  }

  /**
   * Start all Ray processes on this node.
   *
   * @param isHead Whether this node is the head node. If true, redis server will be started.
   */
  public void startRayProcesses(boolean isHead) {
    LOGGER.info("Starting ray processes @ {}.", rayConfig.nodeIp);
    try {
      createTempDirs();
      if (isHead) {
        startRedisServer();
      }
      startObjectStore();
      startRaylet();
      LOGGER.info("All processes started @ {}.", rayConfig.nodeIp);
    } catch (Exception e) {
      // Clean up started processes.
      cleanup();
      LOGGER.error("Failed to start ray processes.", e);
      throw new RuntimeException("Failed to start ray processes.", e);
    }
  }

  /** Start the primary redis instance plus the configured number of shards. */
  private void startRedisServer() {
    // start primary redis
    String primary = startRedisInstance(rayConfig.nodeIp,
        rayConfig.headRedisPort, rayConfig.headRedisPassword, null);
    rayConfig.setRedisAddress(primary);
    try (Jedis client = new Jedis("127.0.0.1", rayConfig.headRedisPort)) {
      if (!Strings.isNullOrEmpty(rayConfig.headRedisPassword)) {
        client.auth(rayConfig.headRedisPassword);
      }
      client.set("UseRaylet", "1");
      // Set job counter to compute job id.
      client.set("JobCounter", "0");
      // Register the number of Redis shards in the primary shard, so that clients
      // know how many redis shards to expect under RedisShards.
      client.set("NumRedisShards", Integer.toString(rayConfig.numberRedisShards));
      // start redis shards
      for (int i = 0; i < rayConfig.numberRedisShards; i++) {
        String shard = startRedisInstance(rayConfig.nodeIp,
            rayConfig.headRedisPort + i + 1, rayConfig.headRedisPassword, i);
        client.rpush("RedisShards", shard);
      }
    }
  }

  /**
   * Start a single redis instance.
   *
   * @param ip Node IP, used to build the returned address.
   * @param port Port for the instance to listen on.
   * @param password Optional redis password; may be null or empty.
   * @param shard Shard index, or null for the primary instance.
   * @return The instance's address as "ip:port".
   */
  private String startRedisInstance(String ip, int port, String password, Integer shard) {
    try (FileUtil.TempFile redisServerFile = FileUtil.getTempFileFromResource("redis-server")) {
      try (FileUtil.TempFile redisModuleFile = FileUtil.getTempFileFromResource(
          "libray_redis_module.so")) {
        redisServerFile.getFile().setExecutable(true);
        List<String> command = Lists.newArrayList(
            // The redis-server executable file.
            redisServerFile.getFile().getAbsolutePath(),
            "--protected-mode",
            "no",
            "--port",
            String.valueOf(port),
            "--loglevel",
            "warning",
            "--loadmodule",
            // The redis module file.
            redisModuleFile.getFile().getAbsolutePath()
        );
        if (!Strings.isNullOrEmpty(password)) {
          // Fixed: the option name used to be "--requirepass " with a trailing space,
          // which doesn't match redis's expected option name.
          command.add("--requirepass");
          command.add(password);
        }
        String name = shard == null ? "redis" : "redis-" + shard;
        startProcess(command, null, name);
      }
    }
    try (Jedis client = new Jedis("127.0.0.1", port)) {
      if (!Strings.isNullOrEmpty(password)) {
        client.auth(password);
      }
      // Configure Redis to only generate notifications for the export keys.
      client.configSet("notify-keyspace-events", "Kl");
      // Put a time stamp in Redis to indicate when it was started.
      client.set("redis_start_time", LocalDateTime.now().toString());
    }
    return ip + ":" + port;
  }

  /** Start the raylet (node manager) process. */
  private void startRaylet() {
    int hardwareConcurrency = Runtime.getRuntime().availableProcessors();
    // Cap worker startup concurrency at the configured CPU count, but at least 1.
    int maximumStartupConcurrency = Math.max(1,
        Math.min(rayConfig.resources.getOrDefault("CPU", 0.0).intValue(), hardwareConcurrency));
    String redisPasswordOption = "";
    if (!Strings.isNullOrEmpty(rayConfig.headRedisPassword)) {
      redisPasswordOption = rayConfig.headRedisPassword;
    }
    // See `src/ray/raylet/main.cc` for the meaning of each parameter.
    try (FileUtil.TempFile rayletFile = FileUtil.getTempFileFromResource("raylet")) {
      rayletFile.getFile().setExecutable(true);
      List<String> command = ImmutableList.of(
          rayletFile.getFile().getAbsolutePath(),
          String.format("--raylet_socket_name=%s", rayConfig.rayletSocketName),
          String.format("--store_socket_name=%s", rayConfig.objectStoreSocketName),
          String.format("--object_manager_port=%d", 0), // The object manager port.
          // The node manager port.
          String.format("--node_manager_port=%d", rayConfig.getNodeManagerPort()),
          String.format("--node_ip_address=%s", rayConfig.nodeIp),
          String.format("--redis_address=%s", rayConfig.getRedisIp()),
          String.format("--redis_port=%d", rayConfig.getRedisPort()),
          String.format("--num_initial_workers=%d", 0), // number of initial workers
          String.format("--maximum_startup_concurrency=%d", maximumStartupConcurrency),
          String.format("--static_resource_list=%s",
              ResourceUtil.getResourcesStringFromMap(rayConfig.resources)),
          String.format("--config_list=%s", String.join(",", rayConfig.rayletConfigParameters)),
          String.format("--python_worker_command=%s", buildPythonWorkerCommand()),
          String.format("--java_worker_command=%s", buildWorkerCommandRaylet()),
          String.format("--redis_password=%s", redisPasswordOption)
      );
      startProcess(command, null, "raylet");
    }
  }

  /** Join path entries with ':', dropping any entry that contains a space. */
  private String concatPath(Stream<String> stream) {
    // TODO (hchen): Right now, raylet backend doesn't support worker command with spaces.
    // Thus, we have to drop some some paths until that is fixed.
    return stream.filter(s -> !s.contains(" ")).collect(Collectors.joining(":"));
  }

  /** Build the command line the raylet uses to spawn Java worker processes. */
  private String buildWorkerCommandRaylet() {
    List<String> cmd = new ArrayList<>();
    cmd.add("java");
    cmd.add("-classpath");
    // Generate classpath based on current classpath + user-defined classpath.
    String classpath = concatPath(Stream.concat(
        rayConfig.classpath.stream(),
        Stream.of(System.getProperty("java.class.path").split(":"))
    ));
    cmd.add(classpath);
    // library path
    String libraryPath = concatPath(rayConfig.libraryPath.stream());
    cmd.add("-Djava.library.path=" + libraryPath);
    // logging path
    if (rayConfig.redirectOutput) {
      cmd.add("-Dray.logging.stdout=org.apache.log4j.varia.NullAppender");
      cmd.add("-Dray.logging.file=org.apache.log4j.FileAppender");
      int logId = random.nextInt(10000);
      String date = DATE_TIME_FORMATTER.format(LocalDateTime.now());
      String logFile = String.format("%s/worker-%s-%05d.out", rayConfig.logDir, date, logId);
      cmd.add("-Dray.logging.file.path=" + logFile);
    }
    if (!Strings.isNullOrEmpty(rayConfig.jobResourcePath)) {
      cmd.add("-Dray.job.resource-path=" + rayConfig.jobResourcePath);
    }
    // socket names
    cmd.add("-Dray.raylet.socket-name=" + rayConfig.rayletSocketName);
    cmd.add("-Dray.object-store.socket-name=" + rayConfig.objectStoreSocketName);
    cmd.add("-Dray.raylet.node-manager-port=" + rayConfig.getNodeManagerPort());
    // Config overwrite
    cmd.add("-Dray.redis.address=" + rayConfig.getRedisAddress());
    // redis password
    if (!Strings.isNullOrEmpty(rayConfig.headRedisPassword)) {
      cmd.add("-Dray.redis.password=" + rayConfig.headRedisPassword);
    }
    // Number of workers per Java worker process; placeholder is substituted by the raylet.
    cmd.add("-Dray.raylet.config.num_workers_per_process_java=RAY_WORKER_NUM_WORKERS_PLACEHOLDER");
    cmd.addAll(rayConfig.jvmParameters);
    // jvm options
    cmd.add("RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_0");
    // Main class
    cmd.add(WORKER_CLASS);
    String command = Joiner.on(" ").join(cmd);
    LOGGER.debug("Worker command is: {}", command);
    return command;
  }

  /** Start the plasma object store process. */
  private void startObjectStore() {
    try (FileUtil.TempFile plasmaStoreFile = FileUtil
        .getTempFileFromResource("plasma_store_server")) {
      plasmaStoreFile.getFile().setExecutable(true);
      List<String> command = ImmutableList.of(
          // The plasma store executable file.
          plasmaStoreFile.getFile().getAbsolutePath(),
          "-s",
          rayConfig.objectStoreSocketName,
          "-m",
          rayConfig.objectStoreSize.toString()
      );
      startProcess(command, null, "plasma_store");
    }
  }

  /** Build the command line the raylet uses to spawn Python worker processes. */
  private String buildPythonWorkerCommand() {
    // disable python worker start from raylet, which starts from java
    if (rayConfig.pythonWorkerCommand == null) {
      return "";
    }
    List<String> cmd = new ArrayList<>();
    cmd.add(rayConfig.pythonWorkerCommand);
    cmd.add("--node-ip-address=" + rayConfig.nodeIp);
    cmd.add("--object-store-name=" + rayConfig.objectStoreSocketName);
    cmd.add("--raylet-name=" + rayConfig.rayletSocketName);
    cmd.add("--redis-address=" + rayConfig.getRedisAddress());
    String command = cmd.stream().collect(Collectors.joining(" "));
    LOGGER.debug("python worker command: {}", command);
    return command;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/runner/worker/DefaultDriver.java | Java | package org.ray.runtime.runner.worker;
import org.ray.api.Ray;
/**
* The main function of DefaultDriver.
*/
public class DefaultDriver {

  /**
   * Driver entry point. Initializes Ray in DRIVER mode, then reflectively invokes the
   * user driver's `main(String[])`.
   *
   * Recognized arguments:
   *   --driver-class=&lt;fully qualified main class of the driver&gt; (required)
   *   --driver-args=&lt;comma-separated arguments passed to the driver's main&gt; (optional)
   *
   * Fix: `driverClass`/`driverArgs` were previously hard-coded to null, so
   * `Class.forName(null)` always threw and the driver could never start. They are now
   * parsed from the command-line arguments as the original comments intended.
   */
  public static void main(String[] args) {
    try {
      System.setProperty("ray.worker.mode", "DRIVER");
      Ray.init();
      String driverClass = null;
      String driverArgs = null;
      for (String arg : args) {
        if (arg.startsWith("--driver-class=")) {
          driverClass = arg.substring("--driver-class=".length());
        } else if (arg.startsWith("--driver-args=")) {
          driverArgs = arg.substring("--driver-args=".length());
        }
      }
      if (driverClass == null) {
        throw new IllegalArgumentException("Missing required argument: --driver-class");
      }
      Class<?> cls = Class.forName(driverClass);
      String[] argsArray = (driverArgs != null) ? driverArgs.split(",") : (new String[] {});
      cls.getMethod("main", String[].class).invoke(null, (Object) argsArray);
    } catch (Throwable e) {
      // Any failure aborts the driver process with a non-zero exit code.
      e.printStackTrace();
      System.exit(-1);
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/runner/worker/DefaultWorker.java | Java | package org.ray.runtime.runner.worker;
import org.ray.api.Ray;
import org.ray.api.runtime.RayRuntime;
import org.ray.runtime.RayMultiWorkerNativeRuntime;
import org.ray.runtime.RayNativeRuntime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Default implementation of the worker process.
*/
public class DefaultWorker {

  private static final Logger LOGGER = LoggerFactory.getLogger(DefaultWorker.class);

  /**
   * Worker entry point: initializes Ray in WORKER mode and runs the task loop of the
   * underlying runtime.
   */
  public static void main(String[] args) {
    try {
      System.setProperty("ray.worker.mode", "WORKER");
      // Set run-mode to `CLUSTER` explicitly, to prevent the DefaultWorker to receive
      // a wrong run-mode parameter through jvm options.
      System.setProperty("ray.run-mode", "CLUSTER");
      Thread.setDefaultUncaughtExceptionHandler(
          (t, e) -> LOGGER.error("Uncaught worker exception in thread {}: {}", t, e));
      Ray.init();
      LOGGER.info("Worker started.");
      RayRuntime runtime = Ray.internal();
      // Dispatch to the concrete runtime's task loop.
      if (runtime instanceof RayNativeRuntime) {
        ((RayNativeRuntime) runtime).run();
        return;
      }
      if (runtime instanceof RayMultiWorkerNativeRuntime) {
        ((RayMultiWorkerNativeRuntime) runtime).run();
        return;
      }
      throw new RuntimeException("Unknown RayRuntime: " + runtime);
    } catch (Exception e) {
      LOGGER.error("Failed to start worker.", e);
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/task/ArgumentsBuilder.java | Java | package org.ray.runtime.task;
import java.util.ArrayList;
import java.util.List;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.id.ObjectId;
import org.ray.api.runtime.RayRuntime;
import org.ray.runtime.AbstractRayRuntime;
import org.ray.runtime.RayMultiWorkerNativeRuntime;
import org.ray.runtime.generated.Common.Language;
import org.ray.runtime.object.NativeRayObject;
import org.ray.runtime.object.ObjectSerializer;
/**
* Helper methods to convert arguments from/to objects.
*/
public class ArgumentsBuilder {

  /**
   * If the the size of an argument's serialized data is smaller than this number, the argument will
   * be passed by value. Otherwise it'll be passed by reference.
   */
  private static final int LARGEST_SIZE_PASS_BY_VALUE = 100 * 1024;

  /**
   * This dummy type is also defined in signature.py. Please keep it synced.
   */
  private static final NativeRayObject PYTHON_DUMMY_TYPE = ObjectSerializer
      .serialize("__RAY_DUMMY__".getBytes());

  /**
   * Convert real function arguments to task spec arguments.
   */
  public static List<FunctionArg> wrap(Object[] args, Language language, boolean isDirectCall) {
    List<FunctionArg> result = new ArrayList<>();
    for (Object arg : args) {
      ObjectId refId = null;
      NativeRayObject inlineValue = null;
      if (arg instanceof RayObject) {
        // Direct actor calls don't support object references as arguments.
        if (isDirectCall) {
          throw new IllegalArgumentException(
              "Passing RayObject to a direct call actor is not supported.");
        }
        refId = ((RayObject) arg).getId();
      } else {
        inlineValue = ObjectSerializer.serialize(arg);
        boolean tooLargeToInline =
            !isDirectCall && inlineValue.data.length > LARGEST_SIZE_PASS_BY_VALUE;
        if (tooLargeToInline) {
          // Store large values in the object store and pass them by reference instead.
          RayRuntime runtime = Ray.internal();
          if (runtime instanceof RayMultiWorkerNativeRuntime) {
            runtime = ((RayMultiWorkerNativeRuntime) runtime).getCurrentRuntime();
          }
          refId = ((AbstractRayRuntime) runtime).getObjectStore().putRaw(inlineValue);
          inlineValue = null;
        }
      }
      if (language == Language.PYTHON) {
        // Python-side signature expects a dummy placeholder before every real argument.
        result.add(FunctionArg.passByValue(PYTHON_DUMMY_TYPE));
      }
      result.add(refId != null
          ? FunctionArg.passByReference(refId)
          : FunctionArg.passByValue(inlineValue));
    }
    return result;
  }

  /**
   * Convert list of NativeRayObject to real function arguments.
   */
  public static Object[] unwrap(List<NativeRayObject> args, ClassLoader classLoader) {
    Object[] actualArgs = new Object[args.size()];
    int index = 0;
    for (NativeRayObject arg : args) {
      actualArgs[index++] = ObjectSerializer.deserialize(arg, null, classLoader);
    }
    return actualArgs;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/task/FunctionArg.java | Java | package org.ray.runtime.task;
import com.google.common.base.Preconditions;
import org.ray.api.id.ObjectId;
import org.ray.runtime.object.NativeRayObject;
/**
* Represents a function argument in task spec.
* Either `id` or `data` should be null, when id is not null, this argument will be
* passed by reference, otherwise it will be passed by value.
*/
public class FunctionArg {

  /**
   * The id of this argument (passed by reference).
   */
  public final ObjectId id;

  /**
   * Serialized data of this argument (passed by value).
   */
  public final NativeRayObject value;

  private FunctionArg(ObjectId id, NativeRayObject value) {
    // Exactly one of `id` and `value` must be non-null.
    Preconditions.checkState((id == null) != (value == null));
    this.id = id;
    this.value = value;
  }

  /**
   * Create a FunctionArg that will be passed by value.
   */
  public static FunctionArg passByValue(NativeRayObject value) {
    return new FunctionArg(null, value);
  }

  /**
   * Create a FunctionArg that will be passed by reference.
   */
  public static FunctionArg passByReference(ObjectId id) {
    return new FunctionArg(id, null);
  }

  @Override
  public String toString() {
    return id == null ? value.toString() : "<id>: " + id.toString();
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/task/LocalModeTaskExecutor.java | Java | package org.ray.runtime.task;
import org.ray.api.id.ActorId;
import org.ray.runtime.AbstractRayRuntime;
/**
* Task executor for local mode.
*/
public class LocalModeTaskExecutor extends TaskExecutor {

  public LocalModeTaskExecutor(AbstractRayRuntime runtime) {
    super(runtime);
  }

  /** Checkpoint loading is a no-op in local mode. */
  @Override
  protected void maybeLoadCheckpoint(Object actor, ActorId actorId) {
  }

  /** Checkpoint saving is a no-op in local mode. */
  @Override
  protected void maybeSaveCheckpoint(Object actor, ActorId actorId) {
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/task/LocalModeTaskSubmitter.java | Java | package org.ray.runtime.task;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;
import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import org.ray.api.RayActor;
import org.ray.api.id.ActorId;
import org.ray.api.id.ObjectId;
import org.ray.api.id.TaskId;
import org.ray.api.options.ActorCreationOptions;
import org.ray.api.options.CallOptions;
import org.ray.runtime.actor.LocalModeRayActor;
import org.ray.runtime.context.LocalModeWorkerContext;
import org.ray.runtime.RayDevRuntime;
import org.ray.runtime.functionmanager.FunctionDescriptor;
import org.ray.runtime.functionmanager.JavaFunctionDescriptor;
import org.ray.runtime.generated.Common.ActorCreationTaskSpec;
import org.ray.runtime.generated.Common.ActorTaskSpec;
import org.ray.runtime.generated.Common.Language;
import org.ray.runtime.generated.Common.TaskArg;
import org.ray.runtime.generated.Common.TaskSpec;
import org.ray.runtime.generated.Common.TaskType;
import org.ray.runtime.object.NativeRayObject;
import org.ray.runtime.object.LocalModeObjectStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Task submitter for local mode.
 *
 * <p>Tasks run inside this process on thread pools instead of being sent to a raylet: normal
 * tasks share a fixed-size pool, while each actor gets its own single-threaded executor so its
 * tasks run sequentially. A task whose arguments are not yet in the object store is parked in
 * {@code waitingTasks} and re-submitted from {@link #onObjectPut} once the objects arrive.
 */
public class LocalModeTaskSubmitter implements TaskSubmitter {

  private static final Logger LOGGER = LoggerFactory.getLogger(LocalModeTaskSubmitter.class);

  // Tasks blocked on an unready object, keyed by that object's id. A task waiting on several
  // objects is registered under each of them. Guarded by taskAndObjectLock.
  private final Map<ObjectId, Set<TaskSpec>> waitingTasks = new HashMap<>();
  // Makes "check dependency readiness + register as waiting" atomic w.r.t. onObjectPut.
  private final Object taskAndObjectLock = new Object();
  private final RayDevRuntime runtime;
  private final LocalModeObjectStore objectStore;

  /// The thread pool to execute actor tasks.
  private final Map<ActorId, ExecutorService> actorTaskExecutorServices;

  /// The thread pool to execute normal tasks.
  private final ExecutorService normalTaskExecutorService;

  // Executors that finished a normal task and may be reused. Guarded by taskExecutorLock.
  private final Deque<TaskExecutor> idleTaskExecutors = new ArrayDeque<>();
  // The dedicated executor of each actor, created by its actor creation task.
  // Guarded by taskExecutorLock.
  private final Map<ActorId, TaskExecutor> actorTaskExecutors = new HashMap<>();
  private final Object taskExecutorLock = new Object();
  // The executor used by the task currently running on this thread, if any.
  private final ThreadLocal<TaskExecutor> currentTaskExecutor = new ThreadLocal<>();

  public LocalModeTaskSubmitter(RayDevRuntime runtime, LocalModeObjectStore objectStore,
      int numberThreads) {
    this.runtime = runtime;
    this.objectStore = objectStore;
    // The thread pool that executes normal tasks in parallel.
    normalTaskExecutorService = Executors.newFixedThreadPool(numberThreads);
    // The thread pool that executes actor tasks in parallel.
    actorTaskExecutorServices = new HashMap<>();
  }

  /**
   * Callback invoked when an object becomes available in the object store. Re-submits every
   * task that was waiting on that object and whose dependencies are now all ready.
   */
  public void onObjectPut(ObjectId id) {
    Set<TaskSpec> tasks;
    synchronized (taskAndObjectLock) {
      tasks = waitingTasks.remove(id);
      if (tasks != null) {
        for (TaskSpec task : tasks) {
          Set<ObjectId> unreadyObjects = getUnreadyObjects(task);
          if (unreadyObjects.isEmpty()) {
            submitTaskSpec(task);
          }
        }
      }
    }
  }

  /**
   * Get the worker of current thread. <br> NOTE: Cannot be used for multi-threading in worker.
   */
  public TaskExecutor getCurrentTaskExecutor() {
    return currentTaskExecutor.get();
  }

  /**
   * Get a worker from the worker pool to run the given task.
   */
  private TaskExecutor getTaskExecutor(TaskSpec task) {
    TaskExecutor taskExecutor;
    synchronized (taskExecutorLock) {
      if (task.getType() == TaskType.ACTOR_TASK) {
        // NOTE(review): presumes the actor creation task already registered this executor;
        // a missing entry would yield null here — confirm actor lifecycle guarantees this.
        taskExecutor = actorTaskExecutors.get(getActorId(task));
      } else if (task.getType() == TaskType.ACTOR_CREATION_TASK) {
        // Each actor gets a brand-new executor that stays bound to it for its lifetime.
        taskExecutor = new LocalModeTaskExecutor(runtime);
        actorTaskExecutors.put(getActorId(task), taskExecutor);
      } else if (idleTaskExecutors.size() > 0) {
        // Reuse an idle executor for a normal task.
        taskExecutor = idleTaskExecutors.pop();
      } else {
        taskExecutor = new LocalModeTaskExecutor(runtime);
      }
    }
    currentTaskExecutor.set(taskExecutor);
    return taskExecutor;
  }

  /**
   * Return the worker to the worker pool.
   */
  private void returnTaskExecutor(TaskExecutor worker, TaskSpec taskSpec) {
    currentTaskExecutor.remove();
    synchronized (taskExecutorLock) {
      // Only normal-task executors are pooled; actor executors stay bound to their actor.
      if (taskSpec.getType() == TaskType.NORMAL_TASK) {
        idleTaskExecutors.push(worker);
      }
    }
  }

  /**
   * Returns the ids of this task's dependencies that are not yet in the object store:
   * pass-by-reference arguments plus, for actor tasks, the previous task's dummy object.
   */
  private Set<ObjectId> getUnreadyObjects(TaskSpec taskSpec) {
    Set<ObjectId> unreadyObjects = new HashSet<>();
    // Check whether task arguments are ready.
    for (TaskArg arg : taskSpec.getArgsList()) {
      for (ByteString idByteString : arg.getObjectIdsList()) {
        ObjectId id = new ObjectId(idByteString.toByteArray());
        if (!objectStore.isObjectReady(id)) {
          unreadyObjects.add(id);
        }
      }
    }
    if (taskSpec.getType() == TaskType.ACTOR_TASK) {
      // The previous actor task's dummy object enforces sequential execution order.
      ObjectId dummyObjectId = new ObjectId(
          taskSpec.getActorTaskSpec().getPreviousActorTaskDummyObjectId().toByteArray());
      if (!objectStore.isObjectReady(dummyObjectId)) {
        unreadyObjects.add(dummyObjectId);
      }
    }
    return unreadyObjects;
  }

  /**
   * Builds a TaskSpec pre-filled with the common fields: a random task id, the job id, the
   * function descriptor, and the arguments (by-reference object ids or inlined values).
   */
  private TaskSpec.Builder getTaskSpecBuilder(TaskType taskType,
      FunctionDescriptor functionDescriptor, List<FunctionArg> args) {
    byte[] taskIdBytes = new byte[TaskId.LENGTH];
    new Random().nextBytes(taskIdBytes);
    return TaskSpec.newBuilder()
        .setType(taskType)
        .setLanguage(Language.JAVA)
        .setJobId(
            ByteString.copyFrom(runtime.getRayConfig().getJobId().getBytes()))
        .setTaskId(ByteString.copyFrom(taskIdBytes))
        .addAllFunctionDescriptor(functionDescriptor.toList().stream().map(ByteString::copyFromUtf8)
            .collect(Collectors.toList()))
        // An arg with an id is passed by reference; otherwise its serialized value
        // (data + optional metadata) is inlined into the spec.
        .addAllArgs(args.stream().map(arg -> arg.id != null ? TaskArg.newBuilder()
            .addObjectIds(ByteString.copyFrom(arg.id.getBytes())).build()
            : TaskArg.newBuilder().setData(ByteString.copyFrom(arg.value.data))
                .setMetadata(arg.value.metadata != null ? ByteString
                    .copyFrom(arg.value.metadata) : ByteString.EMPTY).build())
            .collect(Collectors.toList()));
  }

  @Override
  public List<ObjectId> submitTask(FunctionDescriptor functionDescriptor, List<FunctionArg> args,
      int numReturns, CallOptions options) {
    // At most one return object per task is supported here.
    Preconditions.checkState(numReturns <= 1);
    TaskSpec taskSpec = getTaskSpecBuilder(TaskType.NORMAL_TASK, functionDescriptor, args)
        .setNumReturns(numReturns)
        .build();
    submitTaskSpec(taskSpec);
    return getReturnIds(taskSpec);
  }

  @Override
  public RayActor createActor(FunctionDescriptor functionDescriptor, List<FunctionArg> args,
      ActorCreationOptions options) {
    ActorId actorId = ActorId.fromRandom();
    TaskSpec taskSpec = getTaskSpecBuilder(TaskType.ACTOR_CREATION_TASK, functionDescriptor, args)
        .setNumReturns(1)
        .setActorCreationTaskSpec(ActorCreationTaskSpec.newBuilder()
            .setActorId(ByteString.copyFrom(actorId.toByteBuffer()))
            .build())
        .build();
    submitTaskSpec(taskSpec);
    // The creation task's single return object doubles as the first "previous task" dummy.
    return new LocalModeRayActor(actorId, getReturnIds(taskSpec).get(0));
  }

  @Override
  public List<ObjectId> submitActorTask(RayActor actor, FunctionDescriptor functionDescriptor,
      List<FunctionArg> args, int numReturns, CallOptions options) {
    Preconditions.checkState(numReturns <= 1);
    TaskSpec.Builder builder = getTaskSpecBuilder(TaskType.ACTOR_TASK, functionDescriptor, args);
    // Reserve one extra return id: the last one is the dummy object chaining this task to
    // the actor's next task.
    List<ObjectId> returnIds = getReturnIds(
        TaskId.fromBytes(builder.getTaskId().toByteArray()), numReturns + 1);
    TaskSpec taskSpec = builder
        .setNumReturns(numReturns + 1)
        .setActorTaskSpec(
            ActorTaskSpec.newBuilder().setActorId(ByteString.copyFrom(actor.getId().getBytes()))
                .setPreviousActorTaskDummyObjectId(ByteString.copyFrom(
                    ((LocalModeRayActor) actor)
                        .exchangePreviousActorTaskDummyObjectId(returnIds.get(returnIds.size() - 1))
                        .getBytes()))
                .build())
        .build();
    submitTaskSpec(taskSpec);
    if (numReturns == 0) {
      return ImmutableList.of();
    } else {
      return ImmutableList.of(returnIds.get(0));
    }
  }

  public void shutdown() {
    // Shutdown actor task executor service.
    synchronized (actorTaskExecutorServices) {
      for (Map.Entry<ActorId, ExecutorService> item : actorTaskExecutorServices.entrySet()) {
        item.getValue().shutdown();
      }
    }
    // Shutdown normal task executor service.
    normalTaskExecutorService.shutdown();
  }

  /**
   * Extracts the actor id from an actor creation / actor task spec; null for other task types.
   */
  public static ActorId getActorId(TaskSpec taskSpec) {
    ByteString actorId = null;
    if (taskSpec.getType() == TaskType.ACTOR_CREATION_TASK) {
      actorId = taskSpec.getActorCreationTaskSpec().getActorId();
    } else if (taskSpec.getType() == TaskType.ACTOR_TASK) {
      actorId = taskSpec.getActorTaskSpec().getActorId();
    }
    if (actorId == null) {
      return null;
    }
    return ActorId.fromBytes(actorId.toByteArray());
  }

  /**
   * Executes the task right away when all dependencies are ready, otherwise registers it under
   * each unready object id so that {@link #onObjectPut} can re-submit it later.
   */
  private void submitTaskSpec(TaskSpec taskSpec) {
    LOGGER.debug("Submitting task: {}.", taskSpec);
    synchronized (taskAndObjectLock) {
      Set<ObjectId> unreadyObjects = getUnreadyObjects(taskSpec);
      final Runnable runnable = () -> {
        TaskExecutor taskExecutor = getTaskExecutor(taskSpec);
        try {
          // Resolve by-reference args from the object store (blocking get) or take the
          // inlined value.
          List<NativeRayObject> args = getFunctionArgs(taskSpec).stream()
              .map(arg -> arg.id != null ?
                  objectStore.getRaw(Collections.singletonList(arg.id), -1).get(0)
                  : arg.value)
              .collect(Collectors.toList());
          ((LocalModeWorkerContext) runtime.getWorkerContext()).setCurrentTask(taskSpec);
          List<NativeRayObject> returnObjects = taskExecutor
              .execute(getJavaFunctionDescriptor(taskSpec).toList(), args);
          ((LocalModeWorkerContext) runtime.getWorkerContext()).setCurrentTask(null);
          List<ObjectId> returnIds = getReturnIds(taskSpec);
          for (int i = 0; i < returnIds.size(); i++) {
            NativeRayObject putObject;
            if (i >= returnObjects.size()) {
              // If the task is an actor task or an actor creation task,
              // put the dummy object in object store, so those tasks which depends on it
              // can be executed.
              putObject = new NativeRayObject(new byte[]{1}, null);
            } else {
              putObject = returnObjects.get(i);
            }
            objectStore.putRaw(putObject, returnIds.get(i));
          }
        } finally {
          // Always release the executor, even if the task threw.
          returnTaskExecutor(taskExecutor, taskSpec);
        }
      };
      if (unreadyObjects.isEmpty()) {
        // If all dependencies are ready, execute this task.
        if (taskSpec.getType() == TaskType.ACTOR_CREATION_TASK) {
          // Single-threaded service per actor keeps its tasks strictly ordered.
          ExecutorService actorExecutorService = Executors.newSingleThreadExecutor();
          synchronized (actorTaskExecutorServices) {
            actorTaskExecutorServices.put(getActorId(taskSpec), actorExecutorService);
          }
          actorExecutorService.submit(runnable);
        } else if (taskSpec.getType() == TaskType.ACTOR_TASK) {
          synchronized (actorTaskExecutorServices) {
            ExecutorService actorExecutorService = actorTaskExecutorServices.get(getActorId(taskSpec));
            actorExecutorService.submit(runnable);
          }
        } else {
          // Normal task.
          normalTaskExecutorService.submit(runnable);
        }
      } else {
        // If some dependencies aren't ready yet, put this task in waiting list.
        for (ObjectId id : unreadyObjects) {
          waitingTasks.computeIfAbsent(id, k -> new HashSet<>()).add(taskSpec);
        }
      }
    }
  }

  // The descriptor is stored as three UTF-8 strings: class, method, type signature.
  private static JavaFunctionDescriptor getJavaFunctionDescriptor(TaskSpec taskSpec) {
    List<ByteString> functionDescriptor = taskSpec.getFunctionDescriptorList();
    return new JavaFunctionDescriptor(functionDescriptor.get(0).toStringUtf8(),
        functionDescriptor.get(1).toStringUtf8(), functionDescriptor.get(2).toStringUtf8());
  }

  // Converts proto TaskArgs back into FunctionArg objects (by-reference or by-value).
  private static List<FunctionArg> getFunctionArgs(TaskSpec taskSpec) {
    List<FunctionArg> functionArgs = new ArrayList<>();
    for (int i = 0; i < taskSpec.getArgsCount(); i++) {
      TaskArg arg = taskSpec.getArgs(i);
      if (arg.getObjectIdsCount() > 0) {
        functionArgs.add(FunctionArg
            .passByReference(new ObjectId(arg.getObjectIds(0).toByteArray())));
      } else {
        functionArgs.add(FunctionArg.passByValue(
            new NativeRayObject(arg.getData().toByteArray(), arg.getMetadata().toByteArray())));
      }
    }
    return functionArgs;
  }

  private static List<ObjectId> getReturnIds(TaskSpec taskSpec) {
    return getReturnIds(TaskId.fromBytes(taskSpec.getTaskId().toByteArray()),
        taskSpec.getNumReturns());
  }

  /**
   * Builds return object ids by writing the 1-based return index as a 4-byte int at offset
   * TaskId.LENGTH into a buffer that starts with the task id bytes.
   */
  private static List<ObjectId> getReturnIds(TaskId taskId, long numReturns) {
    List<ObjectId> returnIds = new ArrayList<>();
    for (int i = 0; i < numReturns; i++) {
      returnIds.add(ObjectId.fromByteBuffer(
          (ByteBuffer) ByteBuffer.allocate(ObjectId.LENGTH).put(taskId.getBytes())
              .putInt(TaskId.LENGTH, i + 1).position(0)));
    }
    return returnIds;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/task/NativeTaskExecutor.java | Java | package org.ray.runtime.task;
import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.List;
import org.ray.api.Checkpointable;
import org.ray.api.Checkpointable.Checkpoint;
import org.ray.api.Checkpointable.CheckpointContext;
import org.ray.api.id.ActorId;
import org.ray.api.id.UniqueId;
import org.ray.runtime.AbstractRayRuntime;
/**
 * Task executor for cluster mode.
 *
 * <p>Adds actor checkpointing on top of the base {@code TaskExecutor}: checkpoint ids are
 * prepared and acknowledged through the native core worker.
 */
public class NativeTaskExecutor extends TaskExecutor {

  // TODO(hchen): Use the C++ config.
  private static final int NUM_ACTOR_CHECKPOINTS_TO_KEEP = 20;

  /**
   * The native pointer of core worker.
   */
  private final long nativeCoreWorkerPointer;

  /**
   * Number of tasks executed since last actor checkpoint.
   */
  private int numTasksSinceLastCheckpoint = 0;

  /**
   * IDs of this actor's previous checkpoints.
   */
  private List<UniqueId> checkpointIds;

  /**
   * Timestamp of the last actor checkpoint.
   */
  private long lastCheckpointTimestamp = 0;

  public NativeTaskExecutor(long nativeCoreWorkerPointer, AbstractRayRuntime runtime) {
    super(runtime);
    this.nativeCoreWorkerPointer = nativeCoreWorkerPointer;
  }

  /**
   * Possibly takes a checkpoint after an actor task, if the actor implements Checkpointable
   * and its shouldCheckpoint() accepts the current context.
   *
   * <p>NOTE(review): {@code checkpointIds} is only initialized in maybeLoadCheckpoint; this
   * method presumes maybeLoadCheckpoint already ran during actor creation — confirm against
   * the caller in TaskExecutor.
   */
  @Override
  protected void maybeSaveCheckpoint(Object actor, ActorId actorId) {
    if (!(actor instanceof Checkpointable)) {
      return;
    }
    CheckpointContext checkpointContext = new CheckpointContext(actorId,
        ++numTasksSinceLastCheckpoint, System.currentTimeMillis() - lastCheckpointTimestamp);
    Checkpointable checkpointable = (Checkpointable) actor;
    if (!checkpointable.shouldCheckpoint(checkpointContext)) {
      return;
    }
    // Reset the bookkeeping before taking the checkpoint.
    numTasksSinceLastCheckpoint = 0;
    lastCheckpointTimestamp = System.currentTimeMillis();
    // Ask the core worker to prepare a new checkpoint id.
    UniqueId checkpointId = new UniqueId(nativePrepareCheckpoint(nativeCoreWorkerPointer));
    checkpointIds.add(checkpointId);
    // Expire the oldest checkpoint once the retention limit is exceeded.
    if (checkpointIds.size() > NUM_ACTOR_CHECKPOINTS_TO_KEEP) {
      ((Checkpointable) actor).checkpointExpired(actorId, checkpointIds.get(0));
      checkpointIds.remove(0);
    }
    checkpointable.saveCheckpoint(actorId, checkpointId);
  }

  /**
   * Possibly restores actor state from a checkpoint right after actor creation, and resets
   * the checkpoint bookkeeping either way.
   */
  @Override
  protected void maybeLoadCheckpoint(Object actor, ActorId actorId) {
    if (!(actor instanceof Checkpointable)) {
      return;
    }
    numTasksSinceLastCheckpoint = 0;
    lastCheckpointTimestamp = System.currentTimeMillis();
    checkpointIds = new ArrayList<>();
    List<Checkpoint> availableCheckpoints
        = runtime.getGcsClient().getCheckpointsForActor(actorId);
    if (availableCheckpoints.isEmpty()) {
      return;
    }
    // The actor chooses which checkpoint to resume from (or null to start fresh).
    UniqueId checkpointId = ((Checkpointable) actor).loadCheckpoint(actorId, availableCheckpoints);
    if (checkpointId != null) {
      // The returned id must be one of the offered checkpoints.
      boolean checkpointValid = false;
      for (Checkpoint checkpoint : availableCheckpoints) {
        if (checkpoint.checkpointId.equals(checkpointId)) {
          checkpointValid = true;
          break;
        }
      }
      Preconditions.checkArgument(checkpointValid,
          "'loadCheckpoint' must return a checkpoint ID that exists in the "
              + "'availableCheckpoints' list, or null.");
      nativeNotifyActorResumedFromCheckpoint(nativeCoreWorkerPointer, checkpointId.getBytes());
    }
  }

  private static native byte[] nativePrepareCheckpoint(long nativeCoreWorkerPointer);

  private static native void nativeNotifyActorResumedFromCheckpoint(long nativeCoreWorkerPointer,
      byte[] checkpointId);
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/task/NativeTaskSubmitter.java | Java | package org.ray.runtime.task;
import com.google.common.base.Preconditions;
import java.util.List;
import java.util.stream.Collectors;
import org.ray.api.RayActor;
import org.ray.api.id.ObjectId;
import org.ray.api.options.ActorCreationOptions;
import org.ray.api.options.CallOptions;
import org.ray.runtime.actor.NativeRayActor;
import org.ray.runtime.functionmanager.FunctionDescriptor;
/**
 * Task submitter for cluster mode. This is a wrapper class for core worker task interface.
 * Each public method delegates to the corresponding native (JNI) call on the core worker.
 */
public class NativeTaskSubmitter implements TaskSubmitter {

  /**
   * The native pointer of core worker.
   */
  private final long nativeCoreWorkerPointer;

  public NativeTaskSubmitter(long nativeCoreWorkerPointer) {
    this.nativeCoreWorkerPointer = nativeCoreWorkerPointer;
  }

  /**
   * Submits a normal task via JNI and wraps the returned raw id bytes into ObjectIds.
   */
  @Override
  public List<ObjectId> submitTask(FunctionDescriptor functionDescriptor, List<FunctionArg> args,
      int numReturns, CallOptions options) {
    List<byte[]> returnIds = nativeSubmitTask(nativeCoreWorkerPointer, functionDescriptor, args,
        numReturns, options);
    return returnIds.stream().map(ObjectId::new).collect(Collectors.toList());
  }

  /**
   * Creates an actor via JNI and wraps the returned raw actor id into a NativeRayActor handle.
   */
  @Override
  public RayActor createActor(FunctionDescriptor functionDescriptor, List<FunctionArg> args,
      ActorCreationOptions options) {
    byte[] actorId = nativeCreateActor(nativeCoreWorkerPointer, functionDescriptor, args,
        options);
    return NativeRayActor.create(nativeCoreWorkerPointer, actorId, functionDescriptor.getLanguage());
  }

  /**
   * Submits an actor task via JNI. Only NativeRayActor handles are accepted in cluster mode.
   */
  @Override
  public List<ObjectId> submitActorTask(RayActor actor, FunctionDescriptor functionDescriptor,
      List<FunctionArg> args, int numReturns, CallOptions options) {
    Preconditions.checkState(actor instanceof NativeRayActor);
    List<byte[]> returnIds = nativeSubmitActorTask(nativeCoreWorkerPointer,
        actor.getId().getBytes(), functionDescriptor, args, numReturns,
        options);
    return returnIds.stream().map(ObjectId::new).collect(Collectors.toList());
  }

  private static native List<byte[]> nativeSubmitTask(long nativeCoreWorkerPointer,
      FunctionDescriptor functionDescriptor, List<FunctionArg> args, int numReturns,
      CallOptions callOptions);

  private static native byte[] nativeCreateActor(long nativeCoreWorkerPointer,
      FunctionDescriptor functionDescriptor, List<FunctionArg> args,
      ActorCreationOptions actorCreationOptions);

  private static native List<byte[]> nativeSubmitActorTask(long nativeCoreWorkerPointer,
      byte[] actorId, FunctionDescriptor functionDescriptor, List<FunctionArg> args,
      int numReturns, CallOptions callOptions);
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/task/TaskExecutor.java | Java | package org.ray.runtime.task;
import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.List;
import org.ray.api.exception.RayTaskException;
import org.ray.api.id.ActorId;
import org.ray.api.id.JobId;
import org.ray.api.id.TaskId;
import org.ray.runtime.AbstractRayRuntime;
import org.ray.runtime.functionmanager.JavaFunctionDescriptor;
import org.ray.runtime.functionmanager.RayFunction;
import org.ray.runtime.generated.Common.TaskType;
import org.ray.runtime.object.NativeRayObject;
import org.ray.runtime.object.ObjectSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The task executor, which executes tasks assigned by raylet continuously.
 */
public abstract class TaskExecutor {

  private static final Logger LOGGER = LoggerFactory.getLogger(TaskExecutor.class);

  protected final AbstractRayRuntime runtime;

  /**
   * The current actor object, if this worker is an actor, otherwise null.
   */
  protected Object currentActor = null;

  /**
   * The exception that failed the actor creation task, if any.
   * Rethrown on every subsequent actor task so callers see the original failure.
   */
  private Exception actorCreationException = null;

  protected TaskExecutor(AbstractRayRuntime runtime) {
    this.runtime = runtime;
  }

  /**
   * Executes one task and returns its serialized results.
   *
   * <p>A method task invokes the resolved method (on the current actor for actor tasks);
   * a constructor task creates the actor instance and stores it in {@code currentActor}.
   * Failures are serialized as a RayTaskException return value rather than thrown.
   *
   * @param rayFunctionInfo 3-element function descriptor: class name, method name, signature.
   * @param argsBytes serialized task arguments.
   * @return serialized return values; empty when the function has no return value.
   */
  protected List<NativeRayObject> execute(List<String> rayFunctionInfo,
      List<NativeRayObject> argsBytes) {
    JobId jobId = runtime.getWorkerContext().getCurrentJobId();
    TaskType taskType = runtime.getWorkerContext().getCurrentTaskType();
    TaskId taskId = runtime.getWorkerContext().getCurrentTaskId();
    LOGGER.debug("Executing task {}", taskId);
    List<NativeRayObject> returnObjects = new ArrayList<>();
    // Remember the caller's classloader so it can be restored in the finally block.
    ClassLoader oldLoader = Thread.currentThread().getContextClassLoader();
    // Find the executable object.
    RayFunction rayFunction = runtime.getFunctionManager()
        .getFunction(jobId, parseFunctionDescriptor(rayFunctionInfo));
    Preconditions.checkNotNull(rayFunction);
    try {
      // Run the task under the job's classloader so user classes resolve correctly.
      Thread.currentThread().setContextClassLoader(rayFunction.classLoader);
      runtime.getWorkerContext().setCurrentClassLoader(rayFunction.classLoader);
      // Get local actor object and arguments.
      Object actor = null;
      if (taskType == TaskType.ACTOR_TASK) {
        // If the actor failed to construct, every task on it fails with that same exception.
        if (actorCreationException != null) {
          throw actorCreationException;
        }
        actor = currentActor;
      }
      Object[] args = ArgumentsBuilder.unwrap(argsBytes, rayFunction.classLoader);
      // Execute the task.
      Object result;
      if (!rayFunction.isConstructor()) {
        result = rayFunction.getMethod().invoke(actor, args);
      } else {
        result = rayFunction.getConstructor().newInstance(args);
      }
      // Set result
      if (taskType != TaskType.ACTOR_CREATION_TASK) {
        if (taskType == TaskType.ACTOR_TASK) {
          // TODO (kfstorm): handle checkpoint in core worker.
          maybeSaveCheckpoint(actor, runtime.getWorkerContext().getCurrentActorId());
        }
        if (rayFunction.hasReturn()) {
          returnObjects.add(ObjectSerializer.serialize(result));
        }
      } else {
        // TODO (kfstorm): handle checkpoint in core worker.
        maybeLoadCheckpoint(result, runtime.getWorkerContext().getCurrentActorId());
        currentActor = result;
      }
      LOGGER.debug("Finished executing task {}", taskId);
    } catch (Exception e) {
      LOGGER.error("Error executing task " + taskId, e);
      if (taskType != TaskType.ACTOR_CREATION_TASK) {
        // Deliver the failure to the caller as a serialized exception object.
        if (rayFunction.hasReturn()) {
          returnObjects.add(ObjectSerializer
              .serialize(new RayTaskException("Error executing task " + taskId, e)));
        }
      } else {
        // Remember the creation failure; subsequent actor tasks will rethrow it.
        actorCreationException = e;
      }
    } finally {
      Thread.currentThread().setContextClassLoader(oldLoader);
      runtime.getWorkerContext().setCurrentClassLoader(null);
    }
    return returnObjects;
  }

  // Converts the raw 3-string descriptor into a JavaFunctionDescriptor.
  private JavaFunctionDescriptor parseFunctionDescriptor(List<String> rayFunctionInfo) {
    Preconditions.checkState(rayFunctionInfo != null && rayFunctionInfo.size() == 3);
    return new JavaFunctionDescriptor(rayFunctionInfo.get(0), rayFunctionInfo.get(1),
        rayFunctionInfo.get(2));
  }

  // Subclass hook: maybe checkpoint the actor's state after an actor task.
  protected abstract void maybeSaveCheckpoint(Object actor, ActorId actorId);

  // Subclass hook: maybe restore the actor's state right after construction.
  protected abstract void maybeLoadCheckpoint(Object actor, ActorId actorId);
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/task/TaskSubmitter.java | Java | package org.ray.runtime.task;
import java.util.List;
import org.ray.api.RayActor;
import org.ray.api.id.ObjectId;
import org.ray.api.options.ActorCreationOptions;
import org.ray.api.options.CallOptions;
import org.ray.runtime.functionmanager.FunctionDescriptor;
/**
 * A set of methods to submit tasks and create actors.
 *
 * <p>Implemented by {@code NativeTaskSubmitter} for cluster mode and
 * {@code LocalModeTaskSubmitter} for local mode.
 */
public interface TaskSubmitter {

  /**
   * Submit a normal task.
   *
   * @param functionDescriptor The remote function to execute.
   * @param args Arguments of this task.
   * @param numReturns Return object count.
   * @param options Options for this task.
   * @return Ids of the return objects.
   */
  List<ObjectId> submitTask(FunctionDescriptor functionDescriptor, List<FunctionArg> args,
      int numReturns, CallOptions options);

  /**
   * Create an actor.
   *
   * @param functionDescriptor The remote function that generates the actor object.
   * @param args Arguments of this task.
   * @param options Options for this actor creation task.
   * @return Handle to the actor.
   */
  RayActor createActor(FunctionDescriptor functionDescriptor, List<FunctionArg> args,
      ActorCreationOptions options);

  /**
   * Submit an actor task.
   *
   * @param actor Handle to the actor.
   * @param functionDescriptor The remote function to execute.
   * @param args Arguments of this task.
   * @param numReturns Return object count.
   * @param options Options for this task.
   * @return Ids of the return objects.
   */
  List<ObjectId> submitActorTask(RayActor actor, FunctionDescriptor functionDescriptor,
      List<FunctionArg> args, int numReturns, CallOptions options);
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/FileUtil.java | Java | package org.ray.runtime.util;
import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class FileUtil {

  private static final Logger LOGGER = LoggerFactory.getLogger(FileUtil.class);

  /**
   * Represents a temp file.
   *
   * This class implements the `AutoCloseable` interface. It can be used in a `try-with-resource`
   * block. When exiting the block, the temp file will be automatically removed.
   */
  public static class TempFile implements AutoCloseable {

    // The underlying file on disk; deleted when close() is called.
    final File file;

    TempFile(File file) {
      this.file = file;
    }

    public File getFile() {
      return file;
    }

    @Override
    public void close() {
      // Deletion failure is non-fatal: log a warning and keep going.
      if (!file.delete()) {
        LOGGER.warn("Couldn't delete temp file {}", file.getAbsolutePath());
      }
    }
  }

  /**
   * Get a temp file from resource.
   *
   * @param resourceFileName File name.
   * @return A `TempFile` object holding a copy of the classpath resource.
   * @throws RuntimeException if the temp file can't be created or the resource can't be copied.
   */
  public static TempFile getTempFileFromResource(String resourceFileName) {
    File file;
    try {
      file = File.createTempFile(resourceFileName, "");
    } catch (IOException e) {
      throw new RuntimeException("Couldn't create temp file " + resourceFileName, e);
    }
    try (InputStream in = FileUtil.class.getResourceAsStream("/" + resourceFileName)) {
      // Fix: Guava's Preconditions uses %s placeholders (not SLF4J-style {}), so the
      // original "{} doesn't exist." template was never substituted with the file name.
      Preconditions.checkNotNull(in, "%s doesn't exist.", resourceFileName);
      Files.copy(in, Paths.get(file.getCanonicalPath()), StandardCopyOption.REPLACE_EXISTING);
    } catch (IOException e) {
      throw new RuntimeException("Couldn't get temp file from resource " + resourceFileName, e);
    }
    return new TempFile(file);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/IdUtil.java | Java | package org.ray.runtime.util;
import org.ray.api.id.BaseId;
/**
 * Helper method for different Ids. Note: any changes to these methods must be synced with C++
 * helper functions in src/ray/common/id.h
 */
public class IdUtil {

  /**
   * Compute the murmur hash code of this ID.
   */
  public static long murmurHashCode(BaseId id) {
    return murmurHash64A(id.getBytes(), id.size(), 0);
  }

  /**
   * This method is the same as `Hash()` method of `ID` class in ray/src/ray/common/id.h
   *
   * <p>MurmurHash64A: processes the input 8 bytes at a time, folds in the trailing 1-7 bytes,
   * then applies a final avalanche mix.
   */
  private static long murmurHash64A(byte[] data, int length, int seed) {
    // Multiplier and shift constants from the reference implementation.
    final long m = 0xc6a4a7935bd1e995L;
    final int r = 47;
    // Seed (treated as unsigned 32-bit) xor'ed with length * m.
    long h = (seed & 0xFFFFFFFFL) ^ (length * m);

    // Main loop: mix one little-endian 64-bit block per iteration.
    int length8 = length / 8;
    for (int i = 0; i < length8; i++) {
      final int i8 = i * 8;
      long k = ((long) data[i8] & 0xff)
          + (((long) data[i8 + 1] & 0xff) << 8)
          + (((long) data[i8 + 2] & 0xff) << 16)
          + (((long) data[i8 + 3] & 0xff) << 24)
          + (((long) data[i8 + 4] & 0xff) << 32)
          + (((long) data[i8 + 5] & 0xff) << 40)
          + (((long) data[i8 + 6] & 0xff) << 48)
          + (((long) data[i8 + 7] & 0xff) << 56);
      k *= m;
      k ^= k >>> r;
      k *= m;
      h ^= k;
      h *= m;
    }

    // Tail: the cascading ifs emulate the fall-through switch of the C reference,
    // folding in the remaining (length % 8) bytes. `length & ~7` is the start of the tail.
    final int remaining = length % 8;
    if (remaining >= 7) {
      h ^= (long) (data[(length & ~7) + 6] & 0xff) << 48;
    }
    if (remaining >= 6) {
      h ^= (long) (data[(length & ~7) + 5] & 0xff) << 40;
    }
    if (remaining >= 5) {
      h ^= (long) (data[(length & ~7) + 4] & 0xff) << 32;
    }
    if (remaining >= 4) {
      h ^= (long) (data[(length & ~7) + 3] & 0xff) << 24;
    }
    if (remaining >= 3) {
      h ^= (long) (data[(length & ~7) + 2] & 0xff) << 16;
    }
    if (remaining >= 2) {
      h ^= (long) (data[(length & ~7) + 1] & 0xff) << 8;
    }
    if (remaining >= 1) {
      h ^= (long) (data[length & ~7] & 0xff);
      // Matches the C switch, where case 1 falls through to `h *= m`.
      h *= m;
    }

    // Final avalanche.
    h ^= h >>> r;
    h *= m;
    h ^= h >>> r;
    return h;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/JniExceptionUtil.java | Java | package org.ray.runtime.util;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// Required by JNI macro RAY_CHECK_JAVA_EXCEPTION
public final class JniExceptionUtil {

  private static final Logger LOGGER = LoggerFactory.getLogger(JniExceptionUtil.class);

  /**
   * Logs an exception that escaped Java code invoked from JNI and renders its stack trace.
   *
   * @param fileName the C++ source file that detected the exception.
   * @param lineNumber the line in that file.
   * @param function the C++ function name.
   * @param throwable the Java exception to report.
   * @return the exception's full stack trace as a string, handed back to JNI.
   */
  public static String getStackTrace(String fileName, int lineNumber, String function,
      Throwable throwable) {
    // SLF4J treats the trailing throwable argument as the attached exception.
    LOGGER.error("An unexpected exception occurred while executing Java code from JNI ({}:{} {}).",
        fileName, lineNumber, function, throwable);
    // Return the exception in string form to JNI.
    final String rendered = ExceptionUtils.getStackTrace(throwable);
    return rendered;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/JniUtils.java | Java | package org.ray.runtime.util;
import com.google.common.base.Strings;
import com.google.common.collect.Sets;
import com.sun.jna.NativeLibrary;
import java.lang.reflect.Field;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class JniUtils {

  private static final Logger LOGGER = LoggerFactory.getLogger(JniUtils.class);

  // Names of libraries that were already loaded; prevents loading the same one twice.
  private static Set<String> loadedLibs = Sets.newHashSet();

  /**
   * Loads the native library specified by the <code>libraryName</code> argument.
   * The <code>libraryName</code> argument must not contain any platform specific
   * prefix, file extension or path.
   *
   * @param libraryName the name of the library.
   */
  public static synchronized void loadLibrary(String libraryName) {
    loadLibrary(libraryName, false);
  }

  /**
   * Loads the native library specified by the <code>libraryName</code> argument.
   * The <code>libraryName</code> argument must not contain any platform specific
   * prefix, file extension or path.
   *
   * @param libraryName the name of the library.
   * @param exportSymbols export symbols of library so that it can be used by other libs.
   */
  public static synchronized void loadLibrary(String libraryName, boolean exportSymbols) {
    if (!loadedLibs.contains(libraryName)) {
      LOGGER.debug("Loading native library {}.", libraryName);
      // Load native library.
      // Maps e.g. "foo" to the platform file name ("libfoo.so" / "libfoo.dylib").
      String fileName = System.mapLibraryName(libraryName);
      String libPath = null;
      // The library ships inside the jar; extract it to a temp file before loading.
      // The temp file is deleted when the try block exits (TempFile is AutoCloseable).
      try (FileUtil.TempFile libFile = FileUtil.getTempFileFromResource(fileName)) {
        libPath = libFile.getFile().getAbsolutePath();
        if (exportSymbols) {
          // Expose library symbols using RTLD_GLOBAL which may be depended by other shared
          // libraries.
          NativeLibrary.getInstance(libFile.getFile().getAbsolutePath());
        }
        System.load(libPath);
      }
      LOGGER.debug("Native library loaded.");
      resetLibraryPath(libPath);
      loadedLibs.add(libraryName);
    }
  }

  /**
   * This is a hack to reset library path at runtime. Please don't use it outside of ray
   *
   * <p>NOTE(review): mutating {@code ClassLoader.sys_paths} via reflection is a Java 8 era
   * trick and may fail on newer JDKs with stronger encapsulation — confirm the target JDK.
   */
  public static synchronized void resetLibraryPath(String libPath) {
    if (Strings.isNullOrEmpty(libPath)) {
      return;
    }
    String path = System.getProperty("java.library.path");
    String separator = System.getProperty("path.separator");
    if (Strings.isNullOrEmpty(path)) {
      path = "";
    } else {
      path += separator;
    }
    path += String.join(separator, libPath);
    // This is a hack to reset library path at runtime,
    // see https://stackoverflow.com/questions/15409223/.
    System.setProperty("java.library.path", path);
    // Set sys_paths to null so that java.library.path will be re-evaluated next time it is needed.
    final Field sysPathsField;
    try {
      sysPathsField = ClassLoader.class.getDeclaredField("sys_paths");
      sysPathsField.setAccessible(true);
      sysPathsField.set(null, null);
    } catch (NoSuchFieldException | IllegalAccessException e) {
      // Best effort: log and continue; the library was still loaded via System.load above.
      LOGGER.error("Failed to set library path.", e);
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/LambdaUtils.java | Java | package org.ray.runtime.util;
import java.io.Serializable;
import java.lang.invoke.SerializedLambda;
import java.lang.reflect.Method;
/**
 * Extracts the {@link SerializedLambda} form of a serializable lambda.
 *
 * <p>see http://cr.openjdk.java.net/~briangoetz/lambda/lambda-translation.html.
 */
public final class LambdaUtils {

  private LambdaUtils() {
  }

  /**
   * Returns the {@link SerializedLambda} describing the given serializable lambda.
   *
   * <p>Serializable lambda classes carry a private {@code writeReplace} method that returns
   * their SerializedLambda; this helper invokes it reflectively. Note this translation
   * mechanism may change in future JDK releases.
   *
   * @param lambda a lambda instance that implements {@link Serializable}.
   * @throws RuntimeException if the lambda cannot be introspected.
   */
  public static SerializedLambda getSerializedLambda(Serializable lambda) {
    try {
      final Method writeReplace = lambda.getClass().getDeclaredMethod("writeReplace");
      writeReplace.setAccessible(true);
      return (SerializedLambda) writeReplace.invoke(lambda);
    } catch (Exception e) {
      throw new RuntimeException("failed to getSerializedLambda:" + lambda.getClass().getName(), e);
    }
  }
}
java/runtime/src/main/java/org/ray/runtime/util/NetworkUtil.java | Java | package org.ray.runtime.util;
import com.google.common.base.Strings;
import java.io.IOException;
import java.net.DatagramSocket;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.ServerSocket;
import java.util.Enumeration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class NetworkUtil {

  private static final Logger LOGGER = LoggerFactory.getLogger(NetworkUtil.class);

  /**
   * Returns the first IPv4 address of the named network interface, or of the first usable
   * (up, non-loopback, non-virtual) interface when {@code interfaceName} is null or empty.
   * Falls back to "127.0.0.1" when nothing suitable is found or enumeration fails.
   *
   * @param interfaceName display name of the preferred interface; may be null or empty.
   */
  public static String getIpAddress(String interfaceName) {
    try {
      Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
      while (interfaces.hasMoreElements()) {
        NetworkInterface current = interfaces.nextElement();
        // Skip interfaces that can't carry real traffic.
        if (!current.isUp() || current.isLoopback() || current.isVirtual()) {
          continue;
        }
        // If a specific interface was requested, skip all others.
        if (!Strings.isNullOrEmpty(interfaceName) && !interfaceName
            .equals(current.getDisplayName())) {
          continue;
        }
        Enumeration<InetAddress> addresses = current.getInetAddresses();
        while (addresses.hasMoreElements()) {
          InetAddress addr = addresses.nextElement();
          if (addr.isLoopbackAddress()) {
            continue;
          }
          // Only IPv4 addresses are returned.
          if (addr instanceof Inet6Address) {
            continue;
          }
          return addr.getHostAddress();
        }
      }
      LOGGER.warn("You need to correctly specify [ray.java] net_interface in config.");
    } catch (Exception e) {
      LOGGER.error("Can't get ip address, use 127.0.0.1 as default.", e);
    }
    return "127.0.0.1";
  }

  /**
   * Asks the OS for a currently unused TCP port.
   *
   * <p>The probe socket is released before returning, so another process could grab the port
   * in the meantime; callers should be prepared to retry.
   *
   * @return an ephemeral port that was free at probe time.
   * @throws RuntimeException if no port could be bound.
   */
  public static int getUnusedPort() {
    // Fix: use try-with-resources so the probe socket is closed even when bind() throws
    // (the previous implementation leaked the socket on the exception path).
    try (ServerSocket ss = new ServerSocket()) {
      ss.bind(new InetSocketAddress(0));
      return ss.getLocalPort();
    } catch (IOException e) {
      throw new RuntimeException("Failed to bind to an available port.", e);
    }
  }

  /**
   * Checks whether both a TCP and a UDP socket can currently be bound to the given port.
   *
   * @param port port number; must be in [1, 65535].
   * @return true if the port could be bound for both TCP and UDP.
   * @throws IllegalArgumentException if the port is out of range.
   */
  public static boolean isPortAvailable(int port) {
    if (port < 1 || port > 65535) {
      throw new IllegalArgumentException("Invalid start port: " + port);
    }
    try (ServerSocket ss = new ServerSocket(port); DatagramSocket ds = new DatagramSocket(port)) {
      ss.setReuseAddress(true);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException ignored) {
      // Binding failed, i.e. the port is already in use (or otherwise unavailable).
      return false;
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/ResourceUtil.java | Java | package org.ray.runtime.util;
import java.util.HashMap;
import java.util.Map;
public class ResourceUtil {

  public static final String CPU_LITERAL = "CPU";
  public static final String GPU_LITERAL = "GPU";

  /**
   * Render a resources map as the comma-separated form consumed by the raylet
   * command line, e.g. {"CPU": 4.0} becomes "CPU,4.0".
   *
   * @param resources The resources map to render; may be null.
   * @return The command-line string; empty when the map is null or empty.
   */
  public static String getResourcesStringFromMap(Map<String, Double> resources) {
    if (resources == null) {
      return "";
    }
    StringBuilder joined = new StringBuilder();
    int remaining = resources.size();
    for (Map.Entry<String, Double> entry : resources.entrySet()) {
      joined.append(entry.getKey()).append(",").append(entry.getValue());
      remaining -= 1;
      // Separator between entries only, never trailing.
      if (remaining > 0) {
        joined.append(",");
      }
    }
    return joined.toString();
  }

  /**
   * Parse a static-resources configuration string such as "CPU:4,GPU:2" into a map
   * from resource name to quantity.
   *
   * @param resources The configuration string; may be null.
   * @return Map of resource name to quantity; empty when the input is null or blank.
   * @throws IllegalArgumentException if any entry is not a "name:value" pair.
   */
  public static Map<String, Double> getResourcesMapFromString(String resources)
      throws IllegalArgumentException {
    Map<String, Double> parsed = new HashMap<>();
    if (resources == null) {
      return parsed;
    }
    for (String item : resources.split(",")) {
      String entry = item.trim();
      if (entry.isEmpty()) {
        continue;
      }
      String[] pair = entry.split(":");
      if (pair.length != 2) {
        throw new IllegalArgumentException("Format of static resources configure is invalid.");
      }
      parsed.put(pair[0].trim(), Double.valueOf(pair[1].trim()));
    }
    return parsed;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/Serializer.java | Java | package org.ray.runtime.util;
import org.nustaq.serialization.FSTConfiguration;
import org.ray.runtime.actor.NativeRayActor;
import org.ray.runtime.actor.NativeRayActorSerializer;
/**
* Java object serialization TODO: use others (e.g. Arrow) for higher performance
*/
public class Serializer {

  // Thread-local FST configuration: FSTConfiguration is not thread-safe, so each
  // thread gets its own instance with the custom actor-handle serializer registered.
  private static final ThreadLocal<FSTConfiguration> conf = ThreadLocal.withInitial(() -> {
    FSTConfiguration conf = FSTConfiguration.createDefaultConfiguration();
    conf.registerSerializer(NativeRayActor.class, new NativeRayActorSerializer(), true);
    return conf;
  });

  /**
   * Serializes an object with the current thread's configured class loader.
   */
  public static byte[] encode(Object obj) {
    return conf.get().asByteArray(obj);
  }

  /**
   * Serializes an object, temporarily switching the FST configuration to the given
   * class loader. The previous class loader is always restored, even when
   * serialization fails.
   *
   * @param obj the object to serialize.
   * @param classLoader class loader to resolve classes with; may be null.
   * @return the serialized bytes.
   */
  public static byte[] encode(Object obj, ClassLoader classLoader) {
    FSTConfiguration current = conf.get();
    if (classLoader == null || classLoader == current.getClassLoader()) {
      return current.asByteArray(obj);
    }
    ClassLoader old = current.getClassLoader();
    current.setClassLoader(classLoader);
    try {
      return current.asByteArray(obj);
    } finally {
      // Restore unconditionally so an exception can't leak the temporary class
      // loader into later (de)serializations on this thread.
      current.setClassLoader(old);
    }
  }

  /**
   * Deserializes bytes with the current thread's configured class loader.
   */
  @SuppressWarnings("unchecked")
  public static <T> T decode(byte[] bs) {
    return (T) conf.get().asObject(bs);
  }

  /**
   * Deserializes bytes, temporarily switching the FST configuration to the given
   * class loader. The previous class loader is always restored, even when
   * deserialization fails.
   *
   * @param bs the serialized bytes.
   * @param classLoader class loader to resolve classes with; may be null.
   * @return the deserialized object.
   */
  @SuppressWarnings("unchecked")
  public static <T> T decode(byte[] bs, ClassLoader classLoader) {
    FSTConfiguration current = conf.get();
    if (classLoader == null || classLoader == current.getClassLoader()) {
      return (T) current.asObject(bs);
    }
    ClassLoader old = current.getClassLoader();
    current.setClassLoader(classLoader);
    try {
      return (T) current.asObject(bs);
    } finally {
      current.setClassLoader(old);
    }
  }

  /**
   * Permanently sets the class loader of the current thread's FST configuration.
   */
  public static void setClassloader(ClassLoader classLoader) {
    conf.get().setClassLoader(classLoader);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/SystemUtil.java | Java | package org.ray.runtime.util;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* some utilities for system process.
*/
public class SystemUtil {

  private static final Logger LOGGER = LoggerFactory.getLogger(SystemUtil.class);

  static final ReentrantLock pidlock = new ReentrantLock();

  // Lazily computed process id; volatile so the unlocked fast-path read in pid()
  // is safe under the double-checked locking pattern.
  static volatile Integer pid;

  /** Returns the current user's home directory. */
  public static String userHome() {
    return System.getProperty("user.home");
  }

  /** Returns the JVM's current working directory. */
  public static String userDir() {
    return System.getProperty("user.dir");
  }

  /** Returns true if the given class was loaded from a jar file. */
  public static boolean startWithJar(Class<?> cls) {
    return cls.getResource(cls.getSimpleName() + ".class").getFile().split("!")[0].endsWith(".jar");
  }

  /**
   * Returns true if the named class was loaded from a jar file, or false when the
   * class cannot be found (the failure is logged).
   */
  public static boolean startWithJar(String clsName) {
    try {
      Class<?> cls = Class.forName(clsName);
      return cls.getResource(cls.getSimpleName() + ".class").getFile().split("!")[0]
          .endsWith(".jar");
    } catch (ClassNotFoundException e) {
      // Report through the logger only; the previous printStackTrace() duplicated
      // the same stack trace on stderr.
      LOGGER.error("error at SystemUtil startWithJar", e);
      return false;
    }
  }

  /**
   * Returns the pid of the current process, parsed from the runtime MX bean name
   * ("pid@hostname"). The value is cached after the first call.
   *
   * @throws RuntimeException if the runtime name has an unexpected format.
   */
  public static int pid() {
    if (pid == null) {
      pidlock.lock();
      try {
        if (pid == null) {
          RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
          String name = runtime.getName();
          int index = name.indexOf("@");
          if (index != -1) {
            pid = Integer.parseInt(name.substring(0, index));
          } else {
            throw new RuntimeException("parse pid error:" + name);
          }
        }
      } finally {
        pidlock.unlock();
      }
    }
    return pid;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/generator/BaseGenerator.java | Java | package org.ray.runtime.util.generator;
/**
 * Shared plumbing for the source-code generators: a string buffer plus helpers
 * that append lines with optional two-space indentation.
 */
public abstract class BaseGenerator {

  /** Maximum number of parameters supported by the generated interfaces/methods. */
  protected static final int MAX_PARAMETERS = 6;

  /** Buffer accumulating the generated source text. */
  protected StringBuilder sb;

  /** Appends {@code line} followed by a newline character. */
  protected void newLine(String line) {
    sb.append(line);
    sb.append("\n");
  }

  /** Appends {@code line} (plus newline) preceded by {@code numIndents} indent levels. */
  protected void newLine(int numIndents, String line) {
    indents(numIndents);
    newLine(line);
  }

  /** Appends {@code numIndents} levels of two-space indentation. */
  protected void indents(int numIndents) {
    int remaining = numIndents;
    while (remaining-- > 0) {
      sb.append("  ");
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/generator/RayCallGenerator.java | Java | package org.ray.runtime.util.generator;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.io.FileUtils;
/**
* A util class that generates `RayCall.java`, which provides type-safe interfaces for `Ray.call`
* and `Ray.createActor`.
*/
public class RayCallGenerator extends BaseGenerator {

  /**
   * Generates the complete source of `RayCall.java`.
   *
   * @return Whole file content of `RayCall.java`.
   */
  private String build() {
    sb = new StringBuilder();
    newLine("// generated automatically, do not modify.");
    newLine("");
    newLine("package org.ray.api;");
    newLine("");
    // Import every RayFuncX / RayFuncVoidX functional interface up to the maximum arity.
    for (int i = 0; i <= MAX_PARAMETERS; i++) {
      newLine("import org.ray.api.function.RayFunc" + i + ";");
    }
    for (int i = 0; i <= MAX_PARAMETERS; i++) {
      newLine("import org.ray.api.function.RayFuncVoid" + i + ";");
    }
    newLine("import org.ray.api.options.ActorCreationOptions;");
    newLine("import org.ray.api.options.CallOptions;");
    newLine("");
    newLine("/**");
    newLine(" * This class provides type-safe interfaces for `Ray.call` and `Ray.createActor`.");
    newLine(" **/");
    newLine("@SuppressWarnings({\"rawtypes\", \"unchecked\"})");
    newLine("class RayCall {");
    newLine(1, "// =======================================");
    newLine(1, "// Methods for remote function invocation.");
    newLine(1, "// =======================================");
    // Normal task calls: every combination of {with, without} return value and
    // {with, without} a CallOptions parameter.
    for (int i = 0; i <= MAX_PARAMETERS; i++) {
      buildCalls(i, false, false, true, false);
      buildCalls(i, false, false, true, true);
      buildCalls(i, false, false, false, false);
      buildCalls(i, false, false, false, true);
    }
    newLine(1, "// ===========================================");
    newLine(1, "// Methods for remote actor method invocation.");
    newLine(1, "// ===========================================");
    // Actor method calls: one generic slot is consumed by the actor type, hence
    // the arity is capped at MAX_PARAMETERS - 1. No options parameter for actors.
    for (int i = 0; i <= MAX_PARAMETERS - 1; i++) {
      buildCalls(i, true, false, true, false);
      buildCalls(i, true, false, false, false);
    }
    newLine(1, "// ===========================");
    newLine(1, "// Methods for actor creation.");
    newLine(1, "// ===========================");
    // Actor creation always "returns" (the actor handle), with and without options.
    for (int i = 0; i <= MAX_PARAMETERS; i++) {
      buildCalls(i, false, true, true, false);
      buildCalls(i, false, true, true, true);
    }
    newLine(1, "// ===========================");
    newLine(1, "// Cross-language methods.");
    newLine(1, "// ===========================");
    // Python task calls, Python actor method calls, and Python actor creation.
    for (int i = 0; i <= MAX_PARAMETERS; i++) {
      buildPyCalls(i, false, false, false);
      buildPyCalls(i, false, false, true);
    }
    for (int i = 0; i <= MAX_PARAMETERS - 1; i++) {
      buildPyCalls(i, true, false, false);
    }
    for (int i = 0; i <= MAX_PARAMETERS; i++) {
      buildPyCalls(i, false, true, false);
      buildPyCalls(i, false, true, true);
    }
    newLine("}");
    return sb.toString();
  }

  /**
   * Build the `Ray.call` or `Ray.createActor` methods with the given number of parameters.
   *
   * @param numParameters the number of parameters
   * @param forActor build actor api when true, otherwise build task api.
   * @param hasReturn if true, build api for functions with return.
   * @param forActorCreation build `Ray.createActor` when true, otherwise build `Ray.call`.
   */
  private void buildCalls(int numParameters, boolean forActor,
      boolean forActorCreation, boolean hasReturn, boolean hasOptionsParam) {
    // Template of the generated function:
    // public static [genericTypes] [returnType] [callFunc]([argsDeclaration]) {
    //   Objects[] args = new Object[]{[args]};
    //   return Ray.internal().[callFunc](f[, actor], args[, options]);
    // }

    // 1) Construct the `genericTypes` part, e.g. `<T0, T1, T2, R>`.
    String genericTypes = "";
    for (int i = 0; i < numParameters; i++) {
      genericTypes += "T" + i + ", ";
    }
    if (forActor) {
      // Actor generic type.
      genericTypes = "A, " + genericTypes;
    }
    // Return generic type.
    if (forActorCreation) {
      genericTypes += "A, ";
    } else {
      if (hasReturn) {
        genericTypes += "R, ";
      }
    }
    if (!genericTypes.isEmpty()) {
      // Trim trailing ", ";
      genericTypes = genericTypes.substring(0, genericTypes.length() - 2);
      genericTypes = "<" + genericTypes + ">";
    }

    // 2) Construct the `returnType` part.
    String returnType;
    if (forActorCreation) {
      returnType = "RayActor<A>";
    } else {
      returnType = hasReturn ? "RayObject<R>" : "void";
    }

    // 3) Construct the `argsDeclaration` part.
    // An actor method's functional interface has one extra slot for the actor itself.
    String argsDeclarationPrefix = String.format("RayFunc%s%d%s f, ",
        hasReturn ? "" : "Void",
        !forActor ? numParameters : numParameters + 1,
        genericTypes);
    if (forActor) {
      argsDeclarationPrefix += "RayActor<A> actor, ";
    }
    String callFunc = forActorCreation ? "createActor" : "call";

    // Enumerate all combinations of the parameters.
    // Each parameter can be either a plain value T or a RayObject<T>, so one
    // overload is emitted per combination (2^numParameters overloads).
    for (String param : generateParameters(numParameters)) {
      String argsDeclaration = argsDeclarationPrefix + param;
      if (hasOptionsParam) {
        argsDeclaration +=
            forActorCreation ? "ActorCreationOptions options, " : "CallOptions options, ";
      }
      // Trim trailing ", ";
      argsDeclaration = argsDeclaration.substring(0, argsDeclaration.length() - 2);
      // Print the first line (method signature).
      newLine(1, String.format(
          "public static%s %s %s(%s) {",
          genericTypes.isEmpty() ? "" : " " + genericTypes, returnType, callFunc, argsDeclaration
      ));
      // 4) Construct the `args` part.
      String args = "";
      for (int i = 0; i < numParameters; i++) {
        args += "t" + i + ", ";
      }
      // Trim trailing ", ";
      if (!args.isEmpty()) {
        args = args.substring(0, args.length() - 2);
      }
      // Print the second line (local args declaration).
      newLine(2, String.format("Object[] args = new Object[]{%s};", args));
      // 5) Construct the third line.
      String callFuncArgs = "f, ";
      if (forActor) {
        callFuncArgs += "actor, ";
      }
      callFuncArgs += "args, ";
      // Actor calls take no options; task/creation calls pass either the options
      // parameter or an explicit null.
      callFuncArgs += forActor ? "" : hasOptionsParam ? "options, " : "null, ";
      callFuncArgs = callFuncArgs.substring(0, callFuncArgs.length() - 2);
      newLine(2, String.format("%sRay.internal().%s(%s);",
          hasReturn ? "return " : "", callFunc, callFuncArgs));
      newLine(1, "}");
    }
  }

  /**
   * Build the `Ray.callPy` or `Ray.createPyActor` methods.
   *
   * @param forActor build actor api when true, otherwise build task api.
   * @param forActorCreation build `Ray.createPyActor` when true, otherwise build `Ray.callPy`.
   */
  private void buildPyCalls(int numParameters, boolean forActor,
      boolean forActorCreation, boolean hasOptionsParam) {
    // Python calls are untyped: every parameter is a plain Object.
    String argList = "";
    String paramList = "";
    for (int i = 0; i < numParameters; i++) {
      paramList += "Object obj" + i + ", ";
      argList += "obj" + i + ", ";
    }
    if (argList.endsWith(", ")) {
      argList = argList.substring(0, argList.length() - 2);
    }
    if (paramList.endsWith(", ")) {
      paramList = paramList.substring(0, paramList.length() - 2);
    }

    // Leading parameters identify the Python target (module/class/function or actor).
    String paramPrefix = "";
    String funcArgs = "";
    if (forActorCreation) {
      paramPrefix += "String moduleName, String className";
      funcArgs += "moduleName, className";
    } else if (forActor) {
      paramPrefix += "RayPyActor pyActor, String functionName";
      funcArgs += "pyActor, functionName";
    } else {
      paramPrefix += "String moduleName, String functionName";
      funcArgs += "moduleName, functionName";
    }
    if (numParameters > 0) {
      paramPrefix += ", ";
    }

    String optionsParam;
    if (hasOptionsParam) {
      optionsParam = forActorCreation ? ", ActorCreationOptions options" : ", CallOptions options";
    } else {
      optionsParam = "";
    }

    String optionsArg;
    if (forActor) {
      optionsArg = "";
    } else {
      if (hasOptionsParam) {
        optionsArg = ", options";
      } else {
        optionsArg = ", null";
      }
    }

    String returnType = !forActorCreation ? "RayObject" : "RayPyActor";
    String funcName = !forActorCreation ? "callPy" : "createPyActor";
    funcArgs += ", args";
    // Method signature.
    newLine(1, String.format(
        "public static %s %s(%s%s) {",
        returnType, funcName, paramPrefix + paramList, optionsParam
    ));
    // Method body.
    newLine(2, String.format("Object[] args = new Object[]{%s};", argList));
    newLine(2, String.format("return Ray.internal().%s(%s%s);", funcName, funcArgs, optionsArg));
    newLine(1, "}");
  }

  /**
   * Enumerates every parameter-list variant for the given arity, where each slot
   * is either a plain value `T{i} t{i}` or a wrapped `RayObject<T{i}> t{i}`.
   *
   * @return 2^numParams declaration strings, each ending with ", ".
   */
  private List<String> generateParameters(int numParams) {
    List<String> res = new ArrayList<>();
    dfs(0, numParams, "", res);
    return res;
  }

  // Depth-first enumeration backing generateParameters: at each position, branch
  // on plain vs. RayObject-wrapped parameter.
  private void dfs(int pos, int numParams, String cur, List<String> res) {
    if (pos >= numParams) {
      res.add(cur);
      return;
    }
    String nextParameter = String.format("T%d t%d, ", pos, pos);
    dfs(pos + 1, numParams, cur + nextParameter, res);
    nextParameter = String.format("RayObject<T%d> t%d, ", pos, pos);
    dfs(pos + 1, numParams, cur + nextParameter, res);
  }

  /**
   * Regenerates `RayCall.java` in place under the repository's api module.
   */
  public static void main(String[] args) throws IOException {
    String path = System.getProperty("user.dir")
        + "/api/src/main/java/org/ray/api/RayCall.java";
    FileUtils.write(new File(path), new RayCallGenerator().build(), Charset.defaultCharset());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/main/java/org/ray/runtime/util/generator/RayFuncGenerator.java | Java | package org.ray.runtime.util.generator;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.commons.io.FileUtils;
/**
* A util class that generates all the RayFuncX classes under org.ray.api.function package.
*/
public class RayFuncGenerator extends BaseGenerator {

  /**
   * Generates the source of one functional interface.
   *
   * @param numParameters arity of the interface (number of T{i} type parameters).
   * @param hasReturn true generates RayFuncN (with R); false generates RayFuncVoidN.
   * @return complete file content for the interface.
   */
  private String generate(int numParameters, boolean hasReturn) {
    sb = new StringBuilder();
    // Build "<T0, T1, ..., R>" and "T0 t0, T1 t1, ..." in lockstep.
    String genericTypes = "";
    String paramList = "";
    for (int i = 0; i < numParameters; i++) {
      genericTypes += "T" + i + ", ";
      if (i > 0) {
        paramList += ", ";
      }
      paramList += String.format("T%d t%d", i, i);
    }
    if (hasReturn) {
      genericTypes += "R, ";
    }
    if (!genericTypes.isEmpty()) {
      // Remove trailing ", ".
      genericTypes = genericTypes.substring(0, genericTypes.length() - 2);
      genericTypes = "<" + genericTypes + ">";
    }
    newLine("// generated automatically, do not modify.");
    newLine("");
    newLine("package org.ray.api.function;");
    newLine("");
    newLine("/**");
    String comment = String.format(
        " * Functional interface for a remote function that has %d parameter%s.",
        numParameters, numParameters > 1 ? "s" : "");
    newLine(comment);
    newLine(" */");
    newLine("@FunctionalInterface");
    String className = "RayFunc" + (hasReturn ? "" : "Void") + numParameters;
    newLine(String.format("public interface %s%s extends %s {",
        className, genericTypes, hasReturn ? "RayFunc" : "RayFuncVoid"));
    newLine("");
    indents(1);
    newLine(String.format("%s apply(%s) throws Exception;", hasReturn ? "R" : "void", paramList));
    newLine("}");
    return sb.toString();
  }

  /**
   * Regenerates all RayFuncN / RayFuncVoidN interfaces (arities 0..MAX_PARAMETERS)
   * in place under the repository's api module.
   */
  public static void main(String[] args) throws IOException {
    String root = System.getProperty("user.dir")
        + "/api/src/main/java/org/ray/api/function/";
    RayFuncGenerator generator = new RayFuncGenerator();
    for (int i = 0; i <= MAX_PARAMETERS; i++) {
      // Functions that have return.
      String content = generator.generate(i, true);
      FileUtils.write(new File(root + "RayFunc" + i + ".java"), content,
          Charset.defaultCharset());
      // Functions that don't have return.
      content = generator.generate(i, false);
      FileUtils.write(new File(root + "RayFuncVoid" + i + ".java"), content,
          Charset.defaultCharset());
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/runtime/src/test/java/org/ray/runtime/functionmanager/FunctionManagerTest.java | Java | package org.ray.runtime.functionmanager;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.ray.api.annotation.RayRemote;
import org.ray.api.function.RayFunc0;
import org.ray.api.function.RayFunc1;
import org.ray.api.id.JobId;
import org.ray.runtime.functionmanager.FunctionManager.JobFunctionTable;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
* Tests for {@link FunctionManager}
*/
public class FunctionManagerTest {

  // Sample remote function used as a lookup target in the tests below.
  @RayRemote
  public static Object foo() {
    return null;
  }

  // Sample actor class: its constructor and `bar` method are the lookup targets.
  @RayRemote
  public static class Bar {

    public Bar() {
    }

    public Object bar() {
      return null;
    }
  }

  // Lambda references and the function descriptors they should resolve to.
  private static RayFunc0<Object> fooFunc;
  private static RayFunc1<Bar, Object> barFunc;
  private static RayFunc0<Bar> barConstructor;
  private static JavaFunctionDescriptor fooDescriptor;
  private static JavaFunctionDescriptor barDescriptor;
  private static JavaFunctionDescriptor barConstructorDescriptor;

  @BeforeClass
  public static void beforeClass() {
    fooFunc = FunctionManagerTest::foo;
    barConstructor = Bar::new;
    barFunc = Bar::bar;
    fooDescriptor = new JavaFunctionDescriptor(FunctionManagerTest.class.getName(), "foo",
        "()Ljava/lang/Object;");
    barDescriptor = new JavaFunctionDescriptor(Bar.class.getName(), "bar",
        "()Ljava/lang/Object;");
    barConstructorDescriptor = new JavaFunctionDescriptor(Bar.class.getName(),
        FunctionManager.CONSTRUCTOR_NAME,
        "()V");
  }

  // Looking up by lambda reference should recover descriptor, constructor flag,
  // and class-level @RayRemote annotation (methods themselves are unannotated).
  @Test
  public void testGetFunctionFromRayFunc() {
    final FunctionManager functionManager = new FunctionManager(null);
    // Test normal function.
    RayFunction func = functionManager.getFunction(JobId.NIL, fooFunc);
    Assert.assertFalse(func.isConstructor());
    Assert.assertEquals(func.getFunctionDescriptor(), fooDescriptor);
    Assert.assertNotNull(func.getRayRemoteAnnotation());

    // Test actor method
    func = functionManager.getFunction(JobId.NIL, barFunc);
    Assert.assertFalse(func.isConstructor());
    Assert.assertEquals(func.getFunctionDescriptor(), barDescriptor);
    Assert.assertNull(func.getRayRemoteAnnotation());

    // Test actor constructor
    func = functionManager.getFunction(JobId.NIL, barConstructor);
    Assert.assertTrue(func.isConstructor());
    Assert.assertEquals(func.getFunctionDescriptor(), barConstructorDescriptor);
    Assert.assertNotNull(func.getRayRemoteAnnotation());
  }

  // Looking up by descriptor should behave identically to the lambda lookup above.
  @Test
  public void testGetFunctionFromFunctionDescriptor() {
    final FunctionManager functionManager = new FunctionManager(null);
    // Test normal function.
    RayFunction func = functionManager.getFunction(JobId.NIL, fooDescriptor);
    Assert.assertFalse(func.isConstructor());
    Assert.assertEquals(func.getFunctionDescriptor(), fooDescriptor);
    Assert.assertNotNull(func.getRayRemoteAnnotation());

    // Test actor method
    func = functionManager.getFunction(JobId.NIL, barDescriptor);
    Assert.assertFalse(func.isConstructor());
    Assert.assertEquals(func.getFunctionDescriptor(), barDescriptor);
    Assert.assertNull(func.getRayRemoteAnnotation());

    // Test actor constructor
    func = functionManager.getFunction(JobId.NIL, barConstructorDescriptor);
    Assert.assertTrue(func.isConstructor());
    Assert.assertEquals(func.getFunctionDescriptor(), barConstructorDescriptor);
    Assert.assertNotNull(func.getRayRemoteAnnotation());
  }

  @Test
  public void testLoadFunctionTableForClass() {
    JobFunctionTable functionTable = new JobFunctionTable(getClass().getClassLoader());
    Map<Pair<String, String>, RayFunction> res = functionTable
        .loadFunctionsForClass(Bar.class.getName());
    // The result should contain 2 entries, one for the constructor, the other for bar.
    Assert.assertEquals(res.size(), 2);
    Assert.assertTrue(res.containsKey(
        ImmutablePair.of(barDescriptor.name, barDescriptor.typeDescriptor)));
    Assert.assertTrue(res.containsKey(
        ImmutablePair.of(barConstructorDescriptor.name, barConstructorDescriptor.typeDescriptor)));
  }

  // End-to-end test: compile a class into a per-job resource directory on disk
  // and verify the FunctionManager can load it for that job.
  @Test
  public void testGetFunctionFromLocalResource() throws Exception {
    JobId jobId = JobId.fromInt(1);
    final String resourcePath = FileUtils.getTempDirectoryPath() + "/ray_test_resources";
    final String jobResourcePath = resourcePath + "/" + jobId.toString();
    File jobResourceDir = new File(jobResourcePath);
    FileUtils.deleteQuietly(jobResourceDir);
    jobResourceDir.mkdirs();
    jobResourceDir.deleteOnExit();

    String demoJavaFile = "";
    demoJavaFile += "public class DemoApp {\n";
    demoJavaFile += "  public static String hello() {\n";
    demoJavaFile += "    return \"hello\";\n";
    demoJavaFile += "  }\n";
    demoJavaFile += "}";

    // Write the demo java file to the job resource path.
    String javaFilePath = jobResourcePath + "/DemoApp.java";
    Files.write(Paths.get(javaFilePath), demoJavaFile.getBytes());

    // Compile the java file.
    JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
    int result = compiler.run(null, null, null, "-d", jobResourcePath, javaFilePath);
    if (result != 0) {
      throw new RuntimeException("Couldn't compile Demo.java.");
    }

    // Test loading the function.
    JavaFunctionDescriptor descriptor = new JavaFunctionDescriptor(
        "DemoApp", "hello", "()Ljava/lang/String;");
    final FunctionManager functionManager = new FunctionManager(resourcePath);
    RayFunction func = functionManager.getFunction(jobId, descriptor);
    Assert.assertEquals(func.getFunctionDescriptor(), descriptor);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test.sh | Shell | #!/usr/bin/env bash
# Cause the script to exit if a single command fails.
set -e
# Show explicitly which commands are currently running.
set -x

# Absolute path of the directory containing this script.
ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd)

# Run a TestNG invocation, treating exit code 2 (some tests were skipped) as
# success; any other non-zero exit code aborts the script.
run_testng() {
  # Reset per invocation: the previous version left exit_code unset when the
  # first command succeeded (making `[ $exit_code -ne 2 ]` a syntax error) and
  # leaked a failed code into later invocations because it wasn't local.
  local exit_code=0
  "$@" || exit_code=$?
  # exit_code == 2 means there are skipped tests.
  if [ "$exit_code" -ne 2 ] && [ "$exit_code" -ne 0 ] ; then
    exit "$exit_code"
  fi
}

pushd $ROOT_DIR/..
echo "Linting Java code with checkstyle."
# NOTE(hchen): The `test_tag_filters` option causes bazel to ignore caches.
# Thus, we add the `build_tests_only` option to avoid re-building everything.
bazel test //java:all --test_tag_filters="checkstyle" --build_tests_only

echo "Build java maven deps."
bazel build //java:gen_maven_deps

echo "Build test jar."
bazel build //java:all_tests_deploy.jar

echo "Running tests under cluster mode."
# TODO(hchen): Ideally, we should use the following bazel command to run Java tests. However, if there're skipped tests,
# TestNG will exit with code 2. And bazel treats it as test failure.
# bazel test //java:all_tests --action_env=ENABLE_MULTI_LANGUAGE_TESTS=1 --test_output="errors" || cluster_exit_code=$?
ENABLE_MULTI_LANGUAGE_TESTS=1 run_testng java -cp $ROOT_DIR/../bazel-bin/java/all_tests_deploy.jar org.testng.TestNG -d /tmp/ray_java_test_output $ROOT_DIR/testng.xml

echo "Running tests under cluster mode with direct actor call turned on."
ENABLE_MULTI_LANGUAGE_TESTS=1 ACTOR_CREATION_OPTIONS_DEFAULT_USE_DIRECT_CALL=1 run_testng java -cp $ROOT_DIR/../bazel-bin/java/all_tests_deploy.jar org.testng.TestNG -d /tmp/ray_java_test_output $ROOT_DIR/testng.xml

echo "Running tests under single-process mode."
# bazel test //java:all_tests --jvmopt="-Dray.run-mode=SINGLE_PROCESS" --test_output="errors" || single_exit_code=$?
run_testng java -Dray.run-mode="SINGLE_PROCESS" -cp $ROOT_DIR/../bazel-bin/java/all_tests_deploy.jar org.testng.TestNG -d /tmp/ray_java_test_output $ROOT_DIR/testng.xml

popd

pushd $ROOT_DIR
echo "Testing maven install."
mvn clean install -DskipTests
popd
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/RayAlterSuiteListener.java | Java | package org.ray.api;
import java.util.List;
import org.ray.api.options.ActorCreationOptions;
import org.testng.IAlterSuiteListener;
import org.testng.xml.XmlGroups;
import org.testng.xml.XmlRun;
import org.testng.xml.XmlSuite;
public class RayAlterSuiteListener implements IAlterSuiteListener {

  /**
   * When direct actor calls are enabled by default, restricts the first suite to
   * tests in the "directCall" group; otherwise leaves the suite untouched.
   */
  @Override
  public void alter(List<XmlSuite> suites) {
    XmlSuite firstSuite = suites.get(0);
    if (ActorCreationOptions.DEFAULT_USE_DIRECT_CALL) {
      XmlRun run = new XmlRun();
      run.onInclude("directCall");
      XmlGroups groups = new XmlGroups();
      groups.setRun(run);
      firstSuite.setGroups(groups);
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/TestProgressListener.java | Java | package org.ray.api;
import java.time.LocalDateTime;
import org.testng.IInvokedMethod;
import org.testng.IInvokedMethodListener;
import org.testng.ITestContext;
import org.testng.ITestListener;
import org.testng.ITestResult;
public class TestProgressListener implements IInvokedMethodListener, ITestListener {

  /** Builds "fully.qualified.ClassName.methodName" for the given test result. */
  private String getFullTestName(ITestResult iTestResult) {
    return String.format("%s.%s",
        iTestResult.getTestClass().getName(),
        iTestResult.getMethod().getMethodName());
  }

  /** Writes a timestamped, visually delimited progress marker to stdout. */
  private void printInfo(String tag, String content) {
    String marker = "============ [" + LocalDateTime.now().toString() + "] [" + tag + "] " + content
        + " ============";
    System.out.println(marker);
  }

  @Override
  public void beforeInvocation(IInvokedMethod method, ITestResult testResult) {
    printInfo("INVOKE METHOD", getFullTestName(testResult));
  }

  @Override
  public void afterInvocation(IInvokedMethod method, ITestResult testResult) {
    // Intentionally empty: only invocation start is reported.
  }

  @Override
  public void onTestStart(ITestResult result) {
    printInfo("TEST START", getFullTestName(result));
  }

  @Override
  public void onTestSuccess(ITestResult result) {
    printInfo("TEST SUCCESS", getFullTestName(result));
  }

  @Override
  public void onTestFailure(ITestResult result) {
    printInfo("TEST FAILURE", getFullTestName(result));
  }

  @Override
  public void onTestSkipped(ITestResult result) {
    printInfo("TEST SKIPPED", getFullTestName(result));
  }

  @Override
  public void onTestFailedButWithinSuccessPercentage(ITestResult result) {
    printInfo("TEST FAILED BUT WITHIN SUCCESS PERCENTAGE", getFullTestName(result));
  }

  @Override
  public void onStart(ITestContext context) {
    // No per-context reporting.
  }

  @Override
  public void onFinish(ITestContext context) {
    // No per-context reporting.
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/TestUtils.java | Java | package org.ray.api;
import com.google.common.base.Preconditions;
import java.io.Serializable;
import java.util.function.Supplier;
import org.ray.api.annotation.RayRemote;
import org.ray.api.options.ActorCreationOptions;
import org.ray.api.runtime.RayRuntime;
import org.ray.runtime.AbstractRayRuntime;
import org.ray.runtime.RayMultiWorkerNativeRuntime;
import org.ray.runtime.config.RunMode;
import org.testng.Assert;
import org.testng.SkipException;
public class TestUtils {

  /** A ~1 MiB serializable payload for tests that need a large object. */
  public static class LargeObject implements Serializable {

    public byte[] data = new byte[1024 * 1024];
  }

  private static final int WAIT_INTERVAL_MS = 5;

  /** Skips the calling test when the runtime is in single-process mode. */
  public static void skipTestUnderSingleProcess() {
    RunMode mode = getRuntime().getRayConfig().runMode;
    if (mode == RunMode.SINGLE_PROCESS) {
      throw new SkipException("This test doesn't work under single-process mode.");
    }
  }

  /** Skips the calling test when the runtime is in cluster mode. */
  public static void skipTestUnderClusterMode() {
    RunMode mode = getRuntime().getRayConfig().runMode;
    if (mode == RunMode.CLUSTER) {
      throw new SkipException("This test doesn't work under cluster mode.");
    }
  }

  /** Skips the calling test when direct actor calls are enabled by default. */
  public static void skipTestIfDirectActorCallEnabled() {
    skipTestIfDirectActorCallEnabled(true);
  }

  /** Skips the calling test when direct actor calls are disabled by default. */
  public static void skipTestIfDirectActorCallDisabled() {
    skipTestIfDirectActorCallEnabled(false);
  }

  private static void skipTestIfDirectActorCallEnabled(boolean enabled) {
    if (enabled != ActorCreationOptions.DEFAULT_USE_DIRECT_CALL) {
      return;
    }
    throw new SkipException(String.format("This test doesn't work when direct actor call is %s.",
        enabled ? "enabled" : "disabled"));
  }

  /**
   * Wait until the given condition is met.
   *
   * @param condition A function that predicts the condition.
   * @param timeoutMs Timeout in milliseconds.
   * @return True if the condition is met within the timeout, false otherwise.
   */
  public static boolean waitForCondition(Supplier<Boolean> condition, int timeoutMs) {
    // Poll every WAIT_INTERVAL_MS; the condition is re-checked as long as the
    // accumulated wait time has not exceeded the timeout.
    int elapsedMs = 0;
    while (elapsedMs <= timeoutMs) {
      if (condition.get()) {
        return true;
      }
      try {
        java.util.concurrent.TimeUnit.MILLISECONDS.sleep(WAIT_INTERVAL_MS);
      } catch (InterruptedException e) {
        throw new RuntimeException(e);
      }
      elapsedMs += WAIT_INTERVAL_MS;
    }
    return false;
  }

  @RayRemote
  private static String hi() {
    return "hi";
  }

  /**
   * Warm up the cluster to make sure there's at least one idle worker.
   *
   * This is needed before calling `wait`. Because, in Travis CI, starting a new worker
   * process could be slower than the wait timeout.
   * TODO(hchen): We should consider supporting always reversing a certain number of
   * idle workers in Raylet's worker pool.
   */
  public static void warmUpCluster() {
    RayObject<String> greeting = Ray.call(TestUtils::hi);
    Assert.assertEquals(greeting.get(), "hi");
  }

  /** Unwraps the current runtime down to the underlying {@link AbstractRayRuntime}. */
  public static AbstractRayRuntime getRuntime() {
    RayRuntime runtime = Ray.internal();
    if (runtime instanceof RayMultiWorkerNativeRuntime) {
      runtime = ((RayMultiWorkerNativeRuntime) runtime).getCurrentRuntime();
    }
    Preconditions.checkState(runtime instanceof AbstractRayRuntime);
    return (AbstractRayRuntime) runtime;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/ActorPressTest.java | Java | package org.ray.api.benchmark;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
import org.testng.annotations.Test;
public class ActorPressTest extends RayBenchmarkTest {

  // Measures single-call latency against one Adder actor.
  @Test
  public void singleLatencyTest() {
    int times = 10;
    RayActor<ActorPressTest.Adder> adder = Ray.createActor(ActorPressTest.Adder::new);
    super.singleLatencyTest(times, adder);
  }

  // Maximum-throughput pressure test: 2 clients firing 20 calls total.
  @Test
  public void maxTest() {
    int clientNum = 2;
    int totalNum = 20;
    RayActor<ActorPressTest.Adder> adder = Ray.createActor(ActorPressTest.Adder::new);
    PressureTestParameter pressureTestParameter = new PressureTestParameter();
    pressureTestParameter.setClientNum(clientNum);
    pressureTestParameter.setTotalNum(totalNum);
    pressureTestParameter.setRayBenchmarkTest(this);
    pressureTestParameter.setRayActor(adder);
    super.maxPressureTest(pressureTestParameter);
  }

  // Rate-limited pressure test: 2 clients at 2 QPS total for 3 seconds.
  @Test
  public void rateLimiterTest() {
    int clientNum = 2;
    int totalQps = 2;
    int duration = 3;
    RayActor<ActorPressTest.Adder> adder = Ray.createActor(ActorPressTest.Adder::new);
    PressureTestParameter pressureTestParameter = new PressureTestParameter();
    pressureTestParameter.setClientNum(clientNum);
    pressureTestParameter.setTotalQps(totalQps);
    pressureTestParameter.setDuration(duration);
    pressureTestParameter.setRayBenchmarkTest(this);
    pressureTestParameter.setRayActor(adder);
    super.rateLimiterPressureTest(pressureTestParameter);
  }

  // Invoked by the benchmark harness for each measured call: one Adder.add(10).
  @Override
  public RayObject<RemoteResult<Integer>> rayCall(RayActor rayActor) {
    return Ray.call(Adder::add, (RayActor<Adder>) rayActor, 10);
  }

  // The running sum is nondeterministic across interleaved clients, so any
  // result is accepted; only latency/throughput are being measured.
  @Override
  public boolean checkResult(Object o) {
    return true;
  }

  // Stateful actor that accumulates a sum and stamps each reply with its
  // completion time (used by the harness to compute latency).
  @RayRemote
  public static class Adder {

    private Integer sum = 0;

    public RemoteResult<Integer> add(Integer n) {
      RemoteResult<Integer> remoteResult = new RemoteResult<>();
      remoteResult.setResult(sum += n);
      remoteResult.setFinishTime(System.nanoTime());
      return remoteResult;
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/MaxPressureTest.java | Java | package org.ray.api.benchmark;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
import org.testng.annotations.Test;
/**
 * MAX-mode benchmark: submits {@code totalNum} plain tasks across
 * {@code clientNum} clients as fast as possible and measures per-task latency.
 */
public class MaxPressureTest extends RayBenchmarkTest {

  public static final int clientNum = 2;
  public static final int totalNum = 10;
  private static final long serialVersionUID = -1684518885171395952L;

  /** Remote task that records its completion timestamp and returns 0. */
  @RayRemote
  public static RemoteResult<Integer> currentTime() {
    RemoteResult<Integer> result = new RemoteResult<>();
    result.setResult(0);
    result.setFinishTime(System.nanoTime());
    return result;
  }

  @Test
  public void test() {
    PressureTestParameter parameter = new PressureTestParameter();
    parameter.setRayBenchmarkTest(this);
    parameter.setClientNum(clientNum);
    parameter.setTotalNum(totalNum);
    super.maxPressureTest(parameter);
  }

  @Override
  public RayObject<RemoteResult<Integer>> rayCall(RayActor rayActor) {
    // This benchmark exercises plain tasks; the actor handle is ignored.
    return Ray.call(MaxPressureTest::currentTime);
  }

  @Override
  public boolean checkResult(Object o) {
    return ((int) o) == 0;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/MicroBenchmarks.java | Java | package org.ray.api.benchmark;
import org.ray.api.Ray;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Standalone micro-benchmarks for the Java worker, run via {@link #main}.
 * Not a TestNG test; intended to be executed manually.
 */
public class MicroBenchmarks {

  private static final Logger LOGGER = LoggerFactory.getLogger(MicroBenchmarks.class);

  /** Trivial remote target; does nothing and returns null. */
  public static Object simpleFunction() {
    return null;
  }

  /**
   * Runs {@code runnable} {@code numRepeats} times and logs total (ms) and
   * average (ns) duration under the given benchmark name.
   */
  private static void time(Runnable runnable, int numRepeats, String name) {
    LOGGER.info("Benchmark \"{}\" started.", name);
    final long start = System.nanoTime();
    for (int i = 0; i < numRepeats; i++) {
      runnable.run();
    }
    final long duration = System.nanoTime() - start;
    LOGGER.info(
        "Benchmark \"{}\" finished, repeated {} times, total duration {} ms," +
            " average duration {} ns.",
        name, numRepeats, duration / 1_000_000, duration / numRepeats);
  }

  /**
   * Benchmark task submission.
   *
   * Note, this benchmark is supposed to measure the elapsed time in the Java worker; we should
   * disable submitting tasks to raylet in `raylet_client.cc` before running this benchmark.
   */
  public static void benchmarkTaskSubmission() {
    final int numRepeats = 1_000_000;
    Ray.init();
    try {
      time(() -> {
        Ray.call(MicroBenchmarks::simpleFunction);
      }, numRepeats, "task submission");
    } finally {
      // Always tear the runtime down, even if the benchmark throws.
      Ray.shutdown();
    }
  }

  public static void main(String[] args) {
    benchmarkTaskSubmission();
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/PressureTestParameter.java | Java | package org.ray.api.benchmark;
import java.io.Serializable;
import org.ray.api.RayActor;
/**
 * Configuration bag for a pressure test run; serialized and shipped to the
 * remote driver task ({@code RayBenchmarkTest.singleClient}).
 */
public class PressureTestParameter implements Serializable {

  private static final long serialVersionUID = -52054601722982473L;

  // Number of concurrent test clients.
  private Integer clientNum = 1;

  // Which pressure-test mode to run (MAX vs. RATE_LIMITER).
  private PressureTestType pressureTestType = PressureTestType.RATE_LIMITER;

  // Total number of tasks across all clients, under the MAX mode.
  private Integer totalNum = 1;

  // Total QPS across all clients, under the RATE_LIMITER mode.
  private Integer totalQps = 1;

  // Duration (seconds) of the pressure test, under the RATE_LIMITER mode.
  private Integer duration = 1;

  // Reference to the current test-case instance (supplies rayCall/checkResult).
  private RayBenchmarkTest rayBenchmarkTest;

  // Reference to the target actor; may be null when only a remote function is tested.
  private RayActor rayActor;

  public Integer getClientNum() {
    return clientNum;
  }

  public void setClientNum(Integer clientNum) {
    this.clientNum = clientNum;
  }

  public PressureTestType getPressureTestType() {
    return pressureTestType;
  }

  public void setPressureTestType(PressureTestType pressureTestType) {
    this.pressureTestType = pressureTestType;
  }

  public Integer getTotalNum() {
    return totalNum;
  }

  public void setTotalNum(Integer totalNum) {
    this.totalNum = totalNum;
  }

  public Integer getTotalQps() {
    return totalQps;
  }

  public void setTotalQps(Integer totalQps) {
    this.totalQps = totalQps;
  }

  public Integer getDuration() {
    return duration;
  }

  public void setDuration(Integer duration) {
    this.duration = duration;
  }

  public RayBenchmarkTest getRayBenchmarkTest() {
    return rayBenchmarkTest;
  }

  public void setRayBenchmarkTest(RayBenchmarkTest rayBenchmarkTest) {
    this.rayBenchmarkTest = rayBenchmarkTest;
  }

  public RayActor getRayActor() {
    return rayActor;
  }

  public void setRayActor(RayActor rayActor) {
    this.rayActor = rayActor;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/PressureTestType.java | Java | package org.ray.api.benchmark;
/**
 * Pressure-test modes supported by {@code RayBenchmarkTest}.
 */
public enum PressureTestType {

  /** Sequential calls; measures per-call round-trip latency. */
  SINGLE_LATENCY,

  /** Submits tasks at a fixed QPS for a fixed duration. */
  RATE_LIMITER,

  /** Submits a fixed total number of tasks as fast as possible. */
  MAX
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/RateLimiterPressureTest.java | Java | package org.ray.api.benchmark;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
import org.testng.annotations.Test;
/**
 * RATE_LIMITER-mode benchmark: submits plain tasks at {@code totalQps} for
 * {@code duration} seconds across {@code clientNum} clients and measures
 * per-task latency.
 */
public class RateLimiterPressureTest extends RayBenchmarkTest {

  public static final int clientNum = 2;
  public static final int totalQps = 2;
  public static final int duration = 10;
  private static final long serialVersionUID = 6616958120966144235L;

  /** Remote task that records its completion timestamp and returns 0. */
  @RayRemote
  public static RemoteResult<Integer> currentTime() {
    RemoteResult<Integer> result = new RemoteResult<>();
    result.setResult(0);
    result.setFinishTime(System.nanoTime());
    return result;
  }

  @Test
  public void test() {
    PressureTestParameter parameter = new PressureTestParameter();
    parameter.setRayBenchmarkTest(this);
    parameter.setClientNum(clientNum);
    parameter.setTotalQps(totalQps);
    parameter.setDuration(duration);
    super.rateLimiterPressureTest(parameter);
  }

  @Override
  public RayObject<RemoteResult<Integer>> rayCall(RayActor rayActor) {
    // This benchmark exercises plain tasks; the actor handle is ignored.
    return Ray.call(RateLimiterPressureTest::currentTime);
  }

  @Override
  public boolean checkResult(Object o) {
    return ((int) o) == 0;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/RayBenchmarkTest.java | Java | package org.ray.api.benchmark;
import com.google.common.util.concurrent.RateLimiter;
import java.io.Serializable;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
import org.ray.api.function.RayFunc1;
import org.ray.api.test.BaseTest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
/**
 * Base class for Ray benchmark tests. Provides three measurement modes:
 * single-call latency, rate-limited (fixed QPS) pressure, and max-throughput
 * pressure. Subclasses supply the actual remote invocation via {@link #rayCall}
 * and result validation via {@link #checkResult}.
 *
 * <p>Latencies are collected in microseconds and summarized by {@link #printList}.
 *
 * @param <T> result type produced by the benchmarked remote call
 */
public abstract class RayBenchmarkTest<T> extends BaseTest implements Serializable {

  private static final Logger LOGGER = LoggerFactory.getLogger(RayBenchmarkTest.class);
  // Not thread-safe, but we only use it from a single thread.
  public static final DecimalFormat df = new DecimalFormat("00.00");
  private static final long serialVersionUID = 416045641835782523L;

  /**
   * Driver task executed on a worker: issues a batch of calls (optionally
   * throttled by a {@link RateLimiter}), then blocks on every result and
   * collects its latency.
   *
   * @return per-call latencies in microseconds, or null if anything failed
   */
  @RayRemote
  private static List<Long> singleClient(PressureTestParameter pressureTestParameter) {
    try {
      List<Long> counterList = new ArrayList<>();
      PressureTestType pressureTestType = pressureTestParameter.getPressureTestType();
      RayBenchmarkTest rayBenchmarkTest = pressureTestParameter.getRayBenchmarkTest();
      int clientNum = pressureTestParameter.getClientNum();
      int len;
      String logPrefix;
      RateLimiter rateLimiter = null;
      if (pressureTestType.equals(PressureTestType.MAX)) {
        // MAX mode: split the total task count evenly across clients, no throttling.
        len = pressureTestParameter.getTotalNum() / clientNum;
        logPrefix = "MAX";
      } else {
        // RATE_LIMITER mode: split the total QPS evenly and run for `duration` seconds.
        int totalQps = pressureTestParameter.getTotalQps();
        int duration = pressureTestParameter.getDuration();
        int qps = totalQps / clientNum;
        rateLimiter = RateLimiter.create(qps);
        len = qps * duration;
        logPrefix = "RATE_LIMITER";
      }
      RemoteResultWrapper[] remoteResultWrappers = new RemoteResultWrapper[len];
      // Phase 1: submit all calls, recording each submission time.
      int i = 0;
      while (i < len) {
        if (rateLimiter != null) {
          rateLimiter.acquire();
        }
        RemoteResultWrapper temp = new RemoteResultWrapper();
        temp.setStartTime(System.nanoTime());
        temp.setRayObject(rayBenchmarkTest.rayCall(pressureTestParameter.getRayActor()));
        remoteResultWrappers[i++] = temp;
      }
      // Phase 2: block on each result; latency = worker-side finish time - submission time.
      int j = 0;
      while (j < len) {
        RemoteResultWrapper temp = remoteResultWrappers[j++];
        RemoteResult remoteResult = (RemoteResult) temp.getRayObject().get();
        long endTime = remoteResult.getFinishTime();
        long costTime = endTime - temp.getStartTime();
        counterList.add(costTime / 1000);
        LOGGER.warn("{}_cost_time:{}ns", logPrefix, costTime);
        Assert.assertTrue(rayBenchmarkTest.checkResult(remoteResult.getResult()));
      }
      return counterList;
    } catch (Exception e) {
      LOGGER.error("singleClient", e);
      return null;
    }
  }

  /**
   * Runs {@code times} sequential calls against {@code rayActor} (may be null
   * for plain tasks) and prints the latency distribution.
   */
  public void singleLatencyTest(int times, RayActor rayActor) {
    List<Long> counterList = new ArrayList<>();
    for (int i = 0; i < times; i++) {
      long startTime = System.nanoTime();
      RayObject<RemoteResult<T>> rayObject = rayCall(rayActor);
      RemoteResult<T> remoteResult = rayObject.get();
      T t = remoteResult.getResult();
      long endTime = System.nanoTime();
      long costTime = endTime - startTime;
      counterList.add(costTime / 1000);
      // Fixed unit label: costTime is in nanoseconds (the microsecond value is
      // costTime / 1000, which is what goes into counterList).
      LOGGER.warn("SINGLE_LATENCY_cost_time: {} ns", costTime);
      Assert.assertTrue(checkResult(t));
    }
    Collections.sort(counterList);
    printList(counterList);
  }

  /** Issues one benchmarked remote call; implemented by each concrete benchmark. */
  public abstract RayObject<RemoteResult<T>> rayCall(RayActor rayActor);

  /** Validates a single call's result value. */
  public abstract boolean checkResult(T t);

  /** Logs the latency distribution; input must be sorted ascending, in microseconds. */
  private void printList(List<Long> list) {
    int len = list.size();
    int middle = len / 2;
    int almostHundred = (int) (len * 0.9999);
    int ninetyNine = (int) (len * 0.99);
    int ninetyFive = (int) (len * 0.95);
    int ninety = (int) (len * 0.9);
    int fifty = (int) (len * 0.5);
    LOGGER.error("Final result of rt as below:");
    LOGGER.error("max: {}μs", list.get(len - 1));
    LOGGER.error("min: {}μs", list.get(0));
    LOGGER.error("median: {}μs", list.get(middle));
    LOGGER.error("99.99% data smaller than: {}μs", list.get(almostHundred));
    LOGGER.error("99% data smaller than: {}μs", list.get(ninetyNine));
    LOGGER.error("95% data smaller than: {}μs", list.get(ninetyFive));
    LOGGER.error("90% data smaller than: {}μs", list.get(ninety));
    LOGGER.error("50% data smaller than: {}μs", list.get(fifty));
  }

  /** Runs a RATE_LIMITER-mode pressure test with the given configuration. */
  public void rateLimiterPressureTest(PressureTestParameter pressureTestParameter) {
    pressureTestParameter.setPressureTestType(PressureTestType.RATE_LIMITER);
    notSinglePressTest(pressureTestParameter);
  }

  /** Fans out `clientNum` remote singleClient drivers and merges their latencies. */
  private void notSinglePressTest(PressureTestParameter pressureTestParameter) {
    List<Long> counterList = new ArrayList<>();
    int clientNum = pressureTestParameter.getClientNum();
    RayObject<List<Long>>[] rayObjects = new RayObject[clientNum];
    for (int i = 0; i < clientNum; i++) {
      // Java compiler can't automatically infer the type of
      // `RayBenchmarkTest::singleClient`, because `RayBenchmarkTest` is a generic class.
      // It will match both `RayFunc1` and `RayFuncVoid1`. This looks like a bug or
      // defect of the Java compiler.
      // TODO(hchen): Figure out how to avoid manually declaring `RayFunc` type in this case.
      RayFunc1<PressureTestParameter, List<Long>> func = RayBenchmarkTest::singleClient;
      rayObjects[i] = Ray.call(func, pressureTestParameter);
    }
    for (int i = 0; i < clientNum; i++) {
      List<Long> subCounterList = rayObjects[i].get();
      // singleClient returns null on failure; fail the test in that case.
      Assert.assertNotNull(subCounterList);
      counterList.addAll(subCounterList);
    }
    Collections.sort(counterList);
    printList(counterList);
  }

  /** Runs a MAX-mode pressure test with the given configuration. */
  public void maxPressureTest(PressureTestParameter pressureTestParameter) {
    pressureTestParameter.setPressureTestType(PressureTestType.MAX);
    notSinglePressTest(pressureTestParameter);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/RemoteResult.java | Java | package org.ray.api.benchmark;
import java.io.Serializable;
/**
 * Serializable carrier pairing a benchmark task's result value with the
 * {@code System.nanoTime()} timestamp at which the task finished on the worker.
 *
 * @param <T> type of the wrapped result value
 */
public class RemoteResult<T> implements Serializable {

  private static final long serialVersionUID = -3825949468039358540L;

  // Completion timestamp in nanoseconds, taken on the worker side.
  private long finishTime;

  // Payload produced by the benchmarked task.
  private T result;

  public T getResult() {
    return this.result;
  }

  public void setResult(T result) {
    this.result = result;
  }

  public long getFinishTime() {
    return this.finishTime;
  }

  public void setFinishTime(long finishTime) {
    this.finishTime = finishTime;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/RemoteResultWrapper.java | Java | package org.ray.api.benchmark;
import org.ray.api.RayObject;
/**
 * Pairs a pending {@link RayObject} result with the time at which the call was
 * submitted, so the caller can compute end-to-end latency once the result is
 * available.
 *
 * @param <T> result type wrapped by the RemoteResult
 */
public class RemoteResultWrapper<T> {

  // Submission timestamp from System.nanoTime().
  private long startTime;

  // Handle to the in-flight remote result.
  private RayObject<RemoteResult<T>> rayObject;

  public long getStartTime() {
    return startTime;
  }

  public void setStartTime(long startTime) {
    this.startTime = startTime;
  }

  public RayObject<RemoteResult<T>> getRayObject() {
    return rayObject;
  }

  public void setRayObject(RayObject<RemoteResult<T>> rayObject) {
    this.rayObject = rayObject;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/benchmark/SingleLatencyTest.java | Java | package org.ray.api.benchmark;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
import org.testng.annotations.Test;
/**
 * Measures single-call round-trip latency of a trivial remote task,
 * repeated {@code totalNum} times sequentially.
 */
public class SingleLatencyTest extends RayBenchmarkTest {

  public static final int totalNum = 10;
  private static final long serialVersionUID = 3559601273941694468L;

  /** Trivial remote task returning the constant 1. */
  @RayRemote
  public static RemoteResult<Integer> doFunc() {
    RemoteResult<Integer> result = new RemoteResult<>();
    result.setResult(1);
    return result;
  }

  @Test
  public void test() {
    // No actor involved; the benchmarked call is a plain task.
    super.singleLatencyTest(totalNum, null);
  }

  @Override
  public RayObject<RemoteResult<Integer>> rayCall(RayActor rayActor) {
    return Ray.call(SingleLatencyTest::doFunc);
  }

  @Override
  public boolean checkResult(Object o) {
    return ((int) o) == 1;
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/ActorReconstructionTest.java | Java | package org.ray.api.test;
import static org.ray.runtime.util.SystemUtil.pid;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.ray.api.Checkpointable;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.TestUtils;
import org.ray.api.annotation.RayRemote;
import org.ray.api.exception.RayActorException;
import org.ray.api.id.ActorId;
import org.ray.api.id.UniqueId;
import org.ray.api.options.ActorCreationOptions;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Verifies that a killed actor process is reconstructed (up to
 * maxReconstructions times) and that a {@link Checkpointable} actor resumes
 * from its checkpoint. Requires a multi-process cluster; skipped in
 * single-process mode.
 */
@Test(groups = {"directCall"})
public class ActorReconstructionTest extends BaseTest {

  /** Simple counter actor that remembers whether it was reconstructed. */
  @RayRemote()
  public static class Counter {

    protected int value = 0;

    private boolean wasCurrentActorReconstructed = false;

    public Counter() {
      // Captured at construction time, i.e. once per (re)start of the actor.
      wasCurrentActorReconstructed = Ray.getRuntimeContext().wasCurrentActorReconstructed();
    }

    public boolean wasCurrentActorReconstructed() {
      return wasCurrentActorReconstructed;
    }

    public int increase() {
      value += 1;
      return value;
    }

    public int getPid() {
      return pid();
    }
  }

  public void testActorReconstruction() throws InterruptedException, IOException {
    TestUtils.skipTestUnderSingleProcess();
    ActorCreationOptions options =
        new ActorCreationOptions.Builder().setMaxReconstructions(1).createActorCreationOptions();
    RayActor<Counter> actor = Ray.createActor(Counter::new, options);
    // Call increase 3 times.
    for (int i = 0; i < 3; i++) {
      Ray.call(Counter::increase, actor).get();
    }

    Assert.assertFalse(Ray.call(Counter::wasCurrentActorReconstructed, actor).get());

    // Kill the actor process.
    // NOTE(review): uses `kill -9` via Runtime.exec, so this test is POSIX-only.
    int pid = Ray.call(Counter::getPid, actor).get();
    Runtime.getRuntime().exec("kill -9 " + pid);
    // Wait for the actor to be killed.
    TimeUnit.SECONDS.sleep(1);

    // Try calling increase on this actor again and check the value is now 4.
    // Direct-call actors lose their state on reconstruction, hence 1 vs 4.
    int value = Ray.call(Counter::increase, actor).get();
    Assert.assertEquals(value, options.useDirectCall ? 1 : 4);

    Assert.assertTrue(Ray.call(Counter::wasCurrentActorReconstructed, actor).get());

    // Kill the actor process again; maxReconstructions=1 is now exhausted.
    pid = Ray.call(Counter::getPid, actor).get();
    Runtime.getRuntime().exec("kill -9 " + pid);
    TimeUnit.SECONDS.sleep(1);

    // Try calling increase on this actor again and this should fail.
    try {
      Ray.call(Counter::increase, actor).get();
      Assert.fail("The above task didn't fail.");
    } catch (RayActorException e) {
      // We should receive a RayActorException because the actor is dead.
    }
  }

  /** Counter that checkpoints when its value reaches 3 and restores from it. */
  public static class CheckpointableCounter extends Counter implements Checkpointable {

    private boolean resumedFromCheckpoint = false;
    private boolean increaseCalled = false;

    @Override
    public int increase() {
      increaseCalled = true;
      return super.increase();
    }

    public boolean wasResumedFromCheckpoint() {
      return resumedFromCheckpoint;
    }

    @Override
    public boolean shouldCheckpoint(CheckpointContext checkpointContext) {
      // Checkpoint the actor when value is increased to 3.
      boolean shouldCheckpoint = increaseCalled && value == 3;
      increaseCalled = false;
      return shouldCheckpoint;
    }

    @Override
    public void saveCheckpoint(ActorId actorId, UniqueId checkpointId) {
      // In practice, user should save the checkpoint id and data to a persistent store.
      // But for simplicity, we don't do that in this unit test.
    }

    @Override
    public UniqueId loadCheckpoint(ActorId actorId, List<Checkpoint> availableCheckpoints) {
      // Restore previous value and return checkpoint id.
      this.value = 3;
      this.resumedFromCheckpoint = true;
      return availableCheckpoints.get(availableCheckpoints.size() - 1).checkpointId;
    }

    @Override
    public void checkpointExpired(ActorId actorId, UniqueId checkpointId) {
      // No persistent storage is used, so nothing to clean up.
    }
  }

  public void testActorCheckpointing() throws IOException, InterruptedException {
    TestUtils.skipTestUnderSingleProcess();
    ActorCreationOptions options =
        new ActorCreationOptions.Builder().setMaxReconstructions(1).createActorCreationOptions();
    RayActor<CheckpointableCounter> actor = Ray.createActor(CheckpointableCounter::new, options);
    // Call increase 3 times; the third call triggers a checkpoint.
    for (int i = 0; i < 3; i++) {
      Ray.call(CheckpointableCounter::increase, actor).get();
    }
    // Assert that the actor wasn't resumed from a checkpoint.
    Assert.assertFalse(Ray.call(CheckpointableCounter::wasResumedFromCheckpoint, actor).get());
    int pid = Ray.call(CheckpointableCounter::getPid, actor).get();
    Runtime.getRuntime().exec("kill -9 " + pid);
    // Wait for the actor to be killed.
    TimeUnit.SECONDS.sleep(1);

    // Try calling increase on this actor again and check the value is now 4.
    int value = Ray.call(CheckpointableCounter::increase, actor).get();
    Assert.assertEquals(value, 4);
    // Assert that the actor was resumed from a checkpoint.
    Assert.assertTrue(Ray.call(CheckpointableCounter::wasResumedFromCheckpoint, actor).get());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/ActorTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.RayPyActor;
import org.ray.api.TestUtils;
import org.ray.api.TestUtils.LargeObject;
import org.ray.api.annotation.RayRemote;
import org.ray.api.exception.UnreconstructableException;
import org.ray.api.id.UniqueId;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Basic actor lifecycle tests: creation (constructor and factory), method
 * calls, passing actor handles as task arguments, direct-call object caching,
 * and unreconstructable-object error propagation.
 */
@Test(groups = {"directCall"})
public class ActorTest extends BaseTest {

  /** Simple stateful counter actor used as the test target. */
  @RayRemote
  public static class Counter {

    private int value;

    public Counter(int initValue) {
      this.value = initValue;
    }

    public int getValue() {
      return value;
    }

    public void increase(int delta) {
      value += delta;
    }

    public int increaseAndGet(int delta) {
      value += delta;
      return value;
    }

    public int accessLargeObject(LargeObject largeObject) {
      value += largeObject.data.length;
      return value;
    }
  }

  public void testCreateAndCallActor() {
    // Test creating an actor from a constructor
    RayActor<Counter> actor = Ray.createActor(Counter::new, 1);
    Assert.assertNotEquals(actor.getId(), UniqueId.NIL);
    // A java actor is not a python actor
    Assert.assertFalse(actor instanceof RayPyActor);
    // Test calling an actor
    Assert.assertEquals(Integer.valueOf(1), Ray.call(Counter::getValue, actor).get());
    Ray.call(Counter::increase, actor, 1);
    Assert.assertEquals(Integer.valueOf(3), Ray.call(Counter::increaseAndGet, actor, 1).get());
  }

  /**
   * Test getting a direct object (an object that is returned by a direct-call task) twice from the
   * object store.
   *
   * Direct objects are stored in core worker's local memory. And it will be removed after the first
   * get. To enable getting it twice, we cache the object in `RayObjectImpl`.
   *
   * NOTE(hchen): this test will run for non-direct actors as well, which doesn't have the above
   * issue and should also succeed.
   */
  public void testGetDirectObjectTwice() {
    RayActor<Counter> actor = Ray.createActor(Counter::new, 1);
    RayObject<Integer> result = Ray.call(Counter::getValue, actor);
    Assert.assertEquals(result.get(), Integer.valueOf(1));
    Assert.assertEquals(result.get(), Integer.valueOf(1));
    // TODO(hchen): The following code will still fail, and can be fixed by using ref counting.
    // Assert.assertEquals(Ray.get(result.getId()), Integer.valueOf(1));
  }

  // Verifies that arguments too large for inline passing go through the object store.
  public void testCallActorWithLargeObject() {
    RayActor<Counter> actor = Ray.createActor(Counter::new, 1);
    LargeObject largeObject = new LargeObject();
    Assert.assertEquals(Integer.valueOf(largeObject.data.length + 1),
        Ray.call(Counter::accessLargeObject, actor, largeObject).get());
  }

  /** Factory method used to create a Counter remotely. */
  @RayRemote
  static Counter factory(int initValue) {
    return new Counter(initValue);
  }

  public void testCreateActorFromFactory() {
    // Test creating an actor from a factory method
    RayActor<Counter> actor = Ray.createActor(ActorTest::factory, 1);
    Assert.assertNotEquals(actor.getId(), UniqueId.NIL);
    // Test calling an actor
    Assert.assertEquals(Integer.valueOf(1), Ray.call(Counter::getValue, actor).get());
  }

  // The following three tasks verify that an actor handle can be passed to
  // another task in different argument positions, and nested inside a list.
  @RayRemote
  static int testActorAsFirstParameter(RayActor<Counter> actor, int delta) {
    RayObject<Integer> res = Ray.call(Counter::increaseAndGet, actor, delta);
    return res.get();
  }

  @RayRemote
  static int testActorAsSecondParameter(int delta, RayActor<Counter> actor) {
    RayObject<Integer> res = Ray.call(Counter::increaseAndGet, actor, delta);
    return res.get();
  }

  @RayRemote
  static int testActorAsFieldOfParameter(List<RayActor<Counter>> actor, int delta) {
    RayObject<Integer> res = Ray.call(Counter::increaseAndGet, actor.get(0), delta);
    return res.get();
  }

  public void testPassActorAsParameter() {
    RayActor<Counter> actor = Ray.createActor(Counter::new, 0);
    Assert.assertEquals(Integer.valueOf(1),
        Ray.call(ActorTest::testActorAsFirstParameter, actor, 1).get());
    Assert.assertEquals(Integer.valueOf(11),
        Ray.call(ActorTest::testActorAsSecondParameter, 10, actor).get());
    Assert.assertEquals(Integer.valueOf(111),
        Ray.call(ActorTest::testActorAsFieldOfParameter, Collections.singletonList(actor), 100)
            .get());
  }

  // Verifies that getting a freed actor object raises UnreconstructableException.
  public void testUnreconstructableActorObject() throws InterruptedException {
    TestUtils.skipTestUnderSingleProcess();
    // The UnreconstructableException is created by raylet.
    // TODO (kfstorm): This should be supported by direct actor call.
    TestUtils.skipTestIfDirectActorCallEnabled();
    RayActor<Counter> counter = Ray.createActor(Counter::new, 100);
    // Call an actor method.
    RayObject value = Ray.call(Counter::getValue, counter);
    Assert.assertEquals(100, value.get());
    // Delete the object from the object store.
    Ray.internal().free(ImmutableList.of(value.getId()), false, false);
    // Wait until the object is deleted, because the above free operation is async.
    while (true) {
      Boolean result = TestUtils.getRuntime().getObjectStore()
          .wait(ImmutableList.of(value.getId()), 1, 0).get(0);
      if (!result) {
        break;
      }
      TimeUnit.MILLISECONDS.sleep(100);
    }

    try {
      // Try getting the object again, this should throw an UnreconstructableException.
      // Use `Ray.get()` to bypass the cache in `RayObjectImpl`.
      Ray.get(value.getId());
      Assert.fail("This line should not be reachable.");
    } catch (UnreconstructableException e) {
      Assert.assertEquals(value.getId(), e.objectId);
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/BaseMultiLanguageTest.java | Java | package org.ray.api.test;
import java.io.File;
import java.lang.ProcessBuilder.Redirect;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.ray.api.Ray;
import org.ray.runtime.util.NetworkUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.SkipException;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
/**
 * Base class for tests that need a real multi-language (Java + Python) Ray
 * cluster. Starts a cluster via the `ray start` CLI before the test class and
 * tears it down afterwards. All of it is skipped unless the
 * ENABLE_MULTI_LANGUAGE_TESTS environment variable is set to "1".
 */
public abstract class BaseMultiLanguageTest {

  private static final Logger LOGGER = LoggerFactory.getLogger(BaseMultiLanguageTest.class);

  private static final String PLASMA_STORE_SOCKET_NAME = "/tmp/ray/test/plasma_store_socket";
  private static final String RAYLET_SOCKET_NAME = "/tmp/ray/test/raylet_socket";

  /**
   * Execute an external command.
   *
   * @param command command-line tokens
   * @param waitTimeoutSeconds maximum seconds to wait for the command to finish
   * @param env extra environment variables for the child process
   * @return whether the command finished within the timeout and exited with status 0
   */
  private boolean executeCommand(List<String> command, int waitTimeoutSeconds,
      Map<String, String> env) {
    try {
      LOGGER.info("Executing command: {}", String.join(" ", command));
      ProcessBuilder processBuilder = new ProcessBuilder(command).redirectOutput(Redirect.INHERIT)
          .redirectError(Redirect.INHERIT);
      for (Entry<String, String> entry : env.entrySet()) {
        processBuilder.environment().put(entry.getKey(), entry.getValue());
      }
      Process process = processBuilder.start();
      // Bug fix: the boolean result of waitFor(timeout, unit) was previously ignored,
      // so a timed-out command made the following exitValue() call throw
      // IllegalThreadStateException. Treat a timeout as an explicit failure instead.
      if (!process.waitFor(waitTimeoutSeconds, TimeUnit.SECONDS)) {
        process.destroyForcibly();
        return false;
      }
      return process.exitValue() == 0;
    } catch (Exception e) {
      throw new RuntimeException("Error executing command " + String.join(" ", command), e);
    }
  }

  /** Skips the test unless multi-language tests are explicitly enabled. */
  private void checkMultiLanguageTestFlag() {
    if (!"1".equals(System.getenv("ENABLE_MULTI_LANGUAGE_TESTS"))) {
      LOGGER.info("Skip Multi-language tests because environment variable "
          + "ENABLE_MULTI_LANGUAGE_TESTS isn't set");
      throw new SkipException("Skip test.");
    }
  }

  /** Starts a local Ray cluster via the CLI and connects this JVM to it. */
  @BeforeClass(alwaysRun = true)
  public void setUp() {
    checkMultiLanguageTestFlag();

    // Delete existing socket files.
    for (String socket : ImmutableList.of(RAYLET_SOCKET_NAME, PLASMA_STORE_SOCKET_NAME)) {
      File file = new File(socket);
      if (file.exists()) {
        file.delete();
      }
    }

    String nodeManagerPort = String.valueOf(NetworkUtil.getUnusedPort());

    // jars in the `ray` wheel doesn't contains test classes, so we add test classes explicitly.
    // Since mvn test classes contains `test` in path and bazel test classes is located at a jar
    // with `test` included in the name, we can check classpath `test` to filter out test classes.
    String classpath = Stream.of(System.getProperty("java.class.path").split(":"))
        .filter(s -> !s.contains(" ") && s.contains("test"))
        .collect(Collectors.joining(":"));
    String workerOptions =
        " -classpath " + classpath;
    // Start ray cluster.
    List<String> startCommand = ImmutableList.of(
        "ray",
        "start",
        "--head",
        "--redis-port=6379",
        String.format("--plasma-store-socket-name=%s", PLASMA_STORE_SOCKET_NAME),
        String.format("--raylet-socket-name=%s", RAYLET_SOCKET_NAME),
        String.format("--node-manager-port=%s", nodeManagerPort),
        "--load-code-from-local",
        "--include-java",
        "--java-worker-options=" + workerOptions
    );
    // Forward the worker-count override to the cluster if one was set via system property.
    String numWorkersPerProcessJava = System
        .getProperty("ray.raylet.config.num_workers_per_process_java");
    if (!Strings.isNullOrEmpty(numWorkersPerProcessJava)) {
      startCommand = ImmutableList.<String>builder().addAll(startCommand)
          .add(String.format("--internal-config={\"num_workers_per_process_java\": %s}",
              numWorkersPerProcessJava)).build();
    }
    if (!executeCommand(startCommand, 10, getRayStartEnv())) {
      throw new RuntimeException("Couldn't start ray cluster.");
    }

    // Connect to the cluster.
    Assert.assertNull(Ray.internal());
    System.setProperty("ray.redis.address", "127.0.0.1:6379");
    System.setProperty("ray.object-store.socket-name", PLASMA_STORE_SOCKET_NAME);
    System.setProperty("ray.raylet.socket-name", RAYLET_SOCKET_NAME);
    System.setProperty("ray.raylet.node-manager-port", nodeManagerPort);
    Ray.init();
  }

  /**
   * @return The environment variables needed for the `ray start` command.
   */
  protected Map<String, String> getRayStartEnv() {
    return ImmutableMap.of();
  }

  /** Disconnects from and stops the cluster, undoing the properties set in setUp. */
  @AfterClass(alwaysRun = true)
  public void tearDown() {
    checkMultiLanguageTestFlag();

    // Disconnect to the cluster.
    Ray.shutdown();
    System.clearProperty("ray.redis.address");
    System.clearProperty("ray.object-store.socket-name");
    System.clearProperty("ray.raylet.socket-name");
    System.clearProperty("ray.raylet.node-manager-port");

    // Stop ray cluster.
    final List<String> stopCommand = ImmutableList.of(
        "ray",
        "stop"
    );
    if (!executeCommand(stopCommand, 10, ImmutableMap.of())) {
      throw new RuntimeException("Couldn't stop ray cluster");
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/BaseTest.java | Java | package org.ray.api.test;
import java.io.File;
import java.lang.reflect.Method;
import java.util.List;
import com.google.common.collect.ImmutableList;
import org.ray.api.Ray;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
/**
 * Common TestNG base class: boots a local Ray runtime before every test method
 * and shuts it down afterwards, removing the socket files it created.
 */
public class BaseTest {

  private static final Logger LOGGER = LoggerFactory.getLogger(BaseTest.class);

  // Socket files created by the runtime; cleaned up in tearDownBase.
  private List<File> filesToDelete = ImmutableList.of();

  @BeforeMethod(alwaysRun = true)
  public void setUpBase(Method method) {
    // A previous test must not have left a runtime behind.
    Assert.assertNull(Ray.internal());
    Ray.init();
    File rayletSocket = new File(Ray.getRuntimeContext().getRayletSocketName());
    File objectStoreSocket = new File(Ray.getRuntimeContext().getObjectStoreSocketName());
    // TODO(pcm): This is a workaround for the issue described
    // in the PR description of https://github.com/ray-project/ray/pull/5450
    // and should be fixed properly.
    File legacySocket = new File("/tmp/ray/test/raylet_socket");
    filesToDelete = ImmutableList.of(rayletSocket, objectStoreSocket, legacySocket);
    // Make sure the files will be deleted even if the test doesn't exit gracefully.
    for (File file : filesToDelete) {
      file.deleteOnExit();
    }
  }

  @AfterMethod(alwaysRun = true)
  public void tearDownBase() {
    Ray.shutdown();
    filesToDelete.forEach(File::delete);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/ClassLoaderTest.java | Java | package org.ray.api.test;
import java.io.File;
import java.lang.reflect.Method;
import java.nio.file.Files;
import java.nio.file.Paths;
import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;
import org.apache.commons.io.FileUtils;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.options.ActorCreationOptions;
import org.ray.runtime.AbstractRayRuntime;
import org.ray.runtime.functionmanager.FunctionDescriptor;
import org.ray.runtime.functionmanager.JavaFunctionDescriptor;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class ClassLoaderTest extends BaseTest {
private final String resourcePath = FileUtils.getTempDirectoryPath()
+ "/ray_test/ClassLoaderTest";
  /** Points the runtime at a per-test resource path so classes load from it. */
  @BeforeClass
  public void setUp() {
    // The potential issue of multiple `ClassLoader` instances for the same job on multi-threading
    // scenario only occurs if the classes are loaded from the job resource path.
    System.setProperty("ray.job.resource-path", resourcePath);
  }
@AfterClass
public void tearDown() {
System.clearProperty("ray.job.resource-path");
}
@Test
public void testClassLoaderInMultiThreading() throws Exception {
TestUtils.skipTestUnderSingleProcess();
Assert.assertTrue(TestUtils.getRuntime().getRayConfig().numWorkersPerProcess > 1);
final String jobResourcePath = resourcePath + "/" + Ray.getRuntimeContext().getCurrentJobId();
File jobResourceDir = new File(jobResourcePath);
FileUtils.deleteQuietly(jobResourceDir);
jobResourceDir.mkdirs();
jobResourceDir.deleteOnExit();
// In this test case the class is expected to be loaded from the job resource path, so we need
// to put the compiled class file into the job resource path and load it later.
String testJavaFile = ""
+ "import java.lang.management.ManagementFactory;\n"
+ "import java.lang.management.RuntimeMXBean;\n"
+ "\n"
+ "public class ClassLoaderTester {\n"
+ "\n"
+ " static volatile int value;\n"
+ "\n"
+ " public int getPid() {\n"
+ " RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();\n"
+ " String name = runtime.getName();\n"
+ " int index = name.indexOf(\"@\");\n"
+ " if (index != -1) {\n"
+ " return Integer.parseInt(name.substring(0, index));\n"
+ " } else {\n"
+ " throw new RuntimeException(\"parse pid error:\" + name);\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " public int increase() throws InterruptedException {\n"
+ " return increaseInternal();\n"
+ " }\n"
+ "\n"
+ " public static synchronized int increaseInternal() throws InterruptedException {\n"
+ " int oldValue = value;\n"
+ " Thread.sleep(10 * 1000);\n"
+ " value = oldValue + 1;\n"
+ " return value;\n"
+ " }\n"
+ "\n"
+ " public int getClassLoaderHashCode() {\n"
+ " return this.getClass().getClassLoader().hashCode();\n"
+ " }\n"
+ "}";
// Write the demo java file to the job resource path.
String javaFilePath = jobResourcePath + "/ClassLoaderTester.java";
Files.write(Paths.get(javaFilePath), testJavaFile.getBytes());
// Compile the java file.
JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
int result = compiler.run(null, null, null, "-d", jobResourcePath, javaFilePath);
if (result != 0) {
throw new RuntimeException("Couldn't compile ClassLoaderTester.java.");
}
FunctionDescriptor constructor = new JavaFunctionDescriptor("ClassLoaderTester", "<init>",
"()V");
RayActor<?> actor1 = createActor(constructor);
FunctionDescriptor getPid = new JavaFunctionDescriptor("ClassLoaderTester", "getPid", "()I");
int pid = this.<Integer>callActorFunction(actor1, getPid, new Object[0], 1).get();
RayActor<?> actor2;
while (true) {
// Create another actor which share the same process of actor 1.
actor2 = createActor(constructor);
int actor2Pid = this.<Integer>callActorFunction(actor2, getPid, new Object[0], 1).get();
if (actor2Pid == pid) {
break;
}
}
FunctionDescriptor getClassLoaderHashCode = new JavaFunctionDescriptor("ClassLoaderTester",
"getClassLoaderHashCode",
"()I");
RayObject<Integer> hashCode1 = callActorFunction(actor1, getClassLoaderHashCode, new Object[0],
1);
RayObject<Integer> hashCode2 = callActorFunction(actor2, getClassLoaderHashCode, new Object[0],
1);
Assert.assertEquals(hashCode1.get(), hashCode2.get());
FunctionDescriptor increase = new JavaFunctionDescriptor("ClassLoaderTester", "increase",
"()I");
RayObject<Integer> value1 = callActorFunction(actor1, increase, new Object[0], 1);
RayObject<Integer> value2 = callActorFunction(actor2, increase, new Object[0], 1);
Assert.assertNotEquals(value1.get(), value2.get());
}
private RayActor<?> createActor(FunctionDescriptor functionDescriptor)
throws Exception {
Method createActorMethod = AbstractRayRuntime.class.getDeclaredMethod("createActorImpl",
FunctionDescriptor.class, Object[].class, ActorCreationOptions.class);
createActorMethod.setAccessible(true);
return (RayActor<?>) createActorMethod
.invoke(TestUtils.getRuntime(), functionDescriptor, new Object[0], null);
}
private <T> RayObject<T> callActorFunction(RayActor<?> rayActor,
FunctionDescriptor functionDescriptor, Object[] args, int numReturns) throws Exception {
Method callActorFunctionMethod = AbstractRayRuntime.class.getDeclaredMethod("callActorFunction",
RayActor.class, FunctionDescriptor.class, Object[].class, int.class);
callActorFunctionMethod.setAccessible(true);
return (RayObject<T>) callActorFunctionMethod
.invoke(TestUtils.getRuntime(), rayActor, functionDescriptor, args, numReturns);
}
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/ClientExceptionTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import java.util.concurrent.TimeUnit;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.exception.RayException;
import org.ray.api.id.ObjectId;
import org.ray.runtime.RayNativeRuntime;
import org.ray.runtime.object.RayObjectImpl;
import org.ray.runtime.runner.RunManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests that client-side calls fail with a RayException (rather than hanging)
 * when the raylet process dies mid-operation.
 */
public class ClientExceptionTest extends BaseTest {

  private static final Logger LOGGER = LoggerFactory.getLogger(ClientExceptionTest.class);

  @Test
  public void testWaitAndCrash() {
    TestUtils.skipTestUnderSingleProcess();
    ObjectId randomId = ObjectId.fromRandom();
    // An object that was never put, so the wait below can only end via timeout or failure.
    RayObject<String> notExisting = new RayObjectImpl(randomId);

    Thread thread = new Thread(() -> {
      try {
        TimeUnit.SECONDS.sleep(1);
        // kill raylet
        RunManager runManager = ((RayNativeRuntime) TestUtils.getRuntime()).getRunManager();
        for (Process process : runManager.getProcesses("raylet")) {
          runManager.terminateProcess("raylet", process);
        }
      } catch (InterruptedException e) {
        LOGGER.error("Got InterruptedException when sleeping, exit right now.");
        throw new RuntimeException("Got InterruptedException when sleeping.", e);
      }
    });
    thread.start();
    try {
      Ray.wait(ImmutableList.of(notExisting), 1, 2000);
      Assert.fail("Should not reach here");
    } catch (RayException e) {
      // Pass the exception as the last argument (not through a "{}" placeholder) so
      // SLF4J logs the full stack trace instead of just toString().
      LOGGER.debug("Expected runtime exception:", e);
    }
    try {
      thread.join();
    } catch (Exception e) {
      // Fixed typo ("Excpetion") and preserve the stack trace in the log.
      LOGGER.error("Exception caught:", e);
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/CrossLanguageInvocationTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableMap;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.RayPyActor;
import org.ray.api.TestUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests calling Python functions and actors from Java. The Python side lives in the
 * resource file test_cross_language_invocation.py, which is copied into a temp
 * directory exported to the workers through PYTHONPATH.
 */
public class CrossLanguageInvocationTest extends BaseMultiLanguageTest {

  // Name of the Python module (without ".py") containing the remote function/actor.
  private static final String PYTHON_MODULE = "test_cross_language_invocation";

  @Override
  protected Map<String, String> getRayStartEnv() {
    // Delete and re-create the temp dir.
    File tempDir = new File(
        System.getProperty("java.io.tmpdir") + File.separator + "ray_cross_language_test");
    FileUtils.deleteQuietly(tempDir);
    tempDir.mkdirs();
    tempDir.deleteOnExit();

    // Write the test Python file to the temp dir.
    // NOTE: FileUtils.copyInputStreamToFile closes the input stream for us.
    InputStream in = CrossLanguageInvocationTest.class
        .getResourceAsStream("/" + PYTHON_MODULE + ".py");
    File pythonFile = new File(
        tempDir.getAbsolutePath() + File.separator + PYTHON_MODULE + ".py");
    try {
      FileUtils.copyInputStreamToFile(in, pythonFile);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    // Make the module importable by the Python workers.
    return ImmutableMap.of("PYTHONPATH", tempDir.getAbsolutePath());
  }

  @Test
  public void testCallingPythonFunction() {
    // Arguments and results cross the language boundary as raw bytes.
    RayObject res = Ray.callPy(PYTHON_MODULE, "py_func", "hello".getBytes());
    Assert.assertEquals(res.get(), "Response from Python: hello".getBytes());
  }

  @Test(groups = {"directCall"})
  public void testCallingPythonActor() {
    // Python worker doesn't support direct call yet.
    TestUtils.skipTestIfDirectActorCallEnabled();
    RayPyActor actor = Ray.createPyActor(PYTHON_MODULE, "Counter", "1".getBytes());
    RayObject res = Ray.callPy(actor, "increase", "1".getBytes());
    Assert.assertEquals(res.get(), "2".getBytes());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/DynamicResourceTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.List;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.WaitResult;
import org.ray.api.annotation.RayRemote;
import org.ray.api.options.CallOptions;
import org.ray.api.runtimecontext.NodeInfo;
import org.testng.Assert;
import org.testng.annotations.Test;
public class DynamicResourceTest extends BaseTest {

  @RayRemote
  public static String sayHi() {
    return "hi";
  }

  /**
   * Verifies that a task requiring a custom resource stays pending until the resource
   * is registered via Ray.setResource, and completes afterwards.
   */
  @Test
  public void testSetResource() {
    TestUtils.skipTestUnderSingleProcess();
    // Call a task in advance to warm up the cluster to avoid being too slow to start workers.
    TestUtils.warmUpCluster();

    CallOptions op1 =
        new CallOptions.Builder().setResources(ImmutableMap.of("A", 10.0)).createCallOptions();
    RayObject<String> obj = Ray.call(DynamicResourceTest::sayHi, op1);
    // Resource "A" doesn't exist yet, so the task cannot be scheduled within the timeout.
    WaitResult<String> result = Ray.wait(ImmutableList.of(obj), 1, 1000);
    Assert.assertEquals(result.getReady().size(), 0);

    Ray.setResource("A", 10.0);
    // Poll node info until the single node reports the new resource capacity.
    boolean resourceReady = TestUtils.waitForCondition(() -> {
      List<NodeInfo> nodes = Ray.getRuntimeContext().getAllNodeInfo();
      if (nodes.size() != 1) {
        return false;
      }
      return (0 == Double.compare(10.0, nodes.get(0).resources.get("A")));
    }, 2000);
    Assert.assertTrue(resourceReady);

    // Assert ray call result.
    result = Ray.wait(ImmutableList.of(obj), 1, 1000);
    Assert.assertEquals(result.getReady().size(), 1);
    Assert.assertEquals(Ray.get(obj.getId()), "hi");
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/FailureTest.java | Java | package org.ray.api.test;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.List;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.exception.RayActorException;
import org.ray.api.exception.RayException;
import org.ray.api.exception.RayTaskException;
import org.ray.api.exception.RayWorkerException;
import org.ray.api.function.RayFunc0;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests that failures in normal tasks, actor creation, and actor tasks surface as
 * the expected Ray exception types on the caller side.
 */
public class FailureTest extends BaseTest {

  private static final String EXCEPTION_MESSAGE = "Oops";

  // Throws immediately when executed as a task.
  public static int badFunc() {
    throw new RuntimeException(EXCEPTION_MESSAGE);
  }

  // Kills the executing worker process without throwing a Java exception.
  public static int badFunc2() {
    System.exit(-1);
    return 0;
  }

  // Sleeps long enough that faster failures are observed first.
  public static int slowFunc() {
    try {
      Thread.sleep(10000);
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }
    return 0;
  }

  public static class BadActor {

    public BadActor(boolean failOnCreation) {
      if (failOnCreation) {
        throw new RuntimeException(EXCEPTION_MESSAGE);
      }
    }

    public int badMethod() {
      throw new RuntimeException(EXCEPTION_MESSAGE);
    }

    // Kills the actor process instead of throwing.
    public int badMethod2() {
      System.exit(-1);
      return 0;
    }
  }

  // Asserts that getting the object raises RayTaskException whose root cause is the
  // RuntimeException thrown by the remote code, carrying the expected message.
  private static void assertTaskFailedWithRayTaskException(RayObject<?> rayObject) {
    try {
      rayObject.get();
      Assert.fail("Task didn't fail.");
    } catch (RayTaskException e) {
      Throwable rootCause = e.getCause();
      while (rootCause.getCause() != null) {
        rootCause = rootCause.getCause();
      }
      Assert.assertTrue(rootCause instanceof RuntimeException);
      Assert.assertEquals(rootCause.getMessage(), EXCEPTION_MESSAGE);
    }
  }

  @Test
  public void testNormalTaskFailure() {
    TestUtils.skipTestUnderSingleProcess();
    assertTaskFailedWithRayTaskException(Ray.call(FailureTest::badFunc));
  }

  @Test(groups = {"directCall"})
  public void testActorCreationFailure() {
    TestUtils.skipTestUnderSingleProcess();
    // The creation failure surfaces when the first method is called on the actor.
    RayActor<BadActor> actor = Ray.createActor(BadActor::new, true);
    assertTaskFailedWithRayTaskException(Ray.call(BadActor::badMethod, actor));
  }

  @Test(groups = {"directCall"})
  public void testActorTaskFailure() {
    TestUtils.skipTestUnderSingleProcess();
    RayActor<BadActor> actor = Ray.createActor(BadActor::new, false);
    assertTaskFailedWithRayTaskException(Ray.call(BadActor::badMethod, actor));
  }

  @Test
  public void testWorkerProcessDying() {
    TestUtils.skipTestUnderSingleProcess();
    try {
      Ray.call(FailureTest::badFunc2).get();
      Assert.fail("This line shouldn't be reached.");
    } catch (RayWorkerException e) {
      // When the worker process dies while executing a task, we should receive an
      // RayWorkerException.
    }
  }

  @Test(groups = {"directCall"})
  public void testActorProcessDying() {
    TestUtils.skipTestUnderSingleProcess();
    // This test case hangs if the worker to worker connection is implemented with grpc.
    // TODO (kfstorm): Should be fixed.
    TestUtils.skipTestIfDirectActorCallEnabled();
    RayActor<BadActor> actor = Ray.createActor(BadActor::new, false);
    try {
      Ray.call(BadActor::badMethod2, actor).get();
      Assert.fail("This line shouldn't be reached.");
    } catch (RayActorException e) {
      // When the actor process dies while executing a task, we should receive an
      // RayActorException.
    }
    try {
      Ray.call(BadActor::badMethod, actor).get();
      Assert.fail("This line shouldn't be reached.");
    } catch (RayActorException e) {
      // When an actor task is submitted to a dead actor, we should also receive an
      // RayActorException.
    }
  }

  @Test
  public void testGetThrowsQuicklyWhenFoundException() {
    TestUtils.skipTestUnderSingleProcess();
    List<RayFunc0<Integer>> badFunctions = Arrays.asList(FailureTest::badFunc,
        FailureTest::badFunc2);
    TestUtils.warmUpCluster();
    for (RayFunc0<Integer> badFunc : badFunctions) {
      RayObject<Integer> obj1 = Ray.call(badFunc);
      RayObject<Integer> obj2 = Ray.call(FailureTest::slowFunc);
      Instant start = Instant.now();
      try {
        // The batch get should fail as soon as obj1's error is known, rather than
        // waiting for the slow task (10s) to finish.
        Ray.get(Arrays.asList(obj1.getId(), obj2.getId()));
        Assert.fail("Should throw RayException.");
      } catch (RayException e) {
        Instant end = Instant.now();
        long duration = Duration.between(start, end).toMillis();
        Assert.assertTrue(duration < 5000, "Should fail quickly. " +
            "Actual execution time: " + duration + " ms.");
      }
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/GcsClientTest.java | Java | package org.ray.api.test;
import com.google.common.base.Preconditions;
import java.util.List;
import org.ray.api.TestUtils;
import org.ray.api.id.JobId;
import org.ray.api.runtimecontext.NodeInfo;
import org.ray.runtime.config.RayConfig;
import org.ray.runtime.gcs.GcsClient;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Tests reading cluster metadata (node info and job IDs) through the GCS client.
 */
public class GcsClientTest extends BaseTest {

  @BeforeClass
  public void setUp() {
    System.setProperty("ray.resources", "A:8");
  }

  @AfterClass
  public void tearDown() {
    System.clearProperty("ray.resources");
  }

  @Test
  public void testGetAllNodeInfo() {
    TestUtils.skipTestUnderSingleProcess();
    RayConfig rayConfig = TestUtils.getRuntime().getRayConfig();
    Preconditions.checkNotNull(rayConfig);

    GcsClient client = TestUtils.getRuntime().getGcsClient();
    List<NodeInfo> nodes = client.getAllNodeInfo();
    // Single-node cluster: the only entry is this node, alive, carrying the
    // custom resource configured in setUp.
    Assert.assertEquals(nodes.size(), 1);
    NodeInfo node = nodes.get(0);
    Assert.assertEquals(node.nodeAddress, rayConfig.nodeIp);
    Assert.assertTrue(node.isAlive);
    Assert.assertEquals(node.resources.get("A"), 8.0);
  }

  @Test
  public void testNextJob() {
    TestUtils.skipTestUnderSingleProcess();
    RayConfig rayConfig = TestUtils.getRuntime().getRayConfig();
    // The value of job id of this driver in cluster should be 1.
    Assert.assertEquals(rayConfig.getJobId(), JobId.fromInt(1));

    // Subsequent job IDs are handed out sequentially.
    GcsClient client = TestUtils.getRuntime().getGcsClient();
    for (int expected = 2; expected < 100; expected++) {
      Assert.assertEquals(client.nextJobId(), JobId.fromInt(expected));
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/HelloWorldTest.java | Java | package org.ray.api.test;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Minimal end-to-end sanity check: two independent remote calls whose results are
 * fed into a third.
 */
public class HelloWorldTest extends BaseTest {

  @RayRemote
  private static String hello() {
    return "hello";
  }

  @RayRemote
  private static String world() {
    return "world!";
  }

  @RayRemote
  private static String merge(String hello, String world) {
    return hello + "," + world;
  }

  @Test
  public void testHelloWorld() {
    // Kick off two independent remote calls, then chain both into a third.
    RayObject<String> first = Ray.call(HelloWorldTest::hello);
    RayObject<String> second = Ray.call(HelloWorldTest::world);
    RayObject<String> merged = Ray.call(HelloWorldTest::merge, first, second);
    Assert.assertEquals("hello,world!", merged.get());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/KillActorTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.annotation.RayRemote;
import org.ray.api.exception.RayActorException;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test(groups = { "directCall" })
public class KillActorTest extends BaseTest {

  @RayRemote
  public static class HangActor {

    public boolean alive() {
      return true;
    }

    // Blocks forever so the driver can kill the actor while a task is in flight.
    public boolean hang() throws InterruptedException {
      for (;;) {
        Thread.sleep(1000);
      }
    }
  }

  public void testKillActor() {
    TestUtils.skipTestUnderSingleProcess();
    TestUtils.skipTestIfDirectActorCallDisabled();

    RayActor<HangActor> actor = Ray.createActor(HangActor::new);
    // The actor is reachable before being killed.
    Assert.assertTrue(Ray.call(HangActor::alive, actor).get());

    // Submit a task that never completes on its own and confirm it stays pending.
    RayObject<Boolean> hangResult = Ray.call(HangActor::hang, actor);
    Assert.assertEquals(0, Ray.wait(ImmutableList.of(hangResult), 1, 500).getReady().size());

    // Killing the actor fails the in-flight task.
    Ray.killActor(actor);
    Assert.expectThrows(RayActorException.class, hangResult::get);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/MultiLanguageClusterTest.java | Java | package org.ray.api.test;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Checks that a plain Java remote call works on a cluster started through the
 * multi-language test harness.
 */
public class MultiLanguageClusterTest extends BaseMultiLanguageTest {

  @RayRemote
  public static String echo(String word) {
    return word;
  }

  @Test
  public void testMultiLanguageCluster() {
    RayObject<String> result = Ray.call(MultiLanguageClusterTest::echo, "hello");
    Assert.assertEquals("hello", result.get());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/MultiThreadingTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.WaitResult;
import org.ray.api.annotation.RayRemote;
import org.ray.api.id.ActorId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test(groups = {"directCall"})
public class MultiThreadingTest extends BaseTest {

  private static final Logger LOGGER = LoggerFactory.getLogger(MultiThreadingTest.class);

  // Repetitions each thread performs per test case.
  private static final int LOOP_COUNTER = 100;
  // Concurrent threads used by runTestCaseInMultipleThreads.
  private static final int NUM_THREADS = 20;

  @RayRemote
  static Integer echo(int num) {
    return num;
  }

  @RayRemote
  public static class Echo {

    @RayRemote
    public Integer echo(int num) {
      return num;
    }
  }

  // Actor that verifies getCurrentActorId is consistent even when queried from a
  // user-spawned thread inside the actor.
  @RayRemote
  public static class ActorIdTester {

    private final ActorId actorId;

    public ActorIdTester() {
      actorId = Ray.getRuntimeContext().getCurrentActorId();
      Assert.assertNotEquals(actorId, ActorId.NIL);
    }

    @RayRemote
    public ActorId getCurrentActorId() throws Exception {
      final Object[] result = new Object[1];
      // Ray.wrapRunnable presumably carries the Ray context onto the new thread —
      // without it getCurrentActorId would not be available there (see the
      // async-context tests below for the manual equivalent).
      Thread thread = new Thread(Ray.wrapRunnable(() -> {
        try {
          result[0] = Ray.getRuntimeContext().getCurrentActorId();
        } catch (Exception e) {
          result[0] = e;
        }
      }));
      thread.start();
      thread.join();
      if (result[0] instanceof Exception) {
        throw (Exception) result[0];
      }
      Assert.assertEquals(result[0], actorId);
      return (ActorId) result[0];
    }
  }

  // Exercises task calls, actor calls, actor creation, put/get, and wait from many
  // threads concurrently. Returns "ok" on success; runs both on the driver and
  // inside a worker (see testInDriver / testInWorker).
  static String testMultiThreading() {
    Random random = new Random();
    // Test calling normal functions.
    runTestCaseInMultipleThreads(() -> {
      int arg = random.nextInt();
      RayObject<Integer> obj = Ray.call(MultiThreadingTest::echo, arg);
      Assert.assertEquals(arg, (int) obj.get());
    }, LOOP_COUNTER);

    // Test calling actors.
    RayActor<Echo> echoActor = Ray.createActor(Echo::new);
    runTestCaseInMultipleThreads(() -> {
      int arg = random.nextInt();
      RayObject<Integer> obj = Ray.call(Echo::echo, echoActor, arg);
      Assert.assertEquals(arg, (int) obj.get());
    }, LOOP_COUNTER);

    // Test creating multi actors
    runTestCaseInMultipleThreads(() -> {
      int arg = random.nextInt();
      RayActor<Echo> echoActor1 = Ray.createActor(Echo::new);
      try {
        // Sleep a while to test the case that another actor is created before submitting
        // tasks to this actor.
        TimeUnit.MILLISECONDS.sleep(10);
      } catch (InterruptedException e) {
        LOGGER.warn("Got exception while sleeping.", e);
      }
      RayObject<Integer> obj = Ray.call(Echo::echo, echoActor1, arg);
      Assert.assertEquals(arg, (int) obj.get());
    }, 1);

    // Test put and get.
    runTestCaseInMultipleThreads(() -> {
      int arg = random.nextInt();
      RayObject<Integer> obj = Ray.put(arg);
      Assert.assertEquals(arg, (int) Ray.get(obj.getId()));
    }, LOOP_COUNTER);

    TestUtils.warmUpCluster();
    // Test wait for one object in multi threads.
    RayObject<Integer> obj = Ray.call(MultiThreadingTest::echo, 100);
    runTestCaseInMultipleThreads(() -> {
      WaitResult<Integer> result = Ray.wait(ImmutableList.of(obj), 1, 1000);
      Assert.assertEquals(1, result.getReady().size());
    }, 1);

    return "ok";
  }

  public void testInDriver() {
    testMultiThreading();
  }

  public void testInWorker() {
    // Single-process mode doesn't have real workers.
    TestUtils.skipTestUnderSingleProcess();
    RayObject<String> obj = Ray.call(MultiThreadingTest::testMultiThreading);
    Assert.assertEquals("ok", obj.get());
  }

  public void testGetCurrentActorId() {
    TestUtils.skipTestUnderSingleProcess();
    RayActor<ActorIdTester> actorIdTester = Ray.createActor(ActorIdTester::new);
    ActorId actorId = Ray.call(ActorIdTester::getCurrentActorId, actorIdTester).get();
    Assert.assertEquals(actorId, actorIdTester.getId());
  }

  // Runs testCase numRepeats times on each of NUM_THREADS pooled threads; rethrows
  // (wrapped) if any execution fails.
  private static void runTestCaseInMultipleThreads(Runnable testCase, int numRepeats) {
    ExecutorService service = Executors.newFixedThreadPool(NUM_THREADS);
    try {
      List<Future<String>> futures = new ArrayList<>();
      for (int i = 0; i < NUM_THREADS; i++) {
        Callable<String> task = Ray.wrapCallable(() -> {
          for (int j = 0; j < numRepeats; j++) {
            TimeUnit.MILLISECONDS.sleep(1);
            testCase.run();
          }
          return "ok";
        });
        futures.add(service.submit(task));
      }
      for (Future<String> future : futures) {
        try {
          Assert.assertEquals(future.get(), "ok");
        } catch (Exception e) {
          throw new RuntimeException("Test case failed.", e);
        }
      }
    } finally {
      service.shutdown();
    }
  }

  // Checks that the async context captured on this thread can be installed on a
  // manually created thread and then used to interact with Ray (here: a put).
  private static boolean testGetAsyncContextAndSetAsyncContext() throws Exception {
    final Object asyncContext = Ray.getAsyncContext();
    final Object[] result = new Object[1];
    Thread thread = new Thread(() -> {
      try {
        Ray.setAsyncContext(asyncContext);
        Ray.put(0);
      } catch (Exception e) {
        result[0] = e;
      }
    });
    thread.start();
    thread.join();
    if (result[0] instanceof Exception) {
      throw (Exception) result[0];
    }
    return true;
  }

  public void testGetAsyncContextAndSetAsyncContextInDriver() throws Exception {
    Assert.assertTrue(testGetAsyncContextAndSetAsyncContext());
  }

  public void testGetAsyncContextAndSetAsyncContextInWorker() {
    RayObject<Boolean> obj = Ray.call(MultiThreadingTest::testGetAsyncContextAndSetAsyncContext);
    Assert.assertTrue(obj.get());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/ObjectStoreTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.stream.Collectors;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.id.ObjectId;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Test putting and getting objects.
 */
public class ObjectStoreTest extends BaseTest {

  @Test
  public void testPutAndGet() {
    RayObject<Integer> ref = Ray.put(1);
    Assert.assertEquals(1, (int) ref.get());
  }

  @Test
  public void testGetMultipleObjects() {
    List<Integer> values = ImmutableList.of(1, 2, 3, 4, 5);
    // Put every value, keep only the IDs, then fetch them all back in one batch get.
    List<ObjectId> ids = values.stream()
        .map(value -> Ray.put(value).getId())
        .collect(Collectors.toList());
    Assert.assertEquals(values, Ray.get(ids));
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/PlasmaFreeTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import java.util.Arrays;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.annotation.RayRemote;
import org.ray.api.id.TaskId;
import org.testng.Assert;
import org.testng.annotations.Test;
public class PlasmaFreeTest extends BaseTest {

  @RayRemote
  private static String hello() {
    return "hello";
  }

  /**
   * Frees an object from the object store and waits until it is no longer present.
   */
  @Test
  public void testDeleteObjects() {
    RayObject<String> helloId = Ray.call(PlasmaFreeTest::hello);
    String helloString = helloId.get();
    Assert.assertEquals("hello", helloString);
    Ray.internal().free(ImmutableList.of(helloId.getId()), true, false);

    // Use logical negation instead of comparing a boxed Boolean with `== false`.
    final boolean result = TestUtils.waitForCondition(() ->
        !TestUtils.getRuntime().getObjectStore()
            .wait(ImmutableList.of(helloId.getId()), 1, 0).get(0), 50);
    Assert.assertTrue(result);
  }

  /**
   * Frees an object together with its creating task and waits for the task entry to
   * disappear from GCS.
   */
  @Test
  public void testDeleteCreatingTasks() {
    TestUtils.skipTestUnderSingleProcess();
    RayObject<String> helloId = Ray.call(PlasmaFreeTest::hello);
    Assert.assertEquals("hello", helloId.get());
    Ray.internal().free(ImmutableList.of(helloId.getId()), true, true);

    // The task ID is derived by truncating the object ID's bytes to TaskId.LENGTH.
    TaskId taskId = TaskId.fromBytes(Arrays.copyOf(helloId.getId().getBytes(), TaskId.LENGTH));
    final boolean result = TestUtils.waitForCondition(
        () -> !TestUtils.getRuntime().getGcsClient()
            .rayletTaskExistsInGcs(taskId), 50);
    Assert.assertTrue(result);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/PlasmaStoreTest.java | Java | package org.ray.api.test;
import org.ray.api.Ray;
import org.ray.api.TestUtils;
import org.ray.api.id.ObjectId;
import org.ray.runtime.object.ObjectStore;
import org.testng.Assert;
import org.testng.annotations.Test;
public class PlasmaStoreTest extends BaseTest {

  @Test
  public void testPutWithDuplicateId() {
    TestUtils.skipTestUnderSingleProcess();
    ObjectId id = ObjectId.fromRandom();
    ObjectStore store = TestUtils.getRuntime().getObjectStore();
    store.put("1", id);
    Assert.assertEquals(Ray.get(id), "1");
    // A second put with the same ID should fail but be ignored: the first value wins.
    store.put("2", id);
    Assert.assertEquals(Ray.get(id), "1");
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/RayCallTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.List;
import java.util.Map;
import org.ray.api.Ray;
import org.ray.api.TestUtils;
import org.ray.api.TestUtils.LargeObject;
import org.ray.api.annotation.RayRemote;
import org.ray.api.id.ObjectId;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Test Ray.call API
 */
public class RayCallTest extends BaseTest {

  // The methods below each echo their argument; together they pin down argument
  // and return-value serialization for the primitive, String, collection, and
  // large-object cases.

  @RayRemote
  private static int testInt(int val) {
    return val;
  }

  @RayRemote
  private static byte testByte(byte val) {
    return val;
  }

  @RayRemote
  private static short testShort(short val) {
    return val;
  }

  @RayRemote
  private static long testLong(long val) {
    return val;
  }

  @RayRemote
  private static double testDouble(double val) {
    return val;
  }

  @RayRemote
  private static float testFloat(float val) {
    return val;
  }

  @RayRemote
  private static boolean testBool(boolean val) {
    return val;
  }

  @RayRemote
  private static String testString(String val) {
    return val;
  }

  @RayRemote
  private static List<Integer> testList(List<Integer> val) {
    return val;
  }

  @RayRemote
  private static Map<String, Integer> testMap(Map<String, Integer> val) {
    return val;
  }

  @RayRemote
  private static LargeObject testLargeObject(LargeObject largeObject) {
    return largeObject;
  }

  @RayRemote
  private static void testNoReturn(ObjectId objectId) {
    // Put an object in object store to inform driver that this function is executing.
    TestUtils.getRuntime().getObjectStore().put(1, objectId);
  }

  /**
   * Test calling and returning different types.
   */
  @Test
  public void testType() {
    Assert.assertEquals(1, (int) Ray.call(RayCallTest::testInt, 1).get());
    Assert.assertEquals(1, (byte) Ray.call(RayCallTest::testByte, (byte) 1).get());
    Assert.assertEquals(1, (short) Ray.call(RayCallTest::testShort, (short) 1).get());
    Assert.assertEquals(1, (long) Ray.call(RayCallTest::testLong, 1L).get());
    Assert.assertEquals(1.0, Ray.call(RayCallTest::testDouble, 1.0).get(), 0.0);
    Assert.assertEquals(1.0f, Ray.call(RayCallTest::testFloat, 1.0f).get(), 0.0);
    Assert.assertTrue(Ray.call(RayCallTest::testBool, true).get());
    Assert.assertEquals("foo", Ray.call(RayCallTest::testString, "foo").get());
    List<Integer> list = ImmutableList.of(1, 2, 3);
    Assert.assertEquals(list, Ray.call(RayCallTest::testList, list).get());
    Map<String, Integer> map = ImmutableMap.of("1", 1, "2", 2);
    Assert.assertEquals(map, Ray.call(RayCallTest::testMap, map).get());
    LargeObject largeObject = new LargeObject();
    Assert.assertNotNull(Ray.call(RayCallTest::testLargeObject, largeObject).get());
    // A void task has no return object; observe its execution through the side
    // effect it performs on a pre-agreed object ID.
    ObjectId randomObjectId = ObjectId.fromRandom();
    Ray.call(RayCallTest::testNoReturn, randomObjectId);
    Assert.assertEquals(((int) Ray.get(randomObjectId)), 1);
  }

  // Arity fixtures: Ray.call exposes one overload per parameter count, so each
  // count from 0 to 6 is exercised separately below.

  @RayRemote
  private static int testNoParam() {
    return 0;
  }

  @RayRemote
  private static int testOneParam(int a) {
    return a;
  }

  @RayRemote
  private static int testTwoParams(int a, int b) {
    return a + b;
  }

  @RayRemote
  private static int testThreeParams(int a, int b, int c) {
    return a + b + c;
  }

  @RayRemote
  private static int testFourParams(int a, int b, int c, int d) {
    return a + b + c + d;
  }

  @RayRemote
  private static int testFiveParams(int a, int b, int c, int d, int e) {
    return a + b + c + d + e;
  }

  @RayRemote
  private static int testSixParams(int a, int b, int c, int d, int e, int f) {
    return a + b + c + d + e + f;
  }

  @Test
  public void testNumberOfParameters() {
    Assert.assertEquals(0, (int) Ray.call(RayCallTest::testNoParam).get());
    Assert.assertEquals(1, (int) Ray.call(RayCallTest::testOneParam, 1).get());
    Assert.assertEquals(2, (int) Ray.call(RayCallTest::testTwoParams, 1, 1).get());
    Assert.assertEquals(3, (int) Ray.call(RayCallTest::testThreeParams, 1, 1, 1).get());
    Assert.assertEquals(4, (int) Ray.call(RayCallTest::testFourParams, 1, 1, 1, 1).get());
    Assert.assertEquals(5, (int) Ray.call(RayCallTest::testFiveParams, 1, 1, 1, 1, 1).get());
    Assert.assertEquals(6, (int) Ray.call(RayCallTest::testSixParams, 1, 1, 1, 1, 1, 1).get());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/RayConfigTest.java | Java | package org.ray.api.test;
import org.ray.runtime.config.RayConfig;
import org.ray.runtime.generated.Common.WorkerType;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests that RayConfig.create picks up settings supplied via system properties.
 */
public class RayConfigTest {

  @Test
  public void testCreateRayConfig() {
    try {
      System.setProperty("ray.job.resource-path", "path/to/ray/job/resource/path");
      RayConfig config = RayConfig.create();
      // The property set above should surface in the created config, and a config
      // created here runs in driver mode.
      Assert.assertEquals(WorkerType.DRIVER, config.workerMode);
      Assert.assertEquals("path/to/ray/job/resource/path", config.jobResourcePath);
    } finally {
      // Unset system properties so other tests are unaffected.
      System.clearProperty("ray.job.resource-path");
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/RayMethodsTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.stream.Collectors;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.WaitResult;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Integration test for Ray.*
*/
/**
 * Integration test for the basic {@code Ray.*} API: put, get, and wait.
 */
public class RayMethodsTest extends BaseTest {

  @Test
  public void test() {
    RayObject<Integer> intRef = Ray.put(1);
    RayObject<Double> doubleRef = Ray.put(3.14);
    RayObject<String> helloRef = Ray.put("Hello ");
    RayObject<String> worldRef = Ray.put("World!");
    RayObject<Object> nullRef = Ray.put(null);

    // Both string objects should become ready well within the timeout.
    WaitResult<String> waitResult = Ray.wait(ImmutableList.of(helloRef, worldRef), 2, 1000);
    List<String> readyStrings =
        waitResult.getReady().stream().map(RayObject::get).collect(Collectors.toList());

    Assert.assertEquals("Hello World!", readyStrings.get(0) + readyStrings.get(1));
    Assert.assertEquals(1, (int) intRef.get());
    Assert.assertEquals(3.14, (double) doubleRef.get(), Double.MIN_NORMAL);
    Assert.assertNull(nullRef.get());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/RaySerializerTest.java | Java | package org.ray.api.test;
import org.ray.api.Ray;
import org.ray.api.RayPyActor;
import org.ray.api.TestUtils;
import org.ray.runtime.context.WorkerContext;
import org.ray.runtime.object.NativeRayObject;
import org.ray.runtime.object.ObjectSerializer;
import org.testng.Assert;
import org.testng.annotations.Test;
public class RaySerializerTest extends BaseMultiLanguageTest {

  /**
   * Round-trips a Python actor handle through the object serializer and checks
   * that its identity and target module/class survive.
   */
  @Test
  public void testSerializePyActor() {
    RayPyActor original = Ray.createPyActor("test", "RaySerializerTest");
    WorkerContext context = TestUtils.getRuntime().getWorkerContext();
    NativeRayObject serialized = ObjectSerializer.serialize(original);
    RayPyActor restored =
        (RayPyActor) ObjectSerializer.deserialize(
            serialized, null, context.getCurrentClassLoader());
    Assert.assertEquals(restored.getId(), original.getId());
    Assert.assertEquals(restored.getModuleName(), "test");
    Assert.assertEquals(restored.getClassName(), "RaySerializerTest");
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/RedisPasswordTest.java | Java | package org.ray.api.test;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Checks that a Ray cluster protected by a Redis password still works end to end.
 */
public class RedisPasswordTest extends BaseTest {

  @BeforeClass
  public void setUp() {
    // The head node and the shards are given the same password here.
    System.setProperty("ray.redis.head-password", "12345678");
    System.setProperty("ray.redis.password", "12345678");
  }

  @AfterClass
  public void tearDown() {
    System.clearProperty("ray.redis.head-password");
    System.clearProperty("ray.redis.password");
  }

  @RayRemote
  public static String echo(String str) {
    return str;
  }

  @Test
  public void testRedisPassword() {
    RayObject<String> reply = Ray.call(RedisPasswordTest::echo, "hello");
    Assert.assertEquals("hello", reply.get());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/ResourcesManagementTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.WaitResult;
import org.ray.api.annotation.RayRemote;
import org.ray.api.options.ActorCreationOptions;
import org.ray.api.options.CallOptions;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
* Resources Management Test.
*/
/**
 * Tests that resource requirements attached to tasks and actors are respected
 * by the scheduler. The cluster's static resources are configured in setUp().
 */
public class ResourcesManagementTest extends BaseTest {

  @BeforeClass
  public void setUp() {
    // Give the local cluster 4 CPUs and 4 units of a custom resource "RES-A".
    System.setProperty("ray.resources", "CPU:4,RES-A:4");
  }

  @AfterClass
  public void tearDown() {
    System.clearProperty("ray.resources");
  }

  @RayRemote
  public static Integer echo(Integer number) {
    return number;
  }

  @RayRemote
  public static class Echo {

    public Integer echo(Integer number) {
      return number;
    }
  }

  @Test
  public void testMethods() {
    TestUtils.skipTestUnderSingleProcess();
    CallOptions callOptions1 =
        new CallOptions.Builder().setResources(ImmutableMap.of("CPU", 4.0)).createCallOptions();
    // This is a case that can satisfy required resources.
    // The static resources for test are "CPU:4,RES-A:4".
    RayObject<Integer> result1 = Ray.call(ResourcesManagementTest::echo, 100, callOptions1);
    Assert.assertEquals(100, (int) result1.get());
    CallOptions callOptions2 =
        new CallOptions.Builder().setResources(ImmutableMap.of("CPU", 4.0)).createCallOptions();
    // NOTE(review): a previous comment here claimed this request can't be satisfied,
    // but CPU:4.0 fits within the static test resources "CPU:4,RES-A:4" — which is
    // why the assertions below expect the task to become ready.
    final RayObject<Integer> result2 = Ray.call(ResourcesManagementTest::echo, 200, callOptions2);
    WaitResult<Integer> waitResult = Ray.wait(ImmutableList.of(result2), 1, 1000);
    Assert.assertEquals(1, waitResult.getReady().size());
    Assert.assertEquals(0, waitResult.getUnready().size());
    try {
      // Requesting a zero-capacity resource is invalid; the builder should throw.
      CallOptions callOptions3 =
          new CallOptions.Builder().setResources(ImmutableMap.of("CPU", 0.0)).createCallOptions();
      Assert.fail();
    } catch (RuntimeException e) {
      // We should receive a RuntimeException indicates that we should not
      // pass a zero capacity resource.
    }
  }

  @Test
  public void testActors() {
    TestUtils.skipTestUnderSingleProcess();
    ActorCreationOptions actorCreationOptions1 = new ActorCreationOptions.Builder()
        .setResources(ImmutableMap.of("CPU", 2.0)).createActorCreationOptions();
    // This is a case that can satisfy required resources.
    // The static resources for test are "CPU:4,RES-A:4".
    RayActor<Echo> echo1 = Ray.createActor(Echo::new, actorCreationOptions1);
    final RayObject<Integer> result1 = Ray.call(Echo::echo, echo1, 100);
    Assert.assertEquals(100, (int) result1.get());
    // This is a case that can't satisfy required resources (CPU:8.0 exceeds CPU:4).
    // The static resources for test are "CPU:4,RES-A:4".
    ActorCreationOptions actorCreationOptions2 = new ActorCreationOptions.Builder()
        .setResources(ImmutableMap.of("CPU", 8.0)).createActorCreationOptions();
    RayActor<ResourcesManagementTest.Echo> echo2 =
        Ray.createActor(Echo::new, actorCreationOptions2);
    final RayObject<Integer> result2 = Ray.call(Echo::echo, echo2, 100);
    WaitResult<Integer> waitResult = Ray.wait(ImmutableList.of(result2), 1, 1000);
    Assert.assertEquals(0, waitResult.getReady().size());
    Assert.assertEquals(1, waitResult.getUnready().size());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/RuntimeContextTest.java | Java | package org.ray.api.test;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.annotation.RayRemote;
import org.ray.api.id.ActorId;
import org.ray.api.id.JobId;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Verifies that {@code Ray.getRuntimeContext()} reports the configured job id
 * and socket names, both in the driver and from inside an actor.
 */
public class RuntimeContextTest extends BaseTest {

  // These values are injected via system properties in setUp() and are read
  // back both in the driver and in worker processes, so they must be stable
  // across processes.
  private static JobId JOB_ID = getJobId();
  private static String RAYLET_SOCKET_NAME = "/tmp/ray/test/raylet_socket";
  private static String OBJECT_STORE_SOCKET_NAME = "/tmp/ray/test/object_store_socket";

  /** Builds a deterministic job id (all bytes set to 127). */
  private static JobId getJobId() {
    // Must be stable across different processes.
    byte[] bytes = new byte[JobId.LENGTH];
    Arrays.fill(bytes, (byte) 127);
    return JobId.fromByteBuffer(ByteBuffer.wrap(bytes));
  }

  @BeforeClass
  public void setUp() {
    System.setProperty("ray.job.id", JOB_ID.toString());
    System.setProperty("ray.raylet.socket-name", RAYLET_SOCKET_NAME);
    System.setProperty("ray.object-store.socket-name", OBJECT_STORE_SOCKET_NAME);
  }

  @AfterClass
  public void tearDown() {
    System.clearProperty("ray.job.id");
    System.clearProperty("ray.raylet.socket-name");
    System.clearProperty("ray.object-store.socket-name");
  }

  /** The driver should observe exactly the configured values. */
  @Test
  public void testRuntimeContextInDriver() {
    Assert.assertEquals(JOB_ID, Ray.getRuntimeContext().getCurrentJobId());
    Assert.assertEquals(RAYLET_SOCKET_NAME, Ray.getRuntimeContext().getRayletSocketName());
    Assert.assertEquals(OBJECT_STORE_SOCKET_NAME,
        Ray.getRuntimeContext().getObjectStoreSocketName());
  }

  @RayRemote
  public static class RuntimeContextTester {

    /** Asserts the runtime context seen inside the actor process. */
    public String testRuntimeContext(ActorId actorId) {
      Assert.assertEquals(JOB_ID, Ray.getRuntimeContext().getCurrentJobId());
      Assert.assertEquals(actorId, Ray.getRuntimeContext().getCurrentActorId());
      Assert.assertEquals(RAYLET_SOCKET_NAME, Ray.getRuntimeContext().getRayletSocketName());
      Assert.assertEquals(OBJECT_STORE_SOCKET_NAME,
          Ray.getRuntimeContext().getObjectStoreSocketName());
      return "ok";
    }
  }

  /** Runs the assertions above inside an actor and checks it reported success. */
  @Test
  public void testRuntimeContextInActor() {
    RayActor<RuntimeContextTester> actor = Ray.createActor(RuntimeContextTester::new);
    Assert.assertEquals("ok",
        Ray.call(RuntimeContextTester::testRuntimeContext, actor, actor.getId()).get());
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/SingleProcessModeTest.java | Java | package org.ray.api.test;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.annotation.RayRemote;
import org.ray.api.id.ActorId;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * In single-process mode, all tasks of one actor must execute on the same
 * thread. This test records each actor's first executing thread and checks
 * that all subsequent tasks report the same thread id.
 */
public class SingleProcessModeTest extends BaseTest {

  /** Number of actor instances created by the test. */
  private static final int NUM_ACTOR_INSTANCE = 10;

  /** Number of calls submitted to each actor. */
  private static final int TIMES_TO_CALL_PER_ACTOR = 10;

  @RayRemote
  static class MyActor {

    public MyActor() {
    }

    /** Returns the id of the thread executing this actor's tasks. */
    public long getThreadId() {
      return Thread.currentThread().getId();
    }
  }

  @Test
  public void testActorTasksInOneThread() {
    TestUtils.skipTestUnderClusterMode();
    List<RayActor<MyActor>> actors = new ArrayList<>();
    Map<ActorId, Long> actorThreadIds = new HashMap<>();
    for (int i = 0; i < NUM_ACTOR_INSTANCE; ++i) {
      RayActor<MyActor> actor = Ray.createActor(MyActor::new);
      actors.add(actor);
      // Record the thread that executed this actor's first task.
      actorThreadIds.put(actor.getId(), Ray.call(MyActor::getThreadId, actor).get());
    }
    // Submit many more tasks per actor and collect the thread ids they report.
    Map<ActorId, List<RayObject<Long>>> allResults = new HashMap<>();
    for (RayActor<MyActor> actor : actors) {
      List<RayObject<Long>> thisActorResult = new ArrayList<>();
      for (int j = 0; j < TIMES_TO_CALL_PER_ACTOR; ++j) {
        thisActorResult.add(Ray.call(MyActor::getThreadId, actor));
      }
      allResults.put(actor.getId(), thisActorResult);
    }
    // Every task of an actor must have run on the thread recorded above.
    for (RayActor<MyActor> actor : actors) {
      for (RayObject<Long> threadId : allResults.get(actor.getId())) {
        Assert.assertEquals(threadId.get(), actorThreadIds.get(actor.getId()));
      }
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/StressTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.id.ObjectId;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Stress tests that submit large numbers of tasks and objects.
 */
public class StressTest extends BaseTest {

  /** A trivial remote function used as the unit of work. */
  public static int echo(int x) {
    return x;
  }

  /**
   * Submits 1000 echo tasks total, in batch sizes from 1000 down to 1, and
   * verifies every result.
   */
  @Test
  public void testSubmittingTasks() {
    TestUtils.skipTestUnderSingleProcess();
    for (int numIterations : ImmutableList.of(1, 10, 100, 1000)) {
      int numTasks = 1000 / numIterations;
      for (int i = 0; i < numIterations; i++) {
        List<ObjectId> resultIds = new ArrayList<>();
        for (int j = 0; j < numTasks; j++) {
          resultIds.add(Ray.call(StressTest::echo, 1).getId());
        }
        for (Integer result : Ray.<Integer>get(resultIds)) {
          Assert.assertEquals(result, Integer.valueOf(1));
        }
      }
    }
  }

  /**
   * Builds a chain of 1000 dependent tasks (each consuming the previous
   * result) and checks that the value passes through unchanged.
   */
  @Test
  public void testDependency() {
    TestUtils.skipTestUnderSingleProcess();
    RayObject<Integer> x = Ray.call(StressTest::echo, 1);
    for (int i = 0; i < 1000; i++) {
      x = Ray.call(StressTest::echo, x);
    }
    Assert.assertEquals(x.get(), Integer.valueOf(1));
  }

  public static class Actor {

    public int ping() {
      return 1;
    }
  }

  /** A worker actor that fans out calls to a single shared {@link Actor}. */
  public static class Worker {

    private RayActor<Actor> actor;

    public Worker(RayActor<Actor> actor) {
      this.actor = actor;
    }

    /** Calls {@code ping} on the shared actor {@code n} times and sums the replies. */
    public int ping(int n) {
      List<ObjectId> objectIds = new ArrayList<>();
      for (int i = 0; i < n; i++) {
        objectIds.add(Ray.call(Actor::ping, actor).getId());
      }
      int sum = 0;
      for (Integer result : Ray.<Integer>get(objectIds)) {
        sum += result;
      }
      return sum;
    }
  }

  /** 10 worker actors each submit 100 calls to one shared actor. */
  @Test(groups = {"directCall"})
  public void testSubmittingManyTasksToOneActor() {
    TestUtils.skipTestUnderSingleProcess();
    RayActor<Actor> actor = Ray.createActor(Actor::new);
    List<ObjectId> objectIds = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      RayActor<Worker> worker = Ray.createActor(Worker::new, actor);
      objectIds.add(Ray.call(Worker::ping, worker, 100).getId());
    }
    for (Integer result : Ray.<Integer>get(objectIds)) {
      Assert.assertEquals(result, Integer.valueOf(100));
    }
  }

  /** Puts 100k small objects and reads each of them back. */
  @Test
  public void testPuttingAndGettingManyObjects() {
    TestUtils.skipTestUnderSingleProcess();
    Integer objectToPut = 1;
    List<RayObject<Integer>> objects = new ArrayList<>();
    for (int i = 0; i < 100_000; i++) {
      objects.add(Ray.put(objectToPut));
    }
    for (RayObject<Integer> object : objects) {
      Assert.assertEquals(object.get(), objectToPut);
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/UniqueIdTest.java | Java | package org.ray.api.test;
import java.nio.ByteBuffer;
import java.util.Arrays;
import javax.xml.bind.DatatypeConverter;
import org.ray.api.id.ObjectId;
import org.ray.api.id.TaskId;
import org.ray.api.id.UniqueId;
import org.ray.runtime.util.IdUtil;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Unit tests for {@link UniqueId} construction and hashing.
 */
public class UniqueIdTest {

  @Test
  public void testConstructUniqueId() {
    // Test `fromHexString()` with a valid 20-byte (40 hex character) id.
    UniqueId id1 = UniqueId.fromHexString("00000000123456789ABCDEF123456789ABCDEF00");
    Assert.assertEquals("00000000123456789abcdef123456789abcdef00", id1.toString());
    Assert.assertFalse(id1.isNil());

    // A hex string of the wrong length must be rejected.
    try {
      UniqueId.fromHexString("000000123456789ABCDEF123456789ABCDEF00");
      Assert.fail("A hex string of the wrong length should be rejected.");
    } catch (IllegalArgumentException e) {
      // Expected.
    }

    // A string containing non-hex characters must be rejected.
    try {
      UniqueId.fromHexString("GGGGGGGGGGGGG");
      Assert.fail("A non-hex string should be rejected.");
    } catch (IllegalArgumentException e) {
      // Expected.
    }

    // Test `fromByteBuffer()`.
    byte[] bytes = DatatypeConverter.parseHexBinary("0123456789ABCDEF0123456789ABCDEF01234567");
    ByteBuffer byteBuffer = ByteBuffer.wrap(bytes, 0, 20);
    UniqueId id4 = UniqueId.fromByteBuffer(byteBuffer);
    Assert.assertTrue(Arrays.equals(bytes, id4.getBytes()));
    Assert.assertEquals("0123456789abcdef0123456789abcdef01234567", id4.toString());

    // The nil id consists of all 0xFF bytes.
    UniqueId id6 = UniqueId.NIL;
    Assert.assertEquals("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase(), id6.toString());
    Assert.assertTrue(id6.isNil());
  }

  @Test
  public void testMurmurHash() {
    // The expected remainder must stay in sync with the native murmur hash.
    UniqueId id = UniqueId.fromHexString("3131313131313131313132323232323232323232");
    long remainder = Long.remainderUnsigned(IdUtil.murmurHashCode(id), 1000000000);
    Assert.assertEquals(remainder, 787616861);
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/WaitTest.java | Java | package org.ray.api.test;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.WaitResult;
import org.ray.api.annotation.RayRemote;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests for {@code Ray.wait}: partial readiness with a timeout, waiting inside
 * a worker, and edge cases (empty list, null argument).
 */
public class WaitTest extends BaseTest {

  @RayRemote
  private static String hi() {
    return "hi";
  }

  /**
   * A task that sleeps far longer than any wait timeout used below, so it is
   * guaranteed to land in the "unready" set.
   */
  @RayRemote
  private static String delayedHi() {
    try {
      Thread.sleep(100 * 1000);
    } catch (InterruptedException e) {
      // Restore the interrupt status instead of swallowing it.
      Thread.currentThread().interrupt();
    }
    return "hi";
  }

  private static void testWait() {
    // Call a task in advance to warm up the cluster to avoid being too slow to start workers.
    TestUtils.warmUpCluster();
    RayObject<String> obj1 = Ray.call(WaitTest::hi);
    RayObject<String> obj2 = Ray.call(WaitTest::delayedHi);
    List<RayObject<String>> waitList = ImmutableList.of(obj1, obj2);
    // Only the fast task should become ready within the 2 second timeout.
    WaitResult<String> waitResult = Ray.wait(waitList, 2, 2 * 1000);
    List<RayObject<String>> readyList = waitResult.getReady();
    Assert.assertEquals(1, waitResult.getReady().size());
    Assert.assertEquals(1, waitResult.getUnready().size());
    Assert.assertEquals("hi", readyList.get(0).get());
  }

  @Test
  public void testWaitInDriver() {
    testWait();
  }

  @RayRemote
  public static Object waitInWorker() {
    testWait();
    return null;
  }

  /** Runs the same wait scenario from inside a worker task. */
  @Test
  public void testWaitInWorker() {
    RayObject<Object> res = Ray.call(WaitTest::waitInWorker);
    res.get();
  }

  /**
   * Waiting on an empty list returns immediately with two empty sets; waiting
   * on null is rejected with a NullPointerException.
   */
  @Test
  public void testWaitForEmpty() {
    WaitResult<String> result = Ray.wait(new ArrayList<>());
    Assert.assertTrue(result.getReady().isEmpty());
    Assert.assertTrue(result.getUnready().isEmpty());

    try {
      Ray.wait(null);
      Assert.fail("Ray.wait(null) should throw NullPointerException.");
    } catch (NullPointerException e) {
      // Expected.
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/java/org/ray/api/test/WorkerJvmOptionsTest.java | Java | package org.ray.api.test;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.TestUtils;
import org.ray.api.annotation.RayRemote;
import org.ray.api.options.ActorCreationOptions;
import org.testng.Assert;
import org.testng.annotations.Test;
public class WorkerJvmOptionsTest extends BaseTest {

  @RayRemote
  public static class Echo {

    String getOptions() {
      return System.getProperty("test.suffix");
    }
  }

  /**
   * Verifies that JVM options configured for an actor reach its worker process
   * as system properties.
   */
  @Test
  public void testJvmOptions() {
    TestUtils.skipTestUnderSingleProcess();
    // The whitespaces in the option string below are intentionally added to
    // test that raylet can correctly handle dynamic options with whitespaces.
    ActorCreationOptions options = new ActorCreationOptions.Builder()
        .setJvmOptions(" -Dtest.suffix=suffix -Dtest.suffix1=suffix1 ")
        .createActorCreationOptions();
    RayActor<Echo> echoActor = Ray.createActor(Echo::new, options);
    RayObject<String> reply = Ray.call(Echo::getOptions, echoActor);
    Assert.assertEquals(reply.get(), "suffix");
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/test/src/main/resources/test_cross_language_invocation.py | Python | # This file is used by CrossLanguageInvocationTest.java to test cross-language
# invocation.
import six
import ray
@ray.remote
def py_func(value):
    """Echo ``value`` (raw bytes) back with a fixed prefix.

    Invoked from Java by CrossLanguageInvocationTest to verify that byte
    payloads survive a Java -> Python task invocation.
    """
    assert isinstance(value, bytes)
    prefix = b"Response from Python: "
    return prefix + value
@ray.remote
class Counter(object):
    """A cross-language test actor that accumulates an integer value."""

    def __init__(self, value):
        # Value arrives from Java; coerce it to int explicitly.
        self.value = int(value)

    def increase(self, delta):
        """Add ``delta`` to the counter and return the new value.

        Returned as UTF-8 bytes on Python 3 (str on Python 2) so the Java
        caller always receives a byte payload.
        """
        self.value += int(delta)
        if six.PY3:
            return str(self.value).encode("utf-8")
        return str(self.value)
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/tutorial/src/main/java/org/ray/exercise/Exercise01.java | Java | package org.ray.exercise;
import java.io.Serializable;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
/**
* Define a remote function, and execute multiple remote functions in parallel.
*/
public class Exercise01 implements Serializable {

  /**
   * A plain remote function. The {@code @RayRemote} annotation marks a normal
   * static method as remotely callable.
   */
  @RayRemote
  public static String sayHello() {
    String greeting = "hello";
    System.out.println(greeting);
    return greeting;
  }

  @RayRemote
  public static String sayWorld() {
    String greeting = "world!";
    System.out.println(greeting);
    return greeting;
  }

  public static void main(String[] args) throws Exception {
    try {
      // Initialize the Ray runtime before submitting any tasks.
      Ray.init();
      // `Ray.call` submits a remote function and returns a handle to its result.
      RayObject<String> hello = Ray.call(Exercise01::sayHello);
      RayObject<String> world = Ray.call(Exercise01::sayWorld);
      System.out.println("First remote call result:" + hello.get());
      System.out.println("Second remote call result:" + world.get());
    } catch (Throwable t) {
      t.printStackTrace();
    } finally {
      Ray.shutdown();
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/tutorial/src/main/java/org/ray/exercise/Exercise02.java | Java | package org.ray.exercise;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
/**
* Execute remote functions in parallel with some dependencies.
*/
public class Exercise02 {

  @RayRemote
  public static String sayHello() {
    String greeting = "hello";
    System.out.println(greeting);
    return greeting;
  }

  @RayRemote
  public static String sayWorld() {
    String greeting = "world!";
    System.out.println(greeting);
    return greeting;
  }

  /**
   * A remote function whose inputs are the outputs of other remote functions.
   */
  @RayRemote
  public static String merge(String hello, String world) {
    return hello + "," + world;
  }

  /**
   * Chains two independent tasks into {@link #merge} without resolving them locally.
   */
  public static String sayHelloWorld() {
    RayObject<String> hello = Ray.call(Exercise02::sayHello);
    RayObject<String> world = Ray.call(Exercise02::sayWorld);
    // RayObject handles can be passed directly as arguments to another remote
    // function; Ray resolves them before the downstream task runs.
    return Ray.call(Exercise02::merge, hello, world).get();
  }

  public static void main(String[] args) throws Exception {
    try {
      Ray.init();
      String helloWorld = Exercise02.sayHelloWorld();
      System.out.println(helloWorld);
      assert helloWorld.equals("hello,world!");
    } catch (Throwable t) {
      t.printStackTrace();
    } finally {
      Ray.shutdown();
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/tutorial/src/main/java/org/ray/exercise/Exercise03.java | Java | package org.ray.exercise;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
/**
* Call a remote function from within another remote function.
*/
public class Exercise03 {

  /**
   * A remote function that itself submits another remote function.
   */
  @RayRemote
  public static String sayHelloWithWorld() {
    String greeting = "hello";
    System.out.println(greeting);
    RayObject<String> world = Ray.call(Exercise03::sayWorld);
    return greeting + "," + world.get();
  }

  /**
   * The nested remote function invoked from {@link #sayHelloWithWorld()}.
   */
  @RayRemote
  public static String sayWorld() {
    String greeting = "world!";
    System.out.println(greeting);
    return greeting;
  }

  public static void main(String[] args) throws Exception {
    try {
      Ray.init();
      String helloWithWorld = Ray.call(Exercise03::sayHelloWithWorld).get();
      System.out.println(helloWithWorld);
    } catch (Throwable t) {
      t.printStackTrace();
    } finally {
      Ray.shutdown();
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/tutorial/src/main/java/org/ray/exercise/Exercise04.java | Java | package org.ray.exercise;
import com.google.common.collect.ImmutableList;
import java.util.List;
import org.ray.api.Ray;
import org.ray.api.RayObject;
import org.ray.api.WaitResult;
import org.ray.api.annotation.RayRemote;
/**
* Use Ray.wait to ignore stragglers
*/
public class Exercise04 {

  @RayRemote
  public static String f1() {
    System.out.println("Executing f1");
    return "f1";
  }

  @RayRemote
  public static String f2() {
    System.out.println("Executing f2");
    return "f2";
  }

  /**
   * A deliberately slow remote function — the "straggler" the wait call ignores.
   */
  @RayRemote
  public static String f3() {
    System.out.println("Executing f3");
    try {
      Thread.sleep(5000L);
    } catch (Exception e) {
      e.printStackTrace();
    }
    System.out.println("Finished executing f3");
    return "f3";
  }

  public static void main(String[] args) throws Exception {
    try {
      Ray.init();
      List<RayObject<String>> candidates = ImmutableList.of(
          Ray.call(Exercise04::f1),
          Ray.call(Exercise04::f2),
          Ray.call(Exercise04::f3)
      );
      // Block until 2 of the 3 results are ready or 3 seconds have elapsed,
      // whichever comes first. The slow f3 ends up in the unready set.
      WaitResult<String> result = Ray.wait(candidates, 2, 3000);
      System.out.printf("%d ready object(s): \n", result.getReady().size());
      result.getReady().forEach(rayObject -> System.out.println(rayObject.get()));
      System.out.printf("%d unready object(s): \n", result.getUnready().size());
      result.getUnready().forEach(rayObject -> System.out.println(rayObject.getId()));
    } catch (Throwable t) {
      t.printStackTrace();
    } finally {
      Ray.shutdown();
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
java/tutorial/src/main/java/org/ray/exercise/Exercise05.java | Java | package org.ray.exercise;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.api.annotation.RayRemote;
/**
* Show usage of actors.
*/
public class Exercise05 {

  public static void main(String[] args) {
    try {
      Ray.init();
      // `Ray.createActor` instantiates the actor in a worker process.
      RayActor<Adder> adder = Ray.createActor(Adder::new, 0);
      // Actor methods are invoked via `Ray.call(method, actorHandle, args...)`.
      RayObject<Integer> firstSum = Ray.call(Adder::add, adder, 1);
      System.out.println(firstSum.get());
      RayObject<Integer> secondSum = Ray.call(Adder::add, adder, 10);
      System.out.println(secondSum.get());
    } catch (Throwable t) {
      t.printStackTrace();
    } finally {
      Ray.shutdown();
    }
  }

  /**
   * A stateful example actor that accumulates a running sum. The
   * {@code @RayRemote} annotation turns a normal class into an actor class.
   */
  @RayRemote
  public static class Adder {

    private int sum;

    public Adder(int initValue) {
      sum = initValue;
    }

    public int add(int n) {
      return sum += n;
    }
  }
}
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/build-wheel-macos.sh | Shell | #!/bin/bash
# Cause the script to exit if a single command fails.
set -e
# Show explicitly which commands are currently running.
set -x

# Much of this is taken from https://github.com/matthew-brett/multibuild.
# This script uses "sudo", so you may need to type in a password a couple times.

MACPYTHON_URL=https://www.python.org/ftp/python
MACPYTHON_PY_PREFIX=/Library/Frameworks/Python.framework/Versions
DOWNLOAD_DIR=python_downloads

# The i-th entries of the arrays below belong together: CPython version,
# installer package name, and the "major.minor" version string.
PY_VERSIONS=("3.5.3"
             "3.6.1"
             "3.7.0")
PY_INSTS=("python-3.5.3-macosx10.6.pkg"
          "python-3.6.1-macosx10.6.pkg"
          "python-3.7.0-macosx10.6.pkg")
PY_MMS=("3.5"
        "3.6"
        "3.7")
# The minimum supported numpy version is 1.14, see
# https://issues.apache.org/jira/browse/ARROW-3141
NUMPY_VERSIONS=("1.14.5"
                "1.14.5"
                "1.14.5")

# Bazel is needed to build Ray's native components.
./ci/travis/install-bazel.sh

mkdir -p $DOWNLOAD_DIR
mkdir -p .whl

# Use the latest version of Node.js in order to build the dashboard.
source $HOME/.nvm/nvm.sh
nvm use node

# Build the dashboard so its static assets can be included in the wheel.
pushd python/ray/dashboard/client
npm ci
npm run build
popd

# Build one wheel per configured Python version.
for ((i=0; i<${#PY_VERSIONS[@]}; ++i)); do
  PY_VERSION=${PY_VERSIONS[i]}
  PY_INST=${PY_INSTS[i]}
  PY_MM=${PY_MMS[i]}
  NUMPY_VERSION=${NUMPY_VERSIONS[i]}

  # The -f flag is passed twice to also run git clean in the arrow subdirectory.
  # The -d flag removes directories. The -x flag ignores the .gitignore file,
  # and the -e flag ensures that we don't remove the .whl directory.
  git clean -f -f -x -d -e .whl -e $DOWNLOAD_DIR -e python/ray/dashboard/client

  # Install Python.
  INST_PATH=python_downloads/$PY_INST
  curl $MACPYTHON_URL/$PY_VERSION/$PY_INST > $INST_PATH
  sudo installer -pkg $INST_PATH -target /

  PYTHON_EXE=$MACPYTHON_PY_PREFIX/$PY_MM/bin/python$PY_MM
  PIP_CMD="$(dirname $PYTHON_EXE)/pip$PY_MM"

  pushd /tmp
  # Install latest version of pip to avoid brownouts.
  curl https://bootstrap.pypa.io/get-pip.py | $PYTHON_EXE
  popd

  pushd python
  # Setuptools on CentOS is too old to install arrow 0.9.0, therefore we upgrade.
  $PIP_CMD install --upgrade setuptools
  # Install setuptools_scm because otherwise when building the wheel for
  # Python 3.6, we see an error.
  $PIP_CMD install -q setuptools_scm==3.1.0
  # Fix the numpy version because this will be the oldest numpy version we can
  # support.
  $PIP_CMD install -q numpy==$NUMPY_VERSION cython==0.29.0
  # Install wheel to avoid the error "invalid command 'bdist_wheel'".
  $PIP_CMD install -q wheel
  # Add the correct Python to the path and build the wheel. This is only
  # needed so that the installation finds the cython executable.
  PATH=$MACPYTHON_PY_PREFIX/$PY_MM/bin:$PATH $PYTHON_EXE setup.py bdist_wheel
  mv dist/*.whl ../.whl/
  popd
done
python/build-wheel-manylinux1.sh | Shell | #!/bin/bash
set -x

# Cause the script to exit if a single command fails.
set -e

# Override nproc inside the manylinux container so builds use a fixed number
# of parallel jobs.
cat << EOF > "/usr/bin/nproc"
#!/bin/bash
echo 10
EOF
chmod +x /usr/bin/nproc

# The manylinux1 Python ABI tags to build wheels for; entries pair up with
# NUMPY_VERSIONS below.
PYTHONS=("cp35-cp35m"
         "cp36-cp36m"
         "cp37-cp37m")

# The minimum supported numpy version is 1.14, see
# https://issues.apache.org/jira/browse/ARROW-3141
NUMPY_VERSIONS=("1.14.5"
                "1.14.5"
                "1.14.5")

sudo apt-get install unzip
/ray/ci/travis/install-bazel.sh
# Put bazel into the PATH
export PATH=$PATH:/root/bin

# Install and use the latest version of Node.js in order to build the dashboard.
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash
source $HOME/.nvm/nvm.sh
nvm install node
nvm use node

# Build the dashboard so its static assets can be included in the wheel.
pushd python/ray/dashboard/client
npm ci
npm run build
popd

mkdir .whl
# Build one wheel per Python ABI.
for ((i=0; i<${#PYTHONS[@]}; ++i)); do
  PYTHON=${PYTHONS[i]}
  NUMPY_VERSION=${NUMPY_VERSIONS[i]}

  # The -f flag is passed twice to also run git clean in the arrow subdirectory.
  # The -d flag removes directories. The -x flag ignores the .gitignore file,
  # and the -e flag ensures that we don't remove the .whl directory and the
  # dashboard directory.
  git clean -f -f -x -d -e .whl -e python/ray/dashboard/client

  pushd python
  # Fix the numpy version because this will be the oldest numpy version we can
  # support.
  /opt/python/${PYTHON}/bin/pip install -q numpy==${NUMPY_VERSION} cython==0.29.0
  PATH=/opt/python/${PYTHON}/bin:$PATH /opt/python/${PYTHON}/bin/python setup.py bdist_wheel
  # In the future, run auditwheel here.
  mv dist/*.whl ../.whl/
  popd
done

# Rename the wheels so that they can be uploaded to PyPI. TODO(rkn): This is a
# hack, we should use auditwheel instead.
pushd .whl
find *.whl -exec bash -c 'mv $1 ${1//linux/manylinux1}' bash {} \;
popd
python/ray/__init__.py | Python | import os
from os.path import dirname
import sys
# MUST add pickle5 to the import path because it will be imported by some
# raylet modules.
if "pickle5" in sys.modules:
raise ImportError("Ray must be imported before pickle5 because Ray "
"requires a specific version of pickle5 (which is "
"packaged along with Ray).")
# Add the directory containing pickle5 to the Python path so that we find the
# pickle5 version packaged with ray and not a pre-existing pickle5.
pickle5_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "pickle5_files")
sys.path.insert(0, pickle5_path)
# Expose ray ABI symbols which may be dependent by other shared
# libraries such as _streaming.so. See BUILD.bazel:_raylet
so_path = os.path.join(dirname(__file__), "_raylet.so")
if os.path.exists(so_path):
import ctypes
from ctypes import CDLL
CDLL(so_path, ctypes.RTLD_GLOBAL)
# MUST import ray._raylet before pyarrow to initialize some global variables.
# It seems the library related to memory allocation in pyarrow will destroy the
# initialization of grpc if we import pyarrow at first.
# NOTE(JoeyJiang): See https://github.com/ray-project/ray/issues/5219 for more
# details.
import ray._raylet # noqa: E402
if "pyarrow" in sys.modules:
raise ImportError("Ray must be imported before pyarrow because Ray "
"requires a specific version of pyarrow (which is "
"packaged along with Ray).")
# Add the directory containing pyarrow to the Python path so that we find the
# pyarrow version packaged with ray and not a pre-existing pyarrow.
pyarrow_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
sys.path.insert(0, pyarrow_path)
# See https://github.com/ray-project/ray/issues/131.
helpful_message = """
If you are using Anaconda, try fixing this problem by running:
conda install libgcc
"""
try:
import pyarrow # noqa: F401
# pyarrow is not imported inside of _raylet because of the issue described
# above. In order for Cython to compile _raylet, pyarrow is set to None
# in _raylet instead, so we give _raylet a real reference to it here.
    # We first do the attribute checks here so that building the documentation
    # succeeds without fully installing ray.
# TODO(edoakes): Fix this.
if hasattr(ray, "_raylet") and hasattr(ray._raylet, "pyarrow"):
ray._raylet.pyarrow = pyarrow
except ImportError as e:
if ((hasattr(e, "msg") and isinstance(e.msg, str)
and ("libstdc++" in e.msg or "CXX" in e.msg))):
# This code path should be taken with Python 3.
e.msg += helpful_message
elif (hasattr(e, "message") and isinstance(e.message, str)
and ("libstdc++" in e.message or "CXX" in e.message)):
# This code path should be taken with Python 2.
condition = (hasattr(e, "args") and isinstance(e.args, tuple)
and len(e.args) == 1 and isinstance(e.args[0], str))
if condition:
e.args = (e.args[0] + helpful_message, )
else:
if not hasattr(e, "args"):
e.args = ()
elif not isinstance(e.args, tuple):
e.args = (e.args, )
e.args += (helpful_message, )
raise
from ray._raylet import (
ActorCheckpointID,
ActorClassID,
ActorID,
ClientID,
Config as _Config,
JobID,
WorkerID,
FunctionID,
ObjectID,
TaskID,
UniqueID,
) # noqa: E402
_config = _Config()
from ray.profiling import profile # noqa: E402
from ray.state import (jobs, nodes, actors, tasks, objects, timeline,
object_transfer_timeline, cluster_resources,
available_resources, errors) # noqa: E402
from ray.worker import (
LOCAL_MODE,
SCRIPT_MODE,
WORKER_MODE,
connect,
disconnect,
get,
get_gpu_ids,
get_resource_ids,
get_webui_url,
init,
is_initialized,
put,
register_custom_serializer,
remote,
shutdown,
show_in_webui,
wait,
) # noqa: E402
import ray.internal # noqa: E402
import ray.projects # noqa: E402
# We import ray.actor because some code is run in actor.py which initializes
# some functions in the worker.
import ray.actor # noqa: F401
from ray.actor import method # noqa: E402
from ray.runtime_context import _get_runtime_context # noqa: E402
# Ray version string.
__version__ = "0.9.0.dev0"
# Names exported by ``from ray import *``. Every entry must actually be
# defined in this module; "PYTHON_MODE" was removed because it is never
# imported (only LOCAL_MODE, SCRIPT_MODE and WORKER_MODE come from
# ray.worker), so star-imports would fail with AttributeError on it.
__all__ = [
    "jobs",
    "nodes",
    "actors",
    "tasks",
    "objects",
    "timeline",
    "object_transfer_timeline",
    "cluster_resources",
    "available_resources",
    "errors",
    "LOCAL_MODE",
    "SCRIPT_MODE",
    "WORKER_MODE",
    "__version__",
    "_config",
    "_get_runtime_context",
    "actor",
    "connect",
    "disconnect",
    "get",
    "get_gpu_ids",
    "get_resource_ids",
    "get_webui_url",
    "init",
    "internal",
    "is_initialized",
    "method",
    "profile",
    "projects",
    "put",
    "register_custom_serializer",
    "remote",
    "shutdown",
    "show_in_webui",
    "wait",
]
# ID types
__all__ += [
"ActorCheckpointID",
"ActorClassID",
"ActorID",
"ClientID",
"JobID",
"WorkerID",
"FunctionID",
"ObjectID",
"TaskID",
"UniqueID",
]
import ctypes # noqa: E402
# Windows only
def _win32_configure_job_object():
    """Associate this process with a Win32 job object.

    Makes sure that all child processes die when we die. Also makes sure
    that fatal crashes result in process termination rather than an error
    dialog (the latter is annoying since we have a lot of processes). This
    is done by associating all child processes with a "job" object that
    imposes this behavior.
    """
    kernel32 = ctypes.windll.kernel32
    if kernel32 is None:
        return
    job = ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))
    # sizeof(JOBOBJECT_EXTENDED_LIMIT_INFORMATION): 0x90 for 64-bit
    # processes, 0x70 for 32-bit processes.
    size = (0x90 if ctypes.sizeof(ctypes.c_void_p) >
            ctypes.sizeof(ctypes.c_int) else 0x70)
    # The single non-zero byte lands at offset 17, i.e. in the LimitFlags
    # field -- presumably enabling KILL_ON_JOB_CLOSE (0x2000),
    # BREAKAWAY_OK (0x800) and DIE_ON_UNHANDLED_EXCEPTION (0x400).
    # TODO(review): confirm against the JOBOBJECT_BASIC_LIMIT_INFORMATION
    # documentation.
    info = "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (size - 18)
    # 9 == JobObjectExtendedLimitInformation. Preserve the original
    # short-circuit: only assign the process to the job if
    # SetInformationJobObject succeeded (returned non-zero).
    if kernel32.SetInformationJobObject(job, 9, info, size):
        kernel32.AssignProcessToJobObject(
            job, ctypes.c_void_p(kernel32.GetCurrentProcess()))


if hasattr(ctypes, "windll"):
    _win32_configure_job_object()
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/_raylet.pxd | Cython | # cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libcpp cimport bool as c_bool
from libcpp.string cimport string as c_string
from libcpp.vector cimport vector as c_vector
from libcpp.memory cimport (
shared_ptr,
unique_ptr
)
from ray.includes.common cimport (
CBuffer,
CRayObject
)
from ray.includes.libcoreworker cimport CCoreWorker
from ray.includes.unique_ids cimport (
CObjectID,
CActorID
)
# Declaration of the Python wrapper around a C++ CBuffer. ``shape`` and
# ``strides`` back the one-dimensional Python buffer-protocol support
# implemented in _raylet.pyx (includes/buffer.pxi).
cdef class Buffer:
    cdef:
        shared_ptr[CBuffer] buffer
        Py_ssize_t shape
        Py_ssize_t strides

    # Factory creating a Buffer from an existing C++ buffer.
    @staticmethod
    cdef make(const shared_ptr[CBuffer]& buffer)
# Common base class for the strongly-typed ID wrappers (ObjectID, ActorID,
# ...). Subclasses implement the C-level ``hash``.
cdef class BaseID:
    # To avoid the error of "Python int too large to convert to C ssize_t",
    # here `cdef size_t` is required.
    cdef size_t hash(self)
# Python wrapper for a C++ CObjectID (definition in _raylet.pyx).
cdef class ObjectID(BaseID):
    cdef:
        CObjectID data
        # Flag indicating whether or not this object ID was added to the set
        # of active IDs in the core worker so we know whether we should clean
        # it up.
        c_bool in_core_worker

    # Return the underlying C++ ID by value.
    cdef CObjectID native(self)
# Python wrapper for a C++ CActorID (definition in _raylet.pyx).
cdef class ActorID(BaseID):
    cdef CActorID data
    cdef CActorID native(self)
    cdef size_t hash(self)
# Declaration of the CoreWorker wrapper; see _raylet.pyx for the definition.
cdef class CoreWorker:
    cdef:
        unique_ptr[CCoreWorker] core_worker
        # Thread and event loop used for asyncio actor support.
        object async_thread
        object async_event_loop

    # Allocate a plasma buffer for a put (see definition for retry logic).
    cdef _create_put_buffer(self, shared_ptr[CBuffer] &metadata,
                            size_t data_size, ObjectID object_id,
                            CObjectID *c_object_id, shared_ptr[CBuffer] *data)
    # TODO: handle noreturn better
    cdef store_task_outputs(
            self, worker, outputs, const c_vector[CObjectID] return_ids,
            c_vector[shared_ptr[CRayObject]] *returns)
cdef c_vector[c_string] string_vector_from_list(list string_list)
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/_raylet.pyx | Cython | # cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cpython.exc cimport PyErr_CheckSignals
try:
import asyncio
except ImportError:
# Python2 doesn't have asyncio
asyncio = None
import numpy
import gc
import inspect
import threading
import time
import logging
import os
import sys
from libc.stdint cimport (
int32_t,
int64_t,
INT64_MAX,
uint64_t,
uint8_t,
)
from libcpp cimport bool as c_bool
from libcpp.memory cimport (
dynamic_pointer_cast,
make_shared,
shared_ptr,
unique_ptr,
)
from libcpp.string cimport string as c_string
from libcpp.utility cimport pair
from libcpp.unordered_map cimport unordered_map
from libcpp.vector cimport vector as c_vector
from cython.operator import dereference, postincrement
from ray.includes.common cimport (
CBuffer,
CAddress,
CLanguage,
CRayObject,
CRayStatus,
CGcsClientOptions,
CTaskArg,
CTaskType,
CRayFunction,
LocalMemoryBuffer,
move,
LANGUAGE_CPP,
LANGUAGE_JAVA,
LANGUAGE_PYTHON,
LocalMemoryBuffer,
TASK_TYPE_NORMAL_TASK,
TASK_TYPE_ACTOR_CREATION_TASK,
TASK_TYPE_ACTOR_TASK,
WORKER_TYPE_WORKER,
WORKER_TYPE_DRIVER,
)
from ray.includes.libraylet cimport (
CRayletClient,
GCSProfileEvent,
GCSProfileTableData,
WaitResultPair,
)
from ray.includes.unique_ids cimport (
CActorID,
CActorCheckpointID,
CObjectID,
CClientID,
)
from ray.includes.libcoreworker cimport (
CActorCreationOptions,
CCoreWorker,
CTaskOptions,
ResourceMappingType,
CFiberEvent
)
from ray.includes.task cimport CTaskSpec
from ray.includes.ray_config cimport RayConfig
import ray
import ray.experimental.signal as ray_signal
import ray.memory_monitor as memory_monitor
import ray.ray_constants as ray_constants
from ray import profiling
from ray.exceptions import (
RayError,
RayletError,
RayTaskError,
ObjectStoreFullError,
RayTimeoutError,
)
from ray.experimental.no_return import NoReturn
from ray.function_manager import FunctionDescriptor
from ray.utils import decode
from ray.ray_constants import (
DEFAULT_PUT_OBJECT_DELAY,
DEFAULT_PUT_OBJECT_RETRIES,
RAW_BUFFER_METADATA,
PICKLE_BUFFER_METADATA,
PICKLE5_BUFFER_METADATA,
)
# pyarrow cannot be imported until after _raylet finishes initializing
# (see ray/__init__.py for details).
# Unfortunately, Cython won't compile if 'pyarrow' is undefined, so we
# "forward declare" it here and then replace it with a reference to the
# imported package from ray/__init__.py.
# TODO(edoakes): Fix this.
pyarrow = None
cimport cpython
include "includes/unique_ids.pxi"
include "includes/ray_config.pxi"
include "includes/task.pxi"
include "includes/buffer.pxi"
include "includes/common.pxi"
include "includes/serialization.pxi"
include "includes/libcoreworker.pxi"
logger = logging.getLogger(__name__)
MEMCOPY_THREADS = 12
PY3 = cpython.PY_MAJOR_VERSION >= 3
if PY3:
import pickle
else:
import cPickle as pickle
if PY3:
from ray.async_compat import sync_to_async, AsyncGetResponse
def set_internal_config(dict options):
    """Apply a dict of internal config overrides to the global RayConfig.

    Keys and values are stringified and ASCII-encoded before being handed
    to the C++ config singleton. A ``None`` options dict is a no-op.
    """
    cdef:
        unordered_map[c_string, c_string] c_options
    if options is None:
        return
    for key, value in options.items():
        c_options[str(key).encode("ascii")] = str(value).encode("ascii")
    RayConfig.instance().initialize(c_options)
# Translate a non-OK C++ CRayStatus into the corresponding Python exception.
# Declared ``nogil except -1`` so it can be called from C++ code paths
# without the GIL; the GIL is re-acquired only on the error path.
cdef int check_status(const CRayStatus& status) nogil except -1:
    if status.ok():
        return 0
    with gil:
        message = status.message().decode()
        if status.IsObjectStoreFull():
            raise ObjectStoreFullError(message)
        elif status.IsInterrupted():
            # E.g. propagated from check_signals() below.
            raise KeyboardInterrupt()
        elif status.IsTimedOut():
            raise RayTimeoutError(message)
        else:
            # Generic fallback for all other non-OK statuses.
            raise RayletError(message)
# Convert a vector of C++ RayObjects into a Python list of
# (data, metadata) pairs: ``data`` is wrapped in a Buffer, ``metadata`` is
# materialized as bytes. A nullptr entry maps to (None, None).
cdef RayObjectsToDataMetadataPairs(
        const c_vector[shared_ptr[CRayObject]] objects):
    data_metadata_pairs = []
    for i in range(objects.size()):
        # core_worker will return a nullptr for objects that couldn't be
        # retrieved from the store or if an object was an exception.
        if not objects[i].get():
            data_metadata_pairs.append((None, None))
        else:
            data = None
            metadata = None
            if objects[i].get().HasData():
                data = Buffer.make(objects[i].get().GetData())
            if objects[i].get().HasMetadata():
                metadata = Buffer.make(
                    objects[i].get().GetMetadata()).to_pybytes()
            data_metadata_pairs.append((data, metadata))
    return data_metadata_pairs
# Convert a C++ vector of CObjectIDs into a list of Python ObjectIDs.
cdef VectorToObjectIDs(const c_vector[CObjectID] &object_ids):
    result = []
    for i in range(object_ids.size()):
        result.append(ObjectID(object_ids[i].Binary()))
    return result
cdef c_vector[CObjectID] ObjectIDsToVector(object_ids):
    """Convert a Python iterable of ObjectIDs into a C++ CObjectID vector.

    Args:
        object_ids (list): The Python list of object IDs.

    Returns:
        The output vector (returned by value).
    """
    cdef:
        ObjectID object_id
        c_vector[CObjectID] result
    for object_id in object_ids:
        result.push_back(object_id.native())
    return result
# Return the TaskID of the task that produced the given object.
def compute_task_id(ObjectID object_id):
    return TaskID(object_id.native().TaskId().Binary())
# Recursive complexity check used by check_simple_value below.
# ``num_elements_contained`` is an in/out running count of elements and
# bytes seen so far; a value is "simple" only while that count stays below
# RayConfig's num_elements_limit(). Containers are additionally rejected
# once their own length reaches size_limit().
# NOTE(review): cpython.PyInt_Check and cpython.PyUnicode_GET_SIZE are
# Python-2-era C-APIs -- confirm they remain available under the targeted
# CPython versions.
cdef c_bool is_simple_value(value, int64_t *num_elements_contained):
    num_elements_contained[0] += 1
    if num_elements_contained[0] >= RayConfig.instance().num_elements_limit():
        return False
    if (cpython.PyInt_Check(value) or cpython.PyLong_Check(value) or
            value is False or value is True or cpython.PyFloat_Check(value) or
            value is None):
        return True
    if cpython.PyBytes_CheckExact(value):
        num_elements_contained[0] += cpython.PyBytes_Size(value)
        return (num_elements_contained[0] <
                RayConfig.instance().num_elements_limit())
    if cpython.PyUnicode_CheckExact(value):
        num_elements_contained[0] += cpython.PyUnicode_GET_SIZE(value)
        return (num_elements_contained[0] <
                RayConfig.instance().num_elements_limit())
    if (cpython.PyList_CheckExact(value) and
            cpython.PyList_Size(value) < RayConfig.instance().size_limit()):
        for item in value:
            if not is_simple_value(item, num_elements_contained):
                return False
        return (num_elements_contained[0] <
                RayConfig.instance().num_elements_limit())
    if (cpython.PyDict_CheckExact(value) and
            cpython.PyDict_Size(value) < RayConfig.instance().size_limit()):
        # TODO(suquark): Using "items" in Python2 is not very efficient.
        for k, v in value.items():
            if not (is_simple_value(k, num_elements_contained) and
                    is_simple_value(v, num_elements_contained)):
                return False
        return (num_elements_contained[0] <
                RayConfig.instance().num_elements_limit())
    if (cpython.PyTuple_CheckExact(value) and
            cpython.PyTuple_Size(value) < RayConfig.instance().size_limit()):
        for item in value:
            if not is_simple_value(item, num_elements_contained):
                return False
        return (num_elements_contained[0] <
                RayConfig.instance().num_elements_limit())
    if isinstance(value, numpy.ndarray):
        # Object-dtype arrays can hold arbitrary Python objects: never
        # treat them as simple.
        if value.dtype == "O":
            return False
        num_elements_contained[0] += value.nbytes
        return (num_elements_contained[0] <
                RayConfig.instance().num_elements_limit())
    return False
def check_simple_value(value):
    """Check if value is simple enough to be sent by value.

    This method checks if a Python object is sufficiently simple that it can
    be serialized and passed by value as an argument to a task (without being
    put in the object store). The details of which objects are sufficiently
    simple are defined by this method and are not particularly important.
    But for performance reasons, it is better to place "small" objects in
    the task itself and "large" objects in the object store.

    Args:
        value: Python object that should be checked.

    Returns:
        True if the value should be sent by value, False otherwise.
    """
    cdef int64_t num_elements_contained = 0
    return is_simple_value(value, &num_elements_contained)
cdef class Language:
    """Python wrapper for the C++ worker/task language enum."""
    cdef CLanguage lang

    def __cinit__(self, int32_t lang):
        self.lang = <CLanguage>lang

    # Wrap a C++ enum value without going through Python int conversion at
    # the call site.
    @staticmethod
    cdef from_native(const CLanguage& lang):
        return Language(<int32_t>lang)

    def __eq__(self, other):
        return (isinstance(other, Language) and
                (<int32_t>self.lang) == (<int32_t>other.lang))

    def __repr__(self):
        if <int32_t>self.lang == <int32_t>LANGUAGE_PYTHON:
            return "PYTHON"
        elif <int32_t>self.lang == <int32_t>LANGUAGE_CPP:
            return "CPP"
        elif <int32_t>self.lang == <int32_t>LANGUAGE_JAVA:
            return "JAVA"
        else:
            # Only the three languages above are defined in common.pxd.
            raise Exception("Unexpected error")
# Programming language enum values.
cdef Language LANG_PYTHON = Language.from_native(LANGUAGE_PYTHON)
cdef Language LANG_CPP = Language.from_native(LANGUAGE_CPP)
cdef Language LANG_JAVA = Language.from_native(LANGUAGE_JAVA)
cdef int prepare_resources(
        dict resource_dict,
        unordered_map[c_string, double] *resource_map) except -1:
    """Validate a Python resource dict and fill the C++ resource map.

    Args:
        resource_dict: Mapping of resource name -> quantity (int or float).
        resource_map: Output parameter receiving the validated entries;
            zero quantities are skipped.

    Raises:
        ValueError: If the dict is None, a quantity is not numeric, is
            negative, or is a fractional value >= 1.
    """
    if resource_dict is None:
        raise ValueError("Must provide resource map.")

    for key, value in resource_dict.items():
        if not (isinstance(value, int) or isinstance(value, float)):
            raise ValueError("Resource quantities may only be ints or floats.")
        if value < 0:
            raise ValueError("Resource quantities may not be negative.")
        if value > 0:
            # Fractional quantities are only meaningful below 1 (e.g. half
            # a CPU); quantities of 1 or more must be whole numbers.
            if (value >= 1 and isinstance(value, float)
                    and not value.is_integer()):
                raise ValueError(
                    "Resource quantities >1 must be whole numbers.")
            resource_map[0][key.encode("ascii")] = float(value)
    return 0
# Convert a Python list of ``bytes`` into a C++ string vector; raises
# TypeError for any non-bytes element.
cdef c_vector[c_string] string_vector_from_list(list string_list):
    cdef:
        c_vector[c_string] out
    for s in string_list:
        if not isinstance(s, bytes):
            raise TypeError("string_list elements must be bytes")
        out.push_back(s)
    return out
# Module-level metadata buffers shared by every by-value task argument:
# one tags pickled "simple" values, the other raw bytes (see prepare_args).
# The trailing ``True`` argument to LocalMemoryBuffer presumably copies the
# source bytes so the buffers outlive the temporary c_strings --
# TODO(review): confirm against LocalMemoryBuffer's constructor.
cdef:
    c_string pickle_metadata_str = PICKLE_BUFFER_METADATA
    shared_ptr[CBuffer] pickle_metadata = dynamic_pointer_cast[
        CBuffer, LocalMemoryBuffer](
            make_shared[LocalMemoryBuffer](
                <uint8_t*>(pickle_metadata_str.data()),
                pickle_metadata_str.size(), True))
    c_string raw_meta_str = RAW_BUFFER_METADATA
    shared_ptr[CBuffer] raw_metadata = dynamic_pointer_cast[
        CBuffer, LocalMemoryBuffer](
            make_shared[LocalMemoryBuffer](
                <uint8_t*>(raw_meta_str.data()),
                raw_meta_str.size(), True))
cdef void prepare_args(list args, c_vector[CTaskArg] *args_vector):
    """Convert Python task arguments into C++ CTaskArgs.

    ObjectIDs and non-"simple" values are passed by reference (the latter
    after a ray.put); bytes values are inlined raw; all other simple values
    are inlined as pickled payloads tagged with pickle metadata.

    (Removed the unused ``pickled_str`` and ``arg_metadata`` locals.)
    """
    cdef:
        const unsigned char[:] buffer
        size_t size
        shared_ptr[CBuffer] arg_data
    # TODO be consistent with store_task_outputs
    for arg in args:
        if isinstance(arg, ObjectID):
            args_vector.push_back(
                CTaskArg.PassByReference((<ObjectID>arg).native()))
        elif not ray._raylet.check_simple_value(arg):
            # Too large/complex to inline: store it, pass the reference.
            args_vector.push_back(
                CTaskArg.PassByReference((<ObjectID>ray.put(arg)).native()))
        elif type(arg) is bytes:
            # NOTE(review): a zero-length bytes argument would make
            # &buffer[0] fail a bounds check -- confirm b"" cannot reach
            # this branch, or guard it.
            buffer = arg
            size = buffer.nbytes
            arg_data = dynamic_pointer_cast[CBuffer, LocalMemoryBuffer](
                make_shared[LocalMemoryBuffer](
                    <uint8_t*>(&buffer[0]), size, True))
            args_vector.push_back(
                CTaskArg.PassByValue(
                    make_shared[CRayObject](arg_data, raw_metadata)))
        else:
            buffer = pickle.dumps(
                arg, protocol=pickle.HIGHEST_PROTOCOL)
            size = buffer.nbytes
            arg_data = dynamic_pointer_cast[CBuffer, LocalMemoryBuffer](
                make_shared[LocalMemoryBuffer](
                    <uint8_t*>(&buffer[0]), size, True))
            args_vector.push_back(
                CTaskArg.PassByValue(
                    make_shared[CRayObject](arg_data, pickle_metadata)))
cdef class RayletClient:
    """Thin Python wrapper around the raylet client owned by the CoreWorker.

    Holds a raw (non-owning) pointer into the core worker's client; see the
    __cinit__ comment for the lifetime caveat.
    """
    cdef CRayletClient* client

    def __cinit__(self, CoreWorker core_worker):
        # The core worker and raylet client need to share an underlying
        # raylet client, so we take a reference to the core worker's client
        # here. The client is a raw pointer because it is only a temporary
        # workaround and will be removed once the core worker transition is
        # complete, so we don't want to change the unique_ptr in core worker
        # to a shared_ptr. This means the core worker *must* be
        # initialized before the raylet client.
        self.client = &core_worker.core_worker.get().GetRayletClient()

    def fetch_or_reconstruct(self, object_ids,
                             c_bool fetch_only,
                             TaskID current_task_id=TaskID.nil()):
        # Ask the raylet to make the given objects local (fetching and, if
        # fetch_only is false, reconstructing them).
        cdef c_vector[CObjectID] fetch_ids = ObjectIDsToVector(object_ids)
        check_status(self.client.FetchOrReconstruct(
            fetch_ids, fetch_only, True, current_task_id.native()))

    def push_error(self, JobID job_id, error_type, error_message,
                   double timestamp):
        # Publish an error entry for the given job.
        check_status(self.client.PushError(job_id.native(),
                                           error_type.encode("ascii"),
                                           error_message.encode("ascii"),
                                           timestamp))

    def prepare_actor_checkpoint(self, ActorID actor_id):
        # Ask the raylet to prepare a checkpoint for the actor; returns the
        # new checkpoint's ID.
        cdef:
            CActorCheckpointID checkpoint_id
            CActorID c_actor_id = actor_id.native()
        # PrepareActorCheckpoint will wait for raylet's reply, release
        # the GIL so other Python threads can run.
        with nogil:
            check_status(self.client.PrepareActorCheckpoint(
                c_actor_id, checkpoint_id))
        return ActorCheckpointID(checkpoint_id.Binary())

    def notify_actor_resumed_from_checkpoint(self, ActorID actor_id,
                                             ActorCheckpointID checkpoint_id):
        # Tell the raylet the actor was restored from the given checkpoint.
        check_status(self.client.NotifyActorResumedFromCheckpoint(
            actor_id.native(), checkpoint_id.native()))

    def set_resource(self, basestring resource_name,
                     double capacity, ClientID client_id):
        # Dynamically set a resource capacity on the node identified by
        # client_id.
        self.client.SetResource(resource_name.encode("ascii"), capacity,
                                CClientID.FromBinary(client_id.binary()))

    @property
    def job_id(self):
        # Job this client is attached to, as reported by the raylet client.
        return JobID(self.client.GetJobID().Binary())
# Reconstruct the Python (args, kwargs) for a task from the C++ argument
# vectors. Arguments fall into three cases: inline raw bytes, inline
# pickled "simple" values, and Ray objects (passed by reference or inlined
# by the direct task submitter), which are deserialized in one batch at the
# end. Raises the contained RayError if any deserialized argument is one.
cdef deserialize_args(
        const c_vector[shared_ptr[CRayObject]] &c_args,
        const c_vector[CObjectID] &arg_reference_ids):
    cdef:
        c_vector[shared_ptr[CRayObject]] objects_to_deserialize
    if c_args.size() == 0:
        return [], {}
    args = []
    ids_to_deserialize = []
    # Positions in ``args`` to be back-filled after batch deserialization.
    id_indices = []
    for i in range(c_args.size()):
        # Passed by value.
        if arg_reference_ids[i].IsNil():
            if (c_args[i].get().HasMetadata()
                    and Buffer.make(
                        c_args[i].get().GetMetadata()).to_pybytes()
                    == RAW_BUFFER_METADATA):
                data = Buffer.make(c_args[i].get().GetData())
                args.append(data.to_pybytes())
            elif (c_args[i].get().HasMetadata() and Buffer.make(
                    c_args[i].get().GetMetadata()).to_pybytes()
                    == PICKLE_BUFFER_METADATA):
                # This is a pickled "simple python value" argument.
                data = Buffer.make(c_args[i].get().GetData())
                args.append(pickle.loads(data.to_pybytes()))
            else:
                # This is a Ray object inlined by the direct task submitter.
                ids_to_deserialize.append(
                    ObjectID(arg_reference_ids[i].Binary()))
                id_indices.append(i)
                objects_to_deserialize.push_back(c_args[i])
                args.append(None)
        # Passed by reference.
        else:
            ids_to_deserialize.append(
                ObjectID(arg_reference_ids[i].Binary()))
            id_indices.append(i)
            objects_to_deserialize.push_back(c_args[i])
            args.append(None)
    data_metadata_pairs = RayObjectsToDataMetadataPairs(
        objects_to_deserialize)
    for i, arg in enumerate(
            ray.worker.global_worker.deserialize_objects(
                data_metadata_pairs, ids_to_deserialize)):
        args[id_indices[i]] = arg
    # Surface stored exceptions (e.g. failed upstream tasks) to the caller.
    for arg in args:
        if isinstance(arg, RayError):
            raise arg
    # Split the flat argument list back into positional args and kwargs.
    return ray.signature.recover_args(args)
# Run one task (normal task, actor creation, or actor method) on this
# worker: resolve the function, deserialize arguments, execute, store the
# outputs (or a RayTaskError per return ID on failure), and enforce the
# per-worker max_calls limit by raising a tagged SystemExit.
cdef execute_task(
        CTaskType task_type,
        const CRayFunction &ray_function,
        const unordered_map[c_string, double] &c_resources,
        const c_vector[shared_ptr[CRayObject]] &c_args,
        const c_vector[CObjectID] &c_arg_reference_ids,
        const c_vector[CObjectID] &c_return_ids,
        c_vector[shared_ptr[CRayObject]] *returns):
    worker = ray.worker.global_worker
    manager = worker.function_actor_manager
    cdef:
        dict execution_infos = manager.execution_infos
        CoreWorker core_worker = worker.core_worker
        JobID job_id = core_worker.get_current_job_id()
        CTaskID task_id = core_worker.core_worker.get().GetCurrentTaskId()
        CFiberEvent fiber_event
    # Automatically restrict the GPUs available to this task.
    ray.utils.set_cuda_visible_devices(ray.get_gpu_ids())
    # Cache key for the per-function execution info.
    descriptor = tuple(ray_function.GetFunctionDescriptor())
    if <int>task_type == <int>TASK_TYPE_ACTOR_CREATION_TASK:
        # Instantiate the actor object (without running __init__ here) and
        # initialize its checkpoint bookkeeping.
        function_descriptor = FunctionDescriptor.from_bytes_list(
            ray_function.GetFunctionDescriptor())
        actor_class = manager.load_actor_class(job_id, function_descriptor)
        actor_id = core_worker.get_actor_id()
        worker.actors[actor_id] = actor_class.__new__(actor_class)
        worker.actor_checkpoint_info[actor_id] = (
            ray.worker.ActorCheckpointInfo(
                num_tasks_since_last_checkpoint=0,
                last_checkpoint_timestamp=int(1000 * time.time()),
                checkpoint_ids=[]))
    execution_info = execution_infos.get(descriptor)
    if not execution_info:
        function_descriptor = FunctionDescriptor.from_bytes_list(
            ray_function.GetFunctionDescriptor())
        execution_info = manager.get_execution_info(
            job_id, function_descriptor)
        execution_infos[descriptor] = execution_info
    function_name = execution_info.function_name
    # NOTE(review): this is not valid JSON -- the name value is unquoted
    # and there is no comma before "task_id". Confirm whether consumers of
    # the profiling extra_data parse it as JSON; if so this needs fixing.
    extra_data = (b'{"name": ' + function_name.encode("ascii") +
                  b' "task_id": ' + task_id.Hex() + b'}')
    if <int>task_type == <int>TASK_TYPE_NORMAL_TASK:
        title = "ray::{}()".format(function_name)
        next_title = "ray::IDLE"
        function_executor = execution_info.function
    else:
        actor = worker.actors[core_worker.get_actor_id()]
        class_name = actor.__class__.__name__
        title = "ray::{}.{}()".format(class_name, function_name)
        next_title = "ray::{}".format(class_name)
        worker_name = "ray_{}_{}".format(class_name, os.getpid())
        # Apply per-actor heap / plasma limits when the corresponding
        # resources were requested.
        if c_resources.find(b"memory") != c_resources.end():
            worker.memory_monitor.set_heap_limit(
                worker_name,
                ray_constants.from_memory_units(
                    dereference(c_resources.find(b"memory")).second))
        if c_resources.find(b"object_store_memory") != c_resources.end():
            worker.core_worker.set_object_store_client_options(
                worker_name,
                int(ray_constants.from_memory_units(
                        dereference(
                            c_resources.find(b"object_store_memory")).second)))

        def function_executor(*arguments, **kwarguments):
            # function_executor is a generator to make sure python decrement
            # stack counter on context switch for async mode. If it is not
            # a generator, python will count the stacks of executor as part
            # of the recursion limit, resulting in much lower concurrency.
            function = execution_info.function
            if PY3 and core_worker.current_actor_is_asyncio():
                if inspect.iscoroutinefunction(function.method):
                    async_function = function
                else:
                    # Just execute the method if it's ray internal method.
                    # NOTE(review): ``return`` inside this generator makes
                    # next() raise StopIteration rather than yielding the
                    # value -- confirm __ray* internal methods are handled
                    # correctly on this path.
                    if function.name.startswith("__ray"):
                        return function(actor, *arguments, **kwarguments)
                    async_function = sync_to_async(function)
                coroutine = async_function(actor, *arguments, **kwarguments)
                loop = core_worker.create_or_get_event_loop()
                future = asyncio.run_coroutine_threadsafe(coroutine, loop)
                # Wake the C++ fiber once the coroutine finishes.
                future.add_done_callback(
                    lambda future: fiber_event.Notify())
                with nogil:
                    (core_worker.core_worker.get()
                        .YieldCurrentFiber(fiber_event))
                yield future.result()
            yield function(actor, *arguments, **kwarguments)
    with core_worker.profile_event(b"task", extra_data=extra_data):
        try:
            # Tracks whether a failure happened inside user code (affects
            # how the traceback is formatted below).
            task_exception = False
            if not (<int>task_type == <int>TASK_TYPE_ACTOR_TASK
                    and function_name == "__ray_terminate__"):
                worker.reraise_actor_init_error()
                worker.memory_monitor.raise_if_low_memory()
            with core_worker.profile_event(b"task:deserialize_arguments"):
                args, kwargs = deserialize_args(c_args, c_arg_reference_ids)
            # Execute the task.
            with ray.worker._changeproctitle(title, next_title):
                with core_worker.profile_event(b"task:execute"):
                    task_exception = True
                    outputs = function_executor(*args, **kwargs)
                    # The function_executor is a generator in actor mode.
                    if inspect.isgenerator(outputs):
                        outputs = next(outputs)
                    task_exception = False
            if c_return_ids.size() == 1:
                outputs = (outputs,)
            # Store the outputs in the object store.
            with core_worker.profile_event(b"task:store_outputs"):
                core_worker.store_task_outputs(
                    worker, outputs, c_return_ids, returns)
        except Exception as error:
            if (<int>task_type == <int>TASK_TYPE_ACTOR_CREATION_TASK):
                worker.mark_actor_init_failed(error)
            # NOTE(review): requires ``import traceback`` at module level;
            # it is missing from this module's imports.
            backtrace = ray.utils.format_error_message(
                traceback.format_exc(), task_exception=task_exception)
            if isinstance(error, RayTaskError):
                # Avoid recursive nesting of RayTaskError.
                failure_object = RayTaskError(function_name, backtrace,
                                              error.cause_cls)
            else:
                failure_object = RayTaskError(function_name, backtrace,
                                              error.__class__)
            # Every return ID receives the same error object so that any
            # downstream get() re-raises it.
            errors = []
            for _ in range(c_return_ids.size()):
                errors.append(failure_object)
            core_worker.store_task_outputs(
                worker, errors, c_return_ids, returns)
            ray.utils.push_error_to_driver(
                worker,
                ray_constants.TASK_PUSH_ERROR,
                str(failure_object),
                job_id=worker.current_job_id)
            # Send signal with the error.
            ray_signal.send(ray_signal.ErrorSignal(str(failure_object)))
    # Don't need to reset `current_job_id` if the worker is an
    # actor. Because the following tasks should all have the
    # same driver id.
    if <int>task_type == <int>TASK_TYPE_NORMAL_TASK:
        # Reset signal counters so that the next task can get
        # all past signals.
        ray_signal.reset()
    if execution_info.max_calls != 0:
        function_descriptor = FunctionDescriptor.from_bytes_list(
            ray_function.GetFunctionDescriptor())
        # Reset the state of the worker for the next task to execute.
        # Increase the task execution counter.
        manager.increase_task_counter(job_id, function_descriptor)
        # If we've reached the max number of executions for this worker, exit.
        task_counter = manager.get_task_counter(job_id, function_descriptor)
        if task_counter == execution_info.max_calls:
            exit = SystemExit(0)
            # Marker consumed by task_execution_handler to request a clean,
            # intentional worker shutdown.
            exit.is_ray_terminate = True
            raise exit
# Entry point registered with the C++ CCoreWorker for executing tasks.
# Called without the GIL; all Python work happens under ``with gil``. Any
# exception escaping execute_task is reported to the driver and the worker
# process exits; SystemExit is translated into a CRayStatus so C++ can shut
# the worker down (cleanly if ``is_ray_terminate`` was set).
cdef CRayStatus task_execution_handler(
        CTaskType task_type,
        const CRayFunction &ray_function,
        const unordered_map[c_string, double] &c_resources,
        const c_vector[shared_ptr[CRayObject]] &c_args,
        const c_vector[CObjectID] &c_arg_reference_ids,
        const c_vector[CObjectID] &c_return_ids,
        c_vector[shared_ptr[CRayObject]] *returns) nogil:
    with gil:
        try:
            try:
                # The call to execute_task should never raise an exception. If
                # it does, that indicates that there was an internal error.
                execute_task(task_type, ray_function, c_resources, c_args,
                             c_arg_reference_ids, c_return_ids, returns)
            except Exception:
                traceback_str = traceback.format_exc() + (
                    "An unexpected internal error occurred while the worker "
                    "was executing a task.")
                ray.utils.push_error_to_driver(
                    ray.worker.global_worker,
                    "worker_crash",
                    traceback_str,
                    job_id=None)
                sys.exit(1)
        except SystemExit as e:
            # Tell the core worker to exit as soon as the result objects
            # are processed.
            if hasattr(e, "is_ray_terminate"):
                return CRayStatus.IntentionalSystemExit()
            else:
                logger.exception("SystemExit was raised from the worker")
                return CRayStatus.UnexpectedSystemExit()
    return CRayStatus.OK()
# Callback invoked from C++ (without the GIL) to poll for pending Python
# signals; maps Ctrl-C onto CRayStatus.Interrupted so the C++ side can
# unwind (check_status above re-raises it as KeyboardInterrupt).
cdef CRayStatus check_signals() nogil:
    with gil:
        try:
            PyErr_CheckSignals()
        except KeyboardInterrupt:
            return CRayStatus.Interrupted(b"")
    return CRayStatus.OK()
# Wrap a non-empty C++ string's bytes in a CBuffer; an empty string maps to
# a null shared_ptr. The trailing ``True`` presumably copies the data --
# TODO(review): confirm LocalMemoryBuffer's constructor semantics.
cdef shared_ptr[CBuffer] string_to_buffer(c_string& c_str):
    cdef shared_ptr[CBuffer] empty_metadata
    if c_str.size() == 0:
        return empty_metadata
    return dynamic_pointer_cast[
        CBuffer, LocalMemoryBuffer](
            make_shared[LocalMemoryBuffer](
                <uint8_t*>(c_str.data()), c_str.size(), True))
# Copy an already-serialized object into a preallocated plasma buffer.
# Raw objects and legacy (pyarrow-serialized) objects go through a
# FixedSizeBufferWriter with multithreaded memcopy; pickle5 objects are
# written by their Pickle5Writer.
cdef write_serialized_object(
        serialized_object, const shared_ptr[CBuffer]& buf):
    # avoid initializing pyarrow before raylet
    from ray.serialization import Pickle5SerializedObject, RawSerializedObject
    if isinstance(serialized_object, RawSerializedObject):
        buffer = Buffer.make(buf)
        stream = pyarrow.FixedSizeBufferWriter(pyarrow.py_buffer(buffer))
        stream.set_memcopy_threads(MEMCOPY_THREADS)
        stream.write(pyarrow.py_buffer(serialized_object.value))
    elif isinstance(serialized_object, Pickle5SerializedObject):
        (<Pickle5Writer>serialized_object.writer).write_to(
            serialized_object.inband, buf, MEMCOPY_THREADS)
    else:
        # Fallback: legacy pyarrow SerializedObject.
        buffer = Buffer.make(buf)
        stream = pyarrow.FixedSizeBufferWriter(pyarrow.py_buffer(buffer))
        stream.set_memcopy_threads(MEMCOPY_THREADS)
        serialized_object.serialized_object.write_to(stream)
cdef class CoreWorker:
    def __cinit__(self, is_driver, store_socket, raylet_socket,
                  JobID job_id, GcsClientOptions gcs_options, log_dir,
                  node_ip_address, node_manager_port):
        # Construct the underlying C++ CCoreWorker. ``is_driver`` selects
        # the worker type; the module-level task-execution and signal-check
        # callbacks are registered here.
        assert pyarrow is not None, ("Expected pyarrow to be imported from "
                                     "outside _raylet. See __init__.py for "
                                     "details.")
        self.core_worker.reset(new CCoreWorker(
            WORKER_TYPE_DRIVER if is_driver else WORKER_TYPE_WORKER,
            LANGUAGE_PYTHON, store_socket.encode("ascii"),
            raylet_socket.encode("ascii"), job_id.native(),
            gcs_options.native()[0], log_dir.encode("utf-8"),
            node_ip_address.encode("utf-8"), node_manager_port,
            task_execution_handler, check_signals, True))
    def run_task_loop(self):
        # Block executing tasks dispatched by the raylet; releases the GIL
        # while blocked in C++.
        with nogil:
            self.core_worker.get().StartExecutingTasks()
    def get_current_task_id(self):
        # ID of the task currently being executed by this worker.
        return TaskID(self.core_worker.get().GetCurrentTaskId().Binary())
    def get_current_job_id(self):
        # ID of the job this worker belongs to.
        return JobID(self.core_worker.get().GetCurrentJobId().Binary())
    def get_actor_id(self):
        # ID of the actor running on this worker (nil for non-actor workers).
        return ActorID(self.core_worker.get().GetActorId().Binary())
    def set_webui_display(self, message):
        # Forward a display message (bytes) to the web UI via the core worker.
        self.core_worker.get().SetWebuiDisplay(message)
    def get_objects(self, object_ids, TaskID current_task_id,
                    int64_t timeout_ms=-1):
        """Fetch objects from the store as (data, metadata) Buffer pairs,
        blocking up to ``timeout_ms`` milliseconds (-1 presumably means
        wait indefinitely -- TODO(review): confirm against CCoreWorker::Get).

        NOTE(review): ``current_task_id`` is converted but never passed to
        the C++ Get call; it appears to be vestigial.
        """
        cdef:
            c_vector[shared_ptr[CRayObject]] results
            CTaskID c_task_id = current_task_id.native()
            c_vector[CObjectID] c_object_ids = ObjectIDsToVector(object_ids)
        with nogil:
            check_status(self.core_worker.get().Get(
                c_object_ids, timeout_ms, &results))
        return RayObjectsToDataMetadataPairs(results)
    def object_exists(self, ObjectID object_id):
        # True if the object is present in the local object store.
        cdef:
            c_bool has_object
            CObjectID c_object_id = object_id.native()
        with nogil:
            check_status(self.core_worker.get().Contains(
                c_object_id, &has_object))
        return has_object
    cdef _create_put_buffer(self, shared_ptr[CBuffer] &metadata,
                            size_t data_size, ObjectID object_id,
                            CObjectID *c_object_id, shared_ptr[CBuffer] *data):
        """Allocate a plasma buffer for a put, retrying with exponential
        backoff (plus a gc.collect()) when the object store is full.

        Returns:
            True if the object already existed in the store -- in that case
            the returned ``data`` pointer is null and nothing may be written.
        """
        delay = ray_constants.DEFAULT_PUT_OBJECT_DELAY
        for attempt in reversed(
                range(ray_constants.DEFAULT_PUT_OBJECT_RETRIES)):
            try:
                if object_id is None:
                    # Let the core worker generate a fresh object ID.
                    with nogil:
                        check_status(self.core_worker.get().Create(
                            metadata, data_size,
                            c_object_id, data))
                else:
                    c_object_id[0] = object_id.native()
                    with nogil:
                        check_status(self.core_worker.get().Create(
                            metadata, data_size, c_object_id[0], data))
                break
            except ObjectStoreFullError as e:
                if attempt:
                    logger.warning("Waiting {} seconds for space to free up "
                                   "in the object store.".format(delay))
                    gc.collect()
                    time.sleep(delay)
                    delay *= 2
                else:
                    self.dump_object_store_memory_usage()
                    raise e
        # If data is nullptr, that means the ObjectID already existed,
        # which we ignore.
        # TODO(edoakes): this is hacky, we should return the error instead
        # and deal with it here.
        return data.get() == NULL
    def put_serialized_object(self, serialized_object,
                              ObjectID object_id=None,
                              c_bool pin_object=True):
        """Write an already-serialized object into the store, seal it, and
        return its ObjectID."""
        cdef:
            CObjectID c_object_id
            shared_ptr[CBuffer] data
            shared_ptr[CBuffer] metadata
            # The object won't be pinned if an ObjectID is provided by the
            # user (because we can't track its lifetime to unpin). Note that
            # the API to do this isn't supported as a public API.
            c_bool owns_object = object_id is None
        metadata = string_to_buffer(serialized_object.metadata)
        total_bytes = serialized_object.total_bytes
        object_already_exists = self._create_put_buffer(
            metadata, total_bytes, object_id,
            &c_object_id, &data)
        # Only write and seal when a fresh buffer was actually allocated.
        if not object_already_exists:
            write_serialized_object(serialized_object, data)
            with nogil:
                check_status(
                    self.core_worker.get().Seal(
                        c_object_id, owns_object, pin_object))
        return ObjectID(c_object_id.Binary())
def wait(self, object_ids, int num_returns, int64_t timeout_ms,
         TaskID current_task_id):
    """Block until num_returns of object_ids are available or timeout.

    Returns (ready, not_ready), preserving the input order of object_ids.
    NOTE(review): the locals `result` and `c_task_id` appear unused here —
    confirm whether current_task_id is still needed by any caller.
    """
    cdef:
        WaitResultPair result
        c_vector[CObjectID] wait_ids
        c_vector[c_bool] results
        CTaskID c_task_id = current_task_id.native()

    wait_ids = ObjectIDsToVector(object_ids)
    with nogil:
        check_status(self.core_worker.get().Wait(
            wait_ids, num_returns, timeout_ms, &results))

    assert len(results) == len(object_ids)

    # Partition the inputs according to the per-object readiness flags.
    ready, not_ready = [], []
    for i, object_id in enumerate(object_ids):
        if results[i]:
            ready.append(object_id)
        else:
            not_ready.append(object_id)

    return ready, not_ready
def free_objects(self, object_ids, c_bool local_only,
                 c_bool delete_creating_tasks):
    """Delete the given objects via CoreWorker::Delete.

    local_only restricts deletion scope; delete_creating_tasks is
    forwarded unchanged — see the C++ Delete API for exact semantics.
    """
    cdef:
        c_vector[CObjectID] free_ids = ObjectIDsToVector(object_ids)

    with nogil:
        check_status(self.core_worker.get().Delete(
            free_ids, local_only, delete_creating_tasks))
def set_object_store_client_options(self, client_name,
                                    int64_t limit_bytes):
    """Set a per-client plasma memory limit.

    On failure (RayError from SetClientOptions), dumps current object
    store usage and raises RayOutOfMemoryError with diagnostics.
    """
    try:
        logger.debug("Setting plasma memory limit to {} for {}".format(
            limit_bytes, client_name))
        # client_name is encoded as ASCII for the C++ API.
        check_status(self.core_worker.get().SetClientOptions(
            client_name.encode("ascii"), limit_bytes))
    except RayError as e:
        self.dump_object_store_memory_usage()
        raise memory_monitor.RayOutOfMemoryError(
            "Failed to set object_store_memory={} for {}. The "
            "plasma store may have insufficient memory remaining "
            "to satisfy this limit (30% of object store memory is "
            "permanently reserved for shared usage). The current "
            "object store memory status is:\n\n{}".format(
                limit_bytes, client_name, e))
def dump_object_store_memory_usage(self):
    """Log the core worker's object-store memory usage report (warning level)."""
    message = self.core_worker.get().MemoryUsageString()
    logger.warning("Local object store memory usage:\n{}\n".format(
        message.decode("utf-8")))
def submit_task(self,
                function_descriptor,
                args,
                int num_return_vals,
                c_bool is_direct_call,
                resources,
                int max_retries):
    """Submit a normal (non-actor) task to the core worker.

    Returns the list of ObjectIDs for the task's return values. The
    submission is wrapped in a "submit_task" profile event.
    """
    cdef:
        unordered_map[c_string, double] c_resources
        CTaskOptions task_options
        CRayFunction ray_function
        c_vector[CTaskArg] args_vector
        c_vector[CObjectID] return_ids

    with self.profile_event(b"submit_task"):
        # Convert Python-side resources/args into C++ representations.
        prepare_resources(resources, &c_resources)
        task_options = CTaskOptions(
            num_return_vals, is_direct_call, c_resources)
        ray_function = CRayFunction(
            LANGUAGE_PYTHON, string_vector_from_list(function_descriptor))
        prepare_args(args, &args_vector)

        with nogil:
            check_status(self.core_worker.get().SubmitTask(
                ray_function, args_vector, task_options, &return_ids,
                max_retries))

        return VectorToObjectIDs(return_ids)
def create_actor(self,
                 function_descriptor,
                 args,
                 uint64_t max_reconstructions,
                 resources,
                 placement_resources,
                 c_bool is_direct_call,
                 int32_t max_concurrency,
                 c_bool is_detached,
                 c_bool is_asyncio):
    """Create an actor via CoreWorker::CreateActor and return its ActorID.

    function_descriptor/args describe the actor's __init__ invocation.
    NOTE(review): dynamic_worker_options is passed default-constructed
    (empty) — confirm this is intentional at this call site.
    """
    cdef:
        CRayFunction ray_function
        c_vector[CTaskArg] args_vector
        c_vector[c_string] dynamic_worker_options
        unordered_map[c_string, double] c_resources
        unordered_map[c_string, double] c_placement_resources
        CActorID c_actor_id

    with self.profile_event(b"submit_task"):
        prepare_resources(resources, &c_resources)
        prepare_resources(placement_resources, &c_placement_resources)
        ray_function = CRayFunction(
            LANGUAGE_PYTHON, string_vector_from_list(function_descriptor))
        prepare_args(args, &args_vector)

        with nogil:
            check_status(self.core_worker.get().CreateActor(
                ray_function, args_vector,
                CActorCreationOptions(
                    max_reconstructions, is_direct_call, max_concurrency,
                    c_resources, c_placement_resources,
                    dynamic_worker_options, is_detached, is_asyncio),
                &c_actor_id))

        return ActorID(c_actor_id.Binary())
def submit_actor_task(self,
                      ActorID actor_id,
                      function_descriptor,
                      args,
                      int num_return_vals,
                      double num_method_cpus):
    """Submit a method invocation on an existing actor.

    Returns the ObjectIDs of the method's return values. A positive
    num_method_cpus adds a "CPU" entry to the task's resource demand.
    """
    cdef:
        CActorID c_actor_id = actor_id.native()
        unordered_map[c_string, double] c_resources
        CTaskOptions task_options
        CRayFunction ray_function
        c_vector[CTaskArg] args_vector
        c_vector[CObjectID] return_ids

    with self.profile_event(b"submit_task"):
        if num_method_cpus > 0:
            c_resources[b"CPU"] = num_method_cpus
        # Second argument (is_direct_call) is hard-coded False here.
        task_options = CTaskOptions(num_return_vals, False, c_resources)
        ray_function = CRayFunction(
            LANGUAGE_PYTHON, string_vector_from_list(function_descriptor))
        prepare_args(args, &args_vector)

        with nogil:
            check_status(self.core_worker.get().SubmitActorTask(
                c_actor_id,
                ray_function,
                args_vector, task_options, &return_ids))

        return VectorToObjectIDs(return_ids)
def kill_actor(self, ActorID actor_id):
    """Forcibly kill the given actor via CoreWorker::KillActor."""
    cdef:
        CActorID c_actor_id = actor_id.native()

    with nogil:
        check_status(self.core_worker.get().KillActor(
            c_actor_id))
def resource_ids(self):
    """Return this worker's resource assignment as a Python dict.

    Maps resource name -> list of (resource_id, fraction) pairs, copied
    out of the C++ unordered_map returned by GetResourceIDs().
    """
    cdef:
        ResourceMappingType resource_mapping = (
            self.core_worker.get().GetResourceIDs())
        unordered_map[
            c_string, c_vector[pair[int64_t, double]]
        ].iterator iterator = resource_mapping.begin()
        c_vector[pair[int64_t, double]] c_value

    resources_dict = {}
    while iterator != resource_mapping.end():
        key = decode(dereference(iterator).first)
        c_value = dereference(iterator).second
        ids_and_fractions = []
        for i in range(c_value.size()):
            ids_and_fractions.append(
                (c_value[i].first, c_value[i].second))
        resources_dict[key] = ids_and_fractions
        postincrement(iterator)

    return resources_dict
def profile_event(self, c_string event_type, object extra_data=None):
    """Create a profiling event context manager for the given event type."""
    return ProfileEvent.make(
        self.core_worker.get().CreateProfileEvent(event_type),
        extra_data)
def deserialize_and_register_actor_handle(self, const c_string &bytes):
    """Deserialize a serialized actor handle and register it with this worker.

    Returns the ActorID of the registered handle.
    """
    c_actor_id = self.core_worker.get().DeserializeAndRegisterActorHandle(
        bytes)
    actor_id = ActorID(c_actor_id.Binary())
    return actor_id
def serialize_actor_handle(self, ActorID actor_id):
    """Serialize the handle for the given actor and return it as bytes."""
    cdef:
        CActorID c_actor_id = actor_id.native()
        c_string output

    check_status(self.core_worker.get().SerializeActorHandle(
        c_actor_id, &output))
    return output
def add_object_id_reference(self, ObjectID object_id):
    """Increment the local reference count for object_id."""
    # Note: faster to not release GIL for short-running op.
    self.core_worker.get().AddLocalReference(object_id.native())
def remove_object_id_reference(self, ObjectID object_id):
    """Decrement the local reference count for object_id."""
    # Note: faster to not release GIL for short-running op.
    self.core_worker.get().RemoveLocalReference(object_id.native())
def serialize_and_promote_object_id(self, ObjectID object_id):
    """Promote an object to plasma and collect its ownership info.

    Returns (object_id, owner_task_id, serialized_owner_address) — the
    pieces needed to reconstruct ownership on another worker (see
    deserialize_and_register_object_id).
    """
    cdef:
        CObjectID c_object_id = object_id.native()
        CTaskID c_owner_id = CTaskID.Nil()
        CAddress c_owner_address = CAddress()

    self.core_worker.get().PromoteToPlasmaAndGetOwnershipInfo(
        c_object_id, &c_owner_id, &c_owner_address)
    return (object_id,
            TaskID(c_owner_id.Binary()),
            c_owner_address.SerializeAsString())
def deserialize_and_register_object_id(
        self, const c_string &object_id_binary, const c_string
        &owner_id_binary, const c_string &serialized_owner_address):
    """Register ownership info produced by serialize_and_promote_object_id.

    Reconstructs the object ID, owner task ID and owner address from
    their binary forms and hands them to the core worker.
    """
    cdef:
        CObjectID c_object_id = CObjectID.FromBinary(object_id_binary)
        CTaskID c_owner_id = CTaskID.FromBinary(owner_id_binary)
        CAddress c_owner_address = CAddress()

    c_owner_address.ParseFromString(serialized_owner_address)
    self.core_worker.get().RegisterOwnershipInfoAndResolveFuture(
        c_object_id,
        c_owner_id,
        c_owner_address)
# TODO: handle noreturn better
cdef store_task_outputs(
        self, worker, outputs, const c_vector[CObjectID] return_ids,
        c_vector[shared_ptr[CRayObject]] *returns):
    # Serialize task outputs and write them into preallocated return
    # objects. NoReturn sentinels get a zero-size allocation and their
    # return slot is reset; actor handles may not be returned at all.
    cdef:
        c_vector[size_t] data_sizes
        c_vector[shared_ptr[CBuffer]] metadatas

    if return_ids.size() == 0:
        return

    # First pass: serialize each output and record its size/metadata so
    # AllocateReturnObjects can size all buffers in one call.
    serialized_objects = []
    for i in range(len(outputs)):
        output = outputs[i]
        if isinstance(output, ray.actor.ActorHandle):
            # Fixed: the message previously ended with a stray ")".
            raise Exception("Returning an actor handle from a remote "
                            "function is not allowed.")
        elif output is NoReturn:
            serialized_objects.append(output)
            data_sizes.push_back(0)
            metadatas.push_back(string_to_buffer(b''))
        else:
            context = worker.get_serialization_context()
            serialized_object = context.serialize(output)
            data_sizes.push_back(serialized_object.total_bytes)
            metadatas.push_back(
                string_to_buffer(serialized_object.metadata))
            serialized_objects.append(serialized_object)

    check_status(self.core_worker.get().AllocateReturnObjects(
        return_ids, data_sizes, metadatas, returns))

    # Second pass: copy the serialized payloads into the buffers.
    for i, serialized_object in enumerate(serialized_objects):
        # A nullptr is returned if the object already exists.
        if returns[0][i].get() == NULL:
            continue

        if serialized_object is NoReturn:
            returns[0][i].reset()
        else:
            write_serialized_object(
                serialized_object, returns[0][i].get().GetData())
def create_or_get_event_loop(self):
    """Lazily create the worker's asyncio loop and its daemon thread.

    On first call, creates a new event loop, runs the async plasma
    initialization on it, and starts a daemon thread running the loop
    forever. Subsequent calls return the existing loop.
    """
    if self.async_event_loop is None:
        self.async_event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.async_event_loop)
        # Initialize the async plasma connection.
        # Delayed import due to async_api depends on _raylet.
        from ray.experimental.async_api import _async_init
        self.async_event_loop.run_until_complete(_async_init())
    if self.async_thread is None:
        self.async_thread = threading.Thread(
            target=lambda: self.async_event_loop.run_forever()
        )
        # Making the thread a daemon causes it to exit
        # when the main thread exits.
        self.async_thread.daemon = True
        self.async_thread.start()

    return self.async_event_loop
def destory_event_loop_if_exists(self):
    """Stop the async event loop and join its thread, if they were created.

    NOTE(review): the method name has a typo ("destory"), but it is part
    of the public interface so it is kept as-is.
    NOTE(review): loop.stop() is invoked from a foreign thread here;
    asyncio documents call_soon_threadsafe as the thread-safe way to
    schedule on a loop — confirm this shutdown path is safe.
    """
    if self.async_event_loop is not None:
        self.async_event_loop.stop()
    if self.async_thread is not None:
        self.async_thread.join()
def current_actor_is_asyncio(self):
    """Return whether the currently executing actor was created with is_asyncio."""
    return self.core_worker.get().GetWorkerContext().CurrentActorIsAsync()
def get_all_reference_counts(self):
    """Return reference counts for all tracked objects.

    Maps ObjectID -> {"local": <count>, "submitted": <count>}, copied
    from the C++ map returned by GetAllReferenceCounts().
    """
    cdef:
        unordered_map[CObjectID, pair[size_t, size_t]] c_ref_counts
        unordered_map[CObjectID, pair[size_t, size_t]].iterator it

    c_ref_counts = self.core_worker.get().GetAllReferenceCounts()
    it = c_ref_counts.begin()

    ref_counts = {}
    while it != c_ref_counts.end():
        object_id = ObjectID(dereference(it).first.Binary())
        ref_counts[object_id] = {
            "local": dereference(it).second.first,
            "submitted": dereference(it).second.second}
        postincrement(it)

    return ref_counts
def in_memory_store_get_async(self, ObjectID object_id, future):
    """Asynchronously fetch object_id, completing the given Python future.

    The core worker invokes async_set_result_callback when the object is
    available in the in-memory store, or async_retry_with_plasma_callback
    to signal that the caller should fall back to plasma. The future is
    passed through as an opaque void* pointer.
    """
    self.core_worker.get().GetAsync(
        object_id.native(),
        async_set_result_callback,
        async_retry_with_plasma_callback,
        <void*>future)
# Completion callback for in_memory_store_get_async: deserializes the
# fetched object and resolves the waiting asyncio future on its own loop.
# Runs with the GIL acquired since it touches Python objects.
cdef void async_set_result_callback(shared_ptr[CRayObject] obj,
                                    CObjectID object_id,
                                    void *future) with gil:
    cdef:
        c_vector[shared_ptr[CRayObject]] objects_to_deserialize

    py_future = <object>(future)
    loop = py_future._loop
    # Object is retrieved from in memory store.
    # Here we go through the code path used to deserialize objects.
    objects_to_deserialize.push_back(obj)
    data_metadata_pairs = RayObjectsToDataMetadataPairs(
        objects_to_deserialize)
    ids_to_deserialize = [ObjectID(object_id.Binary())]
    objects = ray.worker.global_worker.deserialize_objects(
        data_metadata_pairs, ids_to_deserialize)
    # set_result must run on the future's own loop; this callback may
    # fire on a core-worker thread, hence call_soon_threadsafe.
    loop.call_soon_threadsafe(lambda: py_future.set_result(
        AsyncGetResponse(
            plasma_fallback_id=None, result=objects[0])))
# Fallback callback for in_memory_store_get_async: resolves the future
# with a plasma_fallback_id so the caller retries the fetch via plasma.
cdef void async_retry_with_plasma_callback(shared_ptr[CRayObject] obj,
                                           CObjectID object_id,
                                           void *future) with gil:
    py_future = <object>(future)
    loop = py_future._loop
    # Resolve on the future's own loop (callback may run off-loop).
    loop.call_soon_threadsafe(lambda: py_future.set_result(
        AsyncGetResponse(
            plasma_fallback_id=ObjectID(object_id.Binary()),
            result=None)))
import copy
import inspect
import logging
import six
import weakref
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from ray.function_manager import FunctionDescriptor
import ray.ray_constants as ray_constants
import ray._raylet
import ray.signature as signature
import ray.worker
from ray import ActorID, ActorClassID
logger = logging.getLogger(__name__)
def method(*args, **kwargs):
    """Annotate an actor method.

    .. code-block:: python

        @ray.remote
        class Foo:
            @ray.method(num_return_vals=2)
            def bar(self):
                return 1, 2

        f = Foo.remote()

        _, _ = f.bar.remote()

    Args:
        num_return_vals: The number of object IDs that should be returned by
            invocations of this actor method.
    """
    # Exactly one keyword argument, num_return_vals, is accepted.
    assert not args
    assert set(kwargs) == {"num_return_vals"}
    num_return_vals = kwargs["num_return_vals"]

    def annotate_method(method):
        # Stash the count on the function object; it is read later when
        # the actor class metadata is built.
        method.__ray_num_return_vals__ = num_return_vals
        return method

    return annotate_method
# Create objects to wrap method invocations. This is done so that we can
# invoke methods with actor.method.remote() instead of actor.method().
class ActorMethod:
    """Callable stub used to invoke a single actor method.

    Only a weak reference to the actor handle is held (unless the stub has
    been serialized for transfer to a remote function), so keeping an
    ActorMethod around does not delay garbage collection of its actor.

    Attributes:
        _actor_ref: Weak reference to the actor handle.
        _method_name: Name of the actor method this stub invokes.
        _num_return_vals: Default number of ObjectIDs a call returns.
        _decorator: Optional wrapper applied to the method *invocation*
            (not the remote execution). It receives a function taking
            (args, kwargs) and in most cases should call it and return
            the resulting ObjectIDs. For an example, see
            "test_decorated_method" in "python/ray/tests/test_actor.py".
        _actor_hard_ref: Strong reference to the actor, set only when
            hardref=True (e.g. after unpickling).
    """

    def __init__(self,
                 actor,
                 method_name,
                 num_return_vals,
                 decorator=None,
                 hardref=False):
        self._actor_ref = weakref.ref(actor)
        self._method_name = method_name
        self._num_return_vals = num_return_vals
        # Wraps the invocation of the method, not its execution.
        self._decorator = decorator
        # A strong ref is needed mainly when passing actor method handles
        # to remote functions.
        self._actor_hard_ref = actor if hardref else None

    def __call__(self, *args, **kwargs):
        raise Exception("Actor methods cannot be called directly. Instead "
                        "of running 'object.{}()', try "
                        "'object.{}.remote()'.".format(self._method_name,
                                                       self._method_name))

    def remote(self, *args, **kwargs):
        return self._remote(args, kwargs)

    def _remote(self, args=None, kwargs=None, num_return_vals=None):
        n_returns = (self._num_return_vals
                     if num_return_vals is None else num_return_vals)

        def invocation(args, kwargs):
            # Prefer the hard ref; fall back to the weak ref.
            actor = self._actor_hard_ref or self._actor_ref()
            if actor is None:
                raise RuntimeError("Lost reference to actor")
            return actor._actor_method_call(
                self._method_name,
                args=args,
                kwargs=kwargs,
                num_return_vals=n_returns)

        # Apply the invocation decorator if one was supplied.
        invoke = (invocation
                  if self._decorator is None else self._decorator(invocation))
        return invoke(args, kwargs)

    def __getstate__(self):
        return {
            "actor": self._actor_ref(),
            "method_name": self._method_name,
            "num_return_vals": self._num_return_vals,
            "decorator": self._decorator,
        }

    def __setstate__(self, state):
        # Rebuild with a hard ref: the weakref target would otherwise be
        # collected during unpickling.
        self.__init__(
            state["actor"],
            state["method_name"],
            state["num_return_vals"],
            decorator=state["decorator"],
            hardref=True)
class ActorClassMetadata:
    """Metadata for an actor class.

    Attributes:
        modified_class: The original class that was decorated (with some
            additional methods added like __ray_terminate__).
        class_id: The ID of this actor class.
        class_name: The name of this class.
        num_cpus: The default number of CPUs required by the actor creation
            task.
        num_gpus: The default number of GPUs required by the actor creation
            task.
        memory: The heap memory quota for this actor.
        object_store_memory: The object store memory quota for this actor.
        resources: The default resources required by the actor creation task.
        actor_method_cpus: The number of CPUs required by actor method tasks.
        last_export_session_and_job: A pair of the last exported session
            and job to help us to know whether this function was exported.
            This is an imperfect mechanism used to determine if we need to
            export the remote function again. It is imperfect in the sense
            that the actor class definition could be exported multiple times
            by different workers.
        actor_methods: The actor methods.
        method_decorators: Optional decorators that should be applied to the
            method invocation function before invoking the actor methods.
            These can be set by attaching the attribute
            "__ray_invocation_decorator__" to the actor method.
        method_signatures: The signatures of the methods.
        actor_method_names: The names of the actor methods.
        actor_method_num_return_vals: The default number of return values for
            each actor method.
    """

    def __init__(self, modified_class, class_id, max_reconstructions, num_cpus,
                 num_gpus, memory, object_store_memory, resources):
        self.modified_class = modified_class
        self.class_id = class_id
        self.class_name = modified_class.__name__
        self.max_reconstructions = max_reconstructions
        self.num_cpus = num_cpus
        self.num_gpus = num_gpus
        self.memory = memory
        self.object_store_memory = object_store_memory
        self.resources = resources
        self.last_export_session_and_job = None
        self.actor_methods = inspect.getmembers(
            self.modified_class, ray.utils.is_function_or_method)
        self.actor_method_names = [
            method_name for method_name, _ in self.actor_methods
        ]

        constructor_name = "__init__"
        if constructor_name not in self.actor_method_names:
            # Add __init__ if it does not exist.
            # Actor creation will be executed with __init__ together.
            # Assign an __init__ function will avoid many checks later on.
            # NOTE: this mutates the decorated class in place.
            def __init__(self):
                pass

            self.modified_class.__init__ = __init__
            self.actor_method_names.append(constructor_name)
            self.actor_methods.append((constructor_name, __init__))

        # Extract the signatures of each of the methods. This will be used
        # to catch some errors if the methods are called with inappropriate
        # arguments.
        self.method_decorators = {}
        self.method_signatures = {}
        self.actor_method_num_return_vals = {}
        for method_name, method in self.actor_methods:
            # Whether or not this method requires binding of its first
            # argument. For class and static methods, we do not want to bind
            # the first argument, but we do for instance methods
            is_bound = (ray.utils.is_class_method(method)
                        or ray.utils.is_static_method(self.modified_class,
                                                      method_name))

            # Print a warning message if the method signature is not
            # supported. We don't raise an exception because if the actor
            # inherits from a class that has a method whose signature we
            # don't support, there may not be much the user can do about it.
            self.method_signatures[method_name] = signature.extract_signature(
                method, ignore_first=not is_bound)

            # Set the default number of return values for this method.
            if hasattr(method, "__ray_num_return_vals__"):
                self.actor_method_num_return_vals[method_name] = (
                    method.__ray_num_return_vals__)
            else:
                self.actor_method_num_return_vals[method_name] = (
                    ray_constants.DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS)

            if hasattr(method, "__ray_invocation_decorator__"):
                self.method_decorators[method_name] = (
                    method.__ray_invocation_decorator__)
class ActorClass:
    """An actor class.

    This is a decorated class. It can be used to create actors.

    Attributes:
        __ray_metadata__: Contains metadata for the actor.
    """

    def __init__(cls, name, bases, attr):
        """Prevents users from directly inheriting from an ActorClass.

        This will be called when a class is defined with an ActorClass object
        as one of its base classes. To intentionally construct an ActorClass,
        use the '_ray_from_modified_class' classmethod.

        Raises:
            TypeError: Always.
        """
        for base in bases:
            if isinstance(base, ActorClass):
                raise TypeError("Attempted to define subclass '{}' of actor "
                                "class '{}'. Inheriting from actor classes is "
                                "not currently supported. You can instead "
                                "inherit from a non-actor base class and make "
                                "the derived class an actor class (with "
                                "@ray.remote).".format(
                                    name, base.__ray_metadata__.class_name))

        # This shouldn't be reached because one of the base classes must be
        # an actor class if this was meant to be subclassed.
        assert False, ("ActorClass.__init__ should not be called. Please use "
                       "the @ray.remote decorator instead.")

    def __call__(self, *args, **kwargs):
        """Prevents users from directly instantiating an ActorClass.

        This will be called instead of __init__ when 'ActorClass()' is
        executed because an ActorClass is an object rather than a metaobject.
        To properly instantiate a remote actor, use 'ActorClass.remote()'.

        Raises:
            Exception: Always.
        """
        raise Exception("Actors cannot be instantiated directly. "
                        "Instead of '{}()', use '{}.remote()'.".format(
                            self.__ray_metadata__.class_name,
                            self.__ray_metadata__.class_name))

    @classmethod
    def _ray_from_modified_class(cls, modified_class, class_id,
                                 max_reconstructions, num_cpus, num_gpus,
                                 memory, object_store_memory, resources):
        """Construct an ActorClass wrapping a user-decorated class."""
        for attribute in ["remote", "_remote", "_ray_from_modified_class"]:
            if hasattr(modified_class, attribute):
                logger.warning("Creating an actor from class {} overwrites "
                               "attribute {} of that class".format(
                                   modified_class.__name__, attribute))

        # Make sure the actor class we are constructing inherits from the
        # original class so it retains all class properties.
        class DerivedActorClass(cls, modified_class):
            pass

        name = "ActorClass({})".format(modified_class.__name__)
        DerivedActorClass.__module__ = modified_class.__module__
        DerivedActorClass.__name__ = name
        DerivedActorClass.__qualname__ = name
        # Construct the base object. __new__ is used so that the blocking
        # __init__/__call__ overrides above are never triggered.
        self = DerivedActorClass.__new__(DerivedActorClass)

        self.__ray_metadata__ = ActorClassMetadata(
            modified_class, class_id, max_reconstructions, num_cpus, num_gpus,
            memory, object_store_memory, resources)

        return self

    def remote(self, *args, **kwargs):
        """Create an actor.

        Args:
            args: These arguments are forwarded directly to the actor
                constructor.
            kwargs: These arguments are forwarded directly to the actor
                constructor.

        Returns:
            A handle to the newly created actor.
        """
        return self._remote(args=args, kwargs=kwargs)

    def options(self, **options):
        """Convenience method for creating an actor with options.

        Same arguments as Actor._remote(), but returns a wrapped actor class
        that a non-underscore .remote() can be called on.

        Examples:
            # The following two calls are equivalent.
            >>> Actor._remote(num_cpus=4, max_concurrency=8, args=[x, y])
            >>> Actor.options(num_cpus=4, max_concurrency=8).remote(x, y)
        """

        actor_cls = self

        class ActorOptionWrapper:
            def remote(self, *args, **kwargs):
                return actor_cls._remote(args=args, kwargs=kwargs, **options)

        return ActorOptionWrapper()

    def _remote(self,
                args=None,
                kwargs=None,
                num_cpus=None,
                num_gpus=None,
                memory=None,
                object_store_memory=None,
                resources=None,
                is_direct_call=None,
                max_concurrency=None,
                name=None,
                detached=False,
                is_asyncio=False):
        """Create an actor.

        This method allows more flexibility than the remote method because
        resource requirements can be specified and override the defaults in
        the decorator.

        Args:
            args: The arguments to forward to the actor constructor.
            kwargs: The keyword arguments to forward to the actor constructor.
            num_cpus: The number of CPUs required by the actor creation task.
            num_gpus: The number of GPUs required by the actor creation task.
            memory: Restrict the heap memory usage of this actor.
            object_store_memory: Restrict the object store memory used by
                this actor when creating objects.
            resources: The custom resources required by the actor creation
                task.
            is_direct_call: Use direct actor calls.
            max_concurrency: The max number of concurrent calls to allow for
                this actor. This only works with direct actor calls. The max
                concurrency defaults to 1 for threaded execution, and 1000
                for asyncio execution. Note that the execution order is not
                guaranteed when max_concurrency > 1.
            name: The globally unique name for the actor.
            detached: Whether the actor should be kept alive after driver
                exits.
            is_asyncio: Turn on async actor calls. This only works with
                direct actor calls.

        Returns:
            A handle to the newly created actor.
        """
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        if is_direct_call is None:
            is_direct_call = ray_constants.direct_call_enabled()
        if max_concurrency is None:
            if is_asyncio:
                max_concurrency = 1000
            else:
                max_concurrency = 1

        if max_concurrency > 1 and not is_direct_call:
            raise ValueError(
                "setting max_concurrency requires is_direct_call=True")
        if max_concurrency < 1:
            raise ValueError("max_concurrency must be >= 1")

        if is_asyncio and not is_direct_call:
            raise ValueError(
                "Setting is_asyncio requires is_direct_call=True.")

        worker = ray.worker.get_global_worker()
        if worker.mode is None:
            raise Exception("Actors cannot be created before ray.init() "
                            "has been called.")

        meta = self.__ray_metadata__

        if detached and name is None:
            raise Exception("Detached actors must be named. "
                            "Please use Actor._remote(name='some_name') "
                            "to associate the name.")

        # Check whether the name is already taken.
        if name is not None:
            try:
                ray.experimental.get_actor(name)
            except ValueError:  # name is not taken, expected.
                pass
            else:
                raise ValueError(
                    "The name {name} is already taken. Please use "
                    "a different name or get existing actor using "
                    "ray.experimental.get_actor('{name}')".format(name=name))

        # Set the actor's default resources if not already set. First three
        # conditions are to check that no resources were specified in the
        # decorator. Last three conditions are to check that no resources were
        # specified when _remote() was called.
        if (meta.num_cpus is None and meta.num_gpus is None
                and meta.resources is None and num_cpus is None
                and num_gpus is None and resources is None):
            # In the default case, actors acquire no resources for
            # their lifetime, and actor methods will require 1 CPU.
            cpus_to_use = ray_constants.DEFAULT_ACTOR_CREATION_CPU_SIMPLE
            actor_method_cpu = ray_constants.DEFAULT_ACTOR_METHOD_CPU_SIMPLE
        else:
            # If any resources are specified (here or in decorator), then
            # all resources are acquired for the actor's lifetime and no
            # resources are associated with methods.
            cpus_to_use = (ray_constants.DEFAULT_ACTOR_CREATION_CPU_SPECIFIED
                           if meta.num_cpus is None else meta.num_cpus)
            actor_method_cpu = ray_constants.DEFAULT_ACTOR_METHOD_CPU_SPECIFIED

        # The creation task runs the class constructor.
        function_name = "__init__"
        function_descriptor = FunctionDescriptor(
            meta.modified_class.__module__, function_name,
            meta.modified_class.__name__)

        # Do not export the actor class or the actor if run in LOCAL_MODE
        # Instead, instantiate the actor locally and add it to the worker's
        # dictionary
        if worker.mode == ray.LOCAL_MODE:
            actor_id = ActorID.from_random()
            # deepcopy mimics the by-value argument passing of real tasks.
            worker.actors[actor_id] = meta.modified_class(
                *copy.deepcopy(args), **copy.deepcopy(kwargs))
        else:
            # Export the actor.
            if (meta.last_export_session_and_job !=
                    worker.current_session_and_job):
                # If this actor class was not exported in this session and
                # job, we need to export this function again, because
                # current GCS doesn't have it.
                meta.last_export_session_and_job = (
                    worker.current_session_and_job)
                worker.function_actor_manager.export_actor_class(
                    meta.modified_class, meta.actor_method_names)

            resources = ray.utils.resources_from_resource_arguments(
                cpus_to_use, meta.num_gpus, meta.memory,
                meta.object_store_memory, meta.resources, num_cpus, num_gpus,
                memory, object_store_memory, resources)

            # If the actor methods require CPU resources, then set the
            # required placement resources. If actor_placement_resources is
            # empty, then the required placement resources will be the same
            # as resources.
            actor_placement_resources = {}
            assert actor_method_cpu in [0, 1]
            if actor_method_cpu == 1:
                # NOTE(review): assumes resources always contains a "CPU"
                # key here — confirm resources_from_resource_arguments
                # guarantees that.
                actor_placement_resources = resources.copy()
                actor_placement_resources["CPU"] += 1
            function_signature = meta.method_signatures[function_name]
            creation_args = signature.flatten_args(function_signature, args,
                                                   kwargs)
            actor_id = worker.core_worker.create_actor(
                function_descriptor.get_function_descriptor_list(),
                creation_args, meta.max_reconstructions, resources,
                actor_placement_resources, is_direct_call, max_concurrency,
                detached, is_asyncio)

        actor_handle = ActorHandle(
            actor_id,
            meta.modified_class.__module__,
            meta.class_name,
            meta.actor_method_names,
            meta.method_decorators,
            meta.method_signatures,
            meta.actor_method_num_return_vals,
            actor_method_cpu,
            worker.current_session_and_job,
            original_handle=True)

        if name is not None:
            ray.experimental.register_actor(name, actor_handle)

        return actor_handle
class ActorHandle:
"""A handle to an actor.
The fields in this class are prefixed with _ray_ to hide them from the user
and to avoid collision with actor method names.
An ActorHandle can be created in three ways. First, by calling .remote() on
an ActorClass. Second, by passing an actor handle into a task (forking the
ActorHandle). Third, by directly serializing the ActorHandle (e.g., with
cloudpickle).
Attributes:
_ray_actor_id: Actor ID.
_ray_module_name: The module name of this actor.
_ray_actor_method_names: The names of the actor methods.
_ray_method_decorators: Optional decorators for the function
invocation. This can be used to change the behavior on the
invocation side, whereas a regular decorator can be used to change
the behavior on the execution side.
_ray_method_signatures: The signatures of the actor methods.
_ray_method_num_return_vals: The default number of return values for
each method.
_ray_class_name: The name of the actor class.
_ray_actor_method_cpus: The number of CPUs required by actor methods.
_ray_original_handle: True if this is the original actor handle for a
given actor. If this is true, then the actor will be destroyed when
this handle goes out of scope.
"""
def __init__(self,
             actor_id,
             module_name,
             class_name,
             actor_method_names,
             method_decorators,
             method_signatures,
             method_num_return_vals,
             actor_method_cpus,
             session_and_job,
             original_handle=False):
    """Initialize the handle and attach an ActorMethod stub per method.

    Args:
        actor_id: The ActorID this handle refers to.
        module_name: Module of the decorated class.
        class_name: Name of the decorated class.
        actor_method_names: Names of the callable actor methods.
        method_decorators: Per-method invocation decorators (may be empty).
        method_signatures: Per-method extracted signatures.
        method_num_return_vals: Per-method default return-value counts.
        actor_method_cpus: CPUs each method invocation requires.
        session_and_job: Session/job pair at handle creation time.
        original_handle: True only for the handle returned by
            Class.remote(); controls cleanup behavior in __del__.
    """
    self._ray_actor_id = actor_id
    self._ray_module_name = module_name
    self._ray_original_handle = original_handle
    self._ray_actor_method_names = actor_method_names
    self._ray_method_decorators = method_decorators
    self._ray_method_signatures = method_signatures
    self._ray_method_num_return_vals = method_num_return_vals
    self._ray_class_name = class_name
    self._ray_actor_method_cpus = actor_method_cpus
    self._ray_session_and_job = session_and_job
    # Precompute one function descriptor list per method so submission
    # does not rebuild them on every call.
    self._ray_function_descriptor_lists = {
        method_name: FunctionDescriptor(
            self._ray_module_name, method_name,
            self._ray_class_name).get_function_descriptor_list()
        for method_name in self._ray_method_signatures.keys()
    }

    # Attach an ActorMethod stub for each method so users can write
    # handle.method.remote(...).
    for method_name in actor_method_names:
        method = ActorMethod(
            self,
            method_name,
            self._ray_method_num_return_vals[method_name],
            decorator=self._ray_method_decorators.get(method_name))
        setattr(self, method_name, method)
def _actor_method_call(self,
                       method_name,
                       args=None,
                       kwargs=None,
                       num_return_vals=None):
    """Method execution stub for an actor handle.

    This is the function that executes when
    `actor.method_name.remote(*args, **kwargs)` is called. Instead of
    executing locally, the method is packaged as a task and scheduled
    to the remote actor instance.

    Args:
        method_name: The name of the actor method to execute.
        args: A list of arguments for the actor method.
        kwargs: A dictionary of keyword arguments for the actor method.
        num_return_vals (int): The number of return values for the method.

    Returns:
        object_ids: A list of object IDs returned by the remote actor
            method.
    """
    worker = ray.worker.get_global_worker()

    args = args or []
    kwargs = kwargs or {}
    function_signature = self._ray_method_signatures[method_name]

    if not args and not kwargs and not function_signature:
        list_args = []
    else:
        list_args = signature.flatten_args(function_signature, args,
                                           kwargs)

    if worker.mode == ray.LOCAL_MODE:
        # NOTE(review): local mode passes the raw args/kwargs (not
        # list_args) to the local execution path — presumably intentional;
        # confirm against LocalModeManager.execute.
        function = getattr(worker.actors[self._actor_id], method_name)
        object_ids = worker.local_mode_manager.execute(
            function, method_name, args, kwargs, num_return_vals)
    else:
        object_ids = worker.core_worker.submit_actor_task(
            self._ray_actor_id,
            self._ray_function_descriptor_lists[method_name], list_args,
            num_return_vals, self._ray_actor_method_cpus)

    # Unwrap: single ObjectID for one return value, None for zero.
    if len(object_ids) == 1:
        object_ids = object_ids[0]
    elif len(object_ids) == 0:
        object_ids = None

    return object_ids
# Make tab completion work.
def __dir__(self):
    # Expose only the actor's method names to introspection/completion.
    return self._ray_actor_method_names
def __repr__(self):
    """Human-readable form: ``Actor(<class name>, <actor id hex>)``."""
    return "Actor(%s, %s)" % (self._ray_class_name, self._actor_id.hex())
    def __del__(self):
        """Terminate the worker that is running this actor."""
        # TODO(swang): Also clean up forked actor handles.
        # Kill the worker if this is the original actor handle, created
        # with Class.remote(). TODO(rkn): Even without passing handles around,
        # this is not the right policy. the actor should be alive as long as
        # there are ANY handles in scope in the process that created the actor,
        # not just the first one.
        worker = ray.worker.get_global_worker()
        exported_in_current_session_and_job = (
            self._ray_session_and_job == worker.current_session_and_job)
        if (worker.mode == ray.worker.SCRIPT_MODE
                and not exported_in_current_session_and_job):
            # If the worker is a driver and the driver id has changed because
            # Ray was shut down and re-initialized, the actor is already
            # cleaned up and we don't need to send `__ray_terminate__` again.
            logger.warning(
                "Actor is garbage collected in the wrong driver." +
                " Actor id = %s, class name = %s.", self._ray_actor_id,
                self._ray_class_name)
            return
        if worker.connected and self._ray_original_handle:
            # Note: in py2 the weakref is destroyed prior to calling __del__
            # so we need to set the hardref here briefly
            try:
                self.__ray_terminate__._actor_hard_ref = self
                self.__ray_terminate__.remote()
            finally:
                # Always clear the hard ref so this handle can be collected.
                self.__ray_terminate__._actor_hard_ref = None
    def __ray_kill__(self):
        """Kill the actor that this actor handle refers to immediately.

        This will cause any outstanding tasks submitted to the actor to fail
        and the actor to exit in the same way as if it crashed. In general,
        you should prefer to just delete the actor handle and let it clean up
        gracefully.

        Returns:
            None.
        """
        worker = ray.worker.get_global_worker()
        # Forcibly terminate the actor process via the core worker; no
        # graceful `__ray_terminate__` is sent.
        worker.core_worker.kill_actor(self._ray_actor_id)
    @property
    def _actor_id(self):
        """ActorID: The unique ID of the actor this handle refers to."""
        return self._ray_actor_id
    def _serialization_helper(self, ray_forking):
        """This is defined in order to make pickling work.

        Args:
            ray_forking: True if this is being called because Ray is forking
                the actor handle and false if it is being called by pickling.

        Returns:
            A dictionary of the information needed to reconstruct the object.
        """
        # NOTE(review): `ray_forking` is currently unused here — the state
        # produced is identical for forking and pickling; confirm intent.
        worker = ray.worker.get_global_worker()
        worker.check_connected()
        state = {
            # Local mode just uses the actor ID.
            "core_handle": worker.core_worker.serialize_actor_handle(
                self._ray_actor_id)
            if hasattr(worker, "core_worker") else self._ray_actor_id,
            "module_name": self._ray_module_name,
            "class_name": self._ray_class_name,
            "actor_method_names": self._ray_actor_method_names,
            "method_decorators": self._ray_method_decorators,
            "method_signatures": self._ray_method_signatures,
            "method_num_return_vals": self._ray_method_num_return_vals,
            "actor_method_cpus": self._ray_actor_method_cpus
        }
        return state
    def _deserialization_helper(self, state, ray_forking):
        """This is defined in order to make pickling work.

        Re-initializes this handle in place from the state dictionary that
        `_serialization_helper` produced.

        Args:
            state: The serialized state of the actor handle.
            ray_forking: True if this is being called because Ray is forking
                the actor handle and false if it is being called by pickling.
        """
        # NOTE(review): `ray_forking` is currently unused here; see
        # `_serialization_helper`.
        worker = ray.worker.get_global_worker()
        worker.check_connected()
        self.__init__(
            # TODO(swang): Accessing the worker's current task ID is not
            # thread-safe.
            # Local mode just uses the actor ID.
            worker.core_worker.deserialize_and_register_actor_handle(
                state["core_handle"])
            if hasattr(worker, "core_worker") else state["core_handle"],
            state["module_name"],
            state["class_name"],
            state["actor_method_names"],
            state["method_decorators"],
            state["method_signatures"],
            state["method_num_return_vals"],
            state["actor_method_cpus"],
            worker.current_session_and_job)
    def __getstate__(self):
        """This code path is used by pickling but not by Ray forking."""
        # ray_forking=False: we are being pickled, not forked in-cluster.
        return self._serialization_helper(False)
    def __setstate__(self, state):
        """This code path is used by pickling but not by Ray forking."""
        # ray_forking=False: we are being unpickled, not forked in-cluster.
        return self._deserialization_helper(state, False)
def make_actor(cls, num_cpus, num_gpus, memory, object_store_memory, resources,
               max_reconstructions):
    """Convert a user class into a Ray ActorClass.

    Validates the class and the reconstruction settings, injects the
    `__ray_terminate__`/`__ray_checkpoint__` system methods via a subclass,
    and wraps the result in an ActorClass.

    Args:
        cls: The user-defined class to convert.
        num_cpus: CPUs required by actor methods.
        num_gpus: GPUs required by actor methods.
        memory: Heap memory quota for the actor.
        object_store_memory: Object store memory quota for the actor.
        resources: Dict of custom resource requirements.
        max_reconstructions: How many times the actor may be reconstructed;
            None defaults to 0 (no reconstruction).

    Returns:
        An ActorClass wrapping the modified class.
    """
    # Give an error if cls is an old-style class.
    if not issubclass(cls, object):
        raise TypeError(
            "The @ray.remote decorator cannot be applied to old-style "
            "classes. In Python 2, you must declare the class with "
            "'class ClassName(object):' instead of 'class ClassName:'.")
    if issubclass(cls, Checkpointable) and inspect.isabstract(cls):
        raise TypeError(
            "A checkpointable actor class should implement all abstract "
            "methods in the `Checkpointable` interface.")
    if max_reconstructions is None:
        max_reconstructions = 0
    if not (ray_constants.NO_RECONSTRUCTION <= max_reconstructions <=
            ray_constants.INFINITE_RECONSTRUCTION):
        raise Exception("max_reconstructions must be in range [%d, %d]." %
                        (ray_constants.NO_RECONSTRUCTION,
                         ray_constants.INFINITE_RECONSTRUCTION))
    # Modify the class to have an additional method that will be used for
    # terminating the worker.
    class Class(cls):
        def __ray_terminate__(self):
            worker = ray.worker.get_global_worker()
            if worker.mode != ray.LOCAL_MODE:
                ray.actor.exit_actor()
        def __ray_checkpoint__(self):
            """Save a checkpoint.

            This task saves the current state of the actor, the current task
            frontier according to the raylet, and the checkpoint index
            (number of tasks executed so far).
            """
            worker = ray.worker.global_worker
            if not isinstance(self, ray.actor.Checkpointable):
                raise Exception(
                    "__ray_checkpoint__.remote() may only be called on actors "
                    "that implement ray.actor.Checkpointable")
            return worker._save_actor_checkpoint()
    # Preserve the original identity so the wrapper class is transparent.
    Class.__module__ = cls.__module__
    Class.__name__ = cls.__name__
    return ActorClass._ray_from_modified_class(
        Class, ActorClassID.from_random(), max_reconstructions, num_cpus,
        num_gpus, memory, object_store_memory, resources)
def exit_actor():
    """Intentionally exit the current actor.

    This function is used to disconnect an actor and exit the worker.

    Raises:
        Exception: An exception is raised if this is a driver or this
            worker is not an actor.
    """
    worker = ray.worker.global_worker
    if worker.mode == ray.WORKER_MODE and not worker.actor_id.is_nil():
        # Intentionally disconnect the core worker from the raylet so the
        # raylet won't push an error message to the driver.
        ray.disconnect()
        # Disconnect global state from GCS.
        ray.state.state.disconnect()
        # Set a flag to indicate this is an intentional actor exit. This
        # reduces log verbosity.
        exit = SystemExit(0)
        exit.is_ray_terminate = True
        raise exit
        # Unreachable: the raise above always exits; kept as a safety net.
        assert False, "This process should have terminated."
    else:
        raise Exception("exit_actor called on a non-actor worker.")
# Install the actor factory on the global worker so the @ray.remote class
# decorator path can create ActorClass objects via this module.
ray.worker.global_worker.make_actor = make_actor
# Passed to `Checkpointable.should_checkpoint` after every task so the actor
# can decide whether to take a checkpoint now.
CheckpointContext = namedtuple(
    "CheckpointContext",
    [
        # Actor's ID.
        "actor_id",
        # Number of tasks executed since last checkpoint.
        "num_tasks_since_last_checkpoint",
        # Time elapsed since last checkpoint, in milliseconds.
        "time_elapsed_ms_since_last_checkpoint",
    ],
)
"""A namedtuple that contains information about actor's last checkpoint."""
# Describes one saved checkpoint; lists of these are handed to
# `Checkpointable.load_checkpoint` sorted by timestamp descending.
Checkpoint = namedtuple(
    "Checkpoint",
    [
        # ID of this checkpoint.
        "checkpoint_id",
        # The timestamp at which this checkpoint was saved,
        # represented as milliseconds elapsed since Unix epoch.
        "timestamp",
    ],
)
"""A namedtuple that represents a checkpoint."""
class Checkpointable(six.with_metaclass(ABCMeta, object)):
    """An interface that indicates an actor can be checkpointed."""

    @abstractmethod
    def should_checkpoint(self, checkpoint_context):
        """Whether this actor needs to be checkpointed.

        This method will be called after every task. You should implement this
        callback to decide whether this actor needs to be checkpointed at this
        time, based on the checkpoint context, or any other factors.

        Args:
            checkpoint_context: A namedtuple that contains info about last
                checkpoint.

        Returns:
            A boolean value that indicates whether this actor needs to be
            checkpointed.
        """
        pass

    @abstractmethod
    def save_checkpoint(self, actor_id, checkpoint_id):
        """Save a checkpoint to persistent storage.

        If `should_checkpoint` returns true, this method will be called. You
        should implement this callback to save actor's checkpoint and the given
        checkpoint id to persistent storage.

        Args:
            actor_id: Actor's ID.
            checkpoint_id: ID of this checkpoint. You should save it together
                with actor's checkpoint data. And it will be used by the
                `load_checkpoint` method.

        Returns:
            None.
        """
        pass

    @abstractmethod
    def load_checkpoint(self, actor_id, available_checkpoints):
        """Load actor's previous checkpoint, and restore actor's state.

        This method will be called when an actor is reconstructed, after
        actor's constructor.
        If the actor needs to restore from previous checkpoint, this function
        should restore actor's state and return the checkpoint ID. Otherwise,
        it should do nothing and return None.
        Note, this method must return one of the checkpoint IDs in the
        `available_checkpoints` list, or None. Otherwise, an exception will be
        raised.

        Args:
            actor_id: Actor's ID.
            available_checkpoints: A list of `Checkpoint` namedtuples that
                contains all available checkpoint IDs and their timestamps,
                sorted by timestamp in descending order.

        Returns:
            The ID of the checkpoint from which the actor was resumed, or None
            if the actor should restart from the beginning.
        """
        pass

    @abstractmethod
    def checkpoint_expired(self, actor_id, checkpoint_id):
        """Delete an expired checkpoint.

        This method will be called when a checkpoint is expired. You should
        implement this method to delete your application checkpoint data.
        Note, the maximum number of checkpoints kept in the backend can be
        configured at `RayConfig.num_actor_checkpoints_to_keep`.

        Args:
            actor_id: ID of the actor.
            checkpoint_id: ID of the checkpoint that has expired.

        Returns:
            None.
        """
        pass
def get_checkpoints_for_actor(actor_id):
    """Get the available checkpoints for the given actor ID, return a list
    sorted by checkpoint timestamp in descending order.
    """
    info = ray.state.state.actor_checkpoint_info(actor_id)
    if info is None:
        # No checkpoint data recorded for this actor.
        return []
    id_time_pairs = zip(info["CheckpointIds"], info["Timestamps"])
    checkpoints = [Checkpoint(cid, ts) for cid, ts in id_time_pairs]
    # Most recent checkpoint first.
    checkpoints.sort(key=lambda c: c.timestamp, reverse=True)
    return checkpoints
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/async_compat.py | Python | """
This file should only be imported from Python 3.
It will raise SyntaxError when importing from Python 2.
"""
import asyncio
from collections import namedtuple
import time
import ray
def sync_to_async(func):
    """Convert a blocking function to an async function.

    Note that the wrapped call still runs synchronously on the event loop;
    this only adapts the calling convention so the result can be awaited.

    Args:
        func: A synchronous callable.

    Returns:
        A coroutine function that invokes ``func`` with the same arguments
        and returns its result.
    """
    # Local import keeps this module's top-level imports unchanged.
    import functools

    # functools.wraps preserves func's __name__/__doc__ on the wrapper,
    # which the original implementation lost.
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper
# Result of an async get on a direct actor call. Exactly one field is set:
#   Case 1: plasma_fallback_id=None, result=<Object>  (value is available)
#   Case 2: plasma_fallback_id=ObjectID, result=None  (retry via plasma)
AsyncGetResponse = namedtuple("AsyncGetResponse",
                              ["plasma_fallback_id", "result"])
def get_async(object_id):
    """Asyncio compatible version of ray.get"""
    # Delayed import because the raylet imports this file and an eager
    # import would create a circular dependency.
    from ray.experimental.async_api import init as async_api_init, as_future
    from ray.experimental.async_plasma import PlasmaObjectFuture
    assert isinstance(object_id, ray.ObjectID), "Batched get is not supported."
    # Setup
    async_api_init()
    loop = asyncio.get_event_loop()
    core_worker = ray.worker.global_worker.core_worker
    # Here's the callback used to implement async get logic.
    # What we want:
    # - If direct call, first try to get it from the in memory store.
    #   If the object is promoted to plasma, retry it from the plasma API.
    # - If not direct call, directly use the plasma API to get it.
    user_future = loop.create_future()
    # We have three future objects here.
    # user_future is directly returned to the user from this function,
    #     and it will be eventually fulfilled by the final result.
    # inner_future is the first attempt to retrieve the object. It can be
    #     fulfilled by either core_worker.get_async or plasma_api.as_future.
    #     When inner_future completes, done_callback will be invoked. This
    #     callback sets the final object in user_future if the object hasn't
    #     been promoted to plasma, otherwise it will retry from plasma.
    # retry_plasma_future is only created when we are getting objects that
    #     were promoted to plasma. It will also invoke done_callback when it's
    #     fulfilled.
    def done_callback(future):
        result = future.result()
        # Result from async plasma, transparently pass it to user future
        if isinstance(future, PlasmaObjectFuture):
            if isinstance(result, ray.exceptions.RayTaskError):
                ray.worker.last_task_error_raise_time = time.time()
                user_future.set_exception(result.as_instanceof_cause())
            else:
                user_future.set_result(result)
        else:
            # Result from direct call.
            assert isinstance(result, AsyncGetResponse), result
            if result.plasma_fallback_id is None:
                if isinstance(result.result, ray.exceptions.RayTaskError):
                    ray.worker.last_task_error_raise_time = time.time()
                    user_future.set_exception(
                        result.result.as_instanceof_cause())
                else:
                    user_future.set_result(result.result)
            else:
                # Schedule plasma to async get, reusing the same callback.
                retry_plasma_future = as_future(result.plasma_fallback_id)
                retry_plasma_future.add_done_callback(done_callback)
                # A hack to keep reference to the future so it doesn't get GC.
                user_future.retry_plasma_future = retry_plasma_future
    if object_id.is_direct_call_type():
        inner_future = loop.create_future()
        core_worker.in_memory_store_get_async(object_id, inner_future)
    else:
        inner_future = as_future(object_id)
    inner_future.add_done_callback(done_callback)
    # A hack to keep reference to inner_future so it doesn't get GC.
    user_future.inner_future = inner_future
    # A hack to keep a reference to the object ID for ref counting.
    user_future.object_id = object_id
    return user_future
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/autoscaler.py | Python | import copy
import hashlib
import json
import logging
import math
import os
import subprocess
import threading
import time
from collections import defaultdict
import numpy as np
import ray.services as services
import yaml
from ray.worker import global_worker
from ray.autoscaler.docker import dockerize_if_needed
from ray.autoscaler.node_provider import get_node_provider, \
get_default_config
from ray.autoscaler.tags import (TAG_RAY_LAUNCH_CONFIG, TAG_RAY_RUNTIME_CONFIG,
TAG_RAY_NODE_STATUS, TAG_RAY_NODE_TYPE,
TAG_RAY_NODE_NAME, STATUS_UP_TO_DATE,
STATUS_UNINITIALIZED, NODE_TYPE_WORKER)
from ray.autoscaler.updater import NodeUpdaterThread
from ray.ray_constants import AUTOSCALER_MAX_NUM_FAILURES, \
AUTOSCALER_MAX_LAUNCH_BATCH, AUTOSCALER_MAX_CONCURRENT_LAUNCHES, \
AUTOSCALER_UPDATE_INTERVAL_S, AUTOSCALER_HEARTBEAT_TIMEOUT_S, \
AUTOSCALER_RESOURCE_REQUEST_CHANNEL, MEMORY_RESOURCE_UNIT_BYTES
from six import string_types
from six.moves import queue
logger = logging.getLogger(__name__)
# Schema markers: a field's second tuple element says whether it must appear.
REQUIRED, OPTIONAL = True, False
# For (a, b), if a is a dictionary object, then
# no extra fields can be introduced.
CLUSTER_CONFIG_SCHEMA = {
    # An unique identifier for the head node and workers of this cluster.
    "cluster_name": (str, REQUIRED),
    # The minimum number of workers nodes to launch in addition to the head
    # node. This number should be >= 0.
    "min_workers": (int, OPTIONAL),
    # The maximum number of workers nodes to launch in addition to the head
    # node. This takes precedence over min_workers.
    "max_workers": (int, REQUIRED),
    # The number of workers to launch initially, in addition to the head node.
    "initial_workers": (int, OPTIONAL),
    # The mode of the autoscaler e.g. default, aggressive
    "autoscaling_mode": (str, OPTIONAL),
    # The autoscaler will scale up the cluster to this target fraction of
    # resources usage. For example, if a cluster of 8 nodes is 100% busy
    # and target_utilization was 0.8, it would resize the cluster to 10.
    "target_utilization_fraction": (float, OPTIONAL),
    # If a node is idle for this many minutes, it will be removed.
    "idle_timeout_minutes": (int, OPTIONAL),
    # Cloud-provider specific configuration.
    "provider": (
        {
            "type": (str, REQUIRED),  # e.g. aws
            "region": (str, OPTIONAL),  # e.g. us-east-1
            "availability_zone": (str, OPTIONAL),  # e.g. us-east-1a
            "module": (str,
                       OPTIONAL),  # module, if using external node provider
            "project_id": (None, OPTIONAL),  # gcp project id, if using gcp
            "head_ip": (str, OPTIONAL),  # local cluster head node
            "worker_ips": (list, OPTIONAL),  # local cluster worker nodes
            "use_internal_ips": (bool, OPTIONAL),  # don't require public ips
            "namespace": (str, OPTIONAL),  # k8s namespace, if using k8s
            # k8s autoscaler permissions, if using k8s
            "autoscaler_service_account": (dict, OPTIONAL),
            "autoscaler_role": (dict, OPTIONAL),
            "autoscaler_role_binding": (dict, OPTIONAL),
            "extra_config": (dict, OPTIONAL),  # provider-specific config
            # Whether to try to reuse previously stopped nodes instead of
            # launching nodes. This will also cause the autoscaler to stop
            # nodes instead of terminating them. Only implemented for AWS.
            "cache_stopped_nodes": (bool, OPTIONAL),
        },
        REQUIRED),
    # How Ray will authenticate with newly launched nodes.
    "auth": (
        {
            "ssh_user": (str, OPTIONAL),  # e.g. ubuntu
            "ssh_private_key": (str, OPTIONAL),
        },
        OPTIONAL),
    # Docker configuration. If this is specified, all setup and start commands
    # will be executed in the container.
    "docker": (
        {
            "image": (str, OPTIONAL),  # e.g. tensorflow/tensorflow:1.5.0-py3
            "container_name": (str, OPTIONAL),  # e.g., ray_docker
            "pull_before_run": (bool, OPTIONAL),  # run `docker pull` first
            # shared options for starting head/worker docker
            "run_options": (list, OPTIONAL),
            # image for head node, takes precedence over "image" if specified
            "head_image": (str, OPTIONAL),
            # head specific run options, appended to run_options
            "head_run_options": (list, OPTIONAL),
            # analogous to head_image
            "worker_image": (str, OPTIONAL),
            # analogous to head_run_options
            "worker_run_options": (list, OPTIONAL),
        },
        OPTIONAL),
    # Provider-specific config for the head node, e.g. instance type.
    "head_node": (dict, OPTIONAL),
    # Provider-specific config for worker nodes. e.g. instance type.
    "worker_nodes": (dict, OPTIONAL),
    # Map of remote paths to local paths, e.g. {"/tmp/data": "/my/local/data"}
    "file_mounts": (dict, OPTIONAL),
    # List of commands that will be run before `setup_commands`. If docker is
    # enabled, these commands will run outside the container and before docker
    # is setup.
    "initialization_commands": (list, OPTIONAL),
    # List of common shell commands to run to setup nodes.
    "setup_commands": (list, OPTIONAL),
    # Commands that will be run on the head node after common setup.
    "head_setup_commands": (list, OPTIONAL),
    # Commands that will be run on worker nodes after common setup.
    "worker_setup_commands": (list, OPTIONAL),
    # Command to start ray on the head node. You shouldn't need to modify this.
    "head_start_ray_commands": (list, OPTIONAL),
    # Command to start ray on worker nodes. You shouldn't need to modify this.
    "worker_start_ray_commands": (list, OPTIONAL),
    # Whether to avoid restarting the cluster during updates. This field is
    # controlled by the ray --no-restart flag and cannot be set by the user.
    "no_restart": (None, OPTIONAL),
}
class LoadMetrics:
    """Container for cluster load metrics.

    Metrics here are updated from raylet heartbeats. The autoscaler
    queries these metrics to determine when to scale up, and which nodes
    can be removed.
    """

    def __init__(self):
        # All maps below are keyed by node IP address.
        self.last_used_time_by_ip = {}
        self.last_heartbeat_time_by_ip = {}
        self.static_resources_by_ip = {}
        self.dynamic_resources_by_ip = {}
        self.resource_load_by_ip = {}
        self.local_ip = services.get_node_ip_address()

    def update(self, ip, static_resources, dynamic_resources, resource_load):
        """Record one heartbeat: total, available, and queued resources."""
        self.resource_load_by_ip[ip] = resource_load
        self.static_resources_by_ip[ip] = static_resources
        # We are not guaranteed to have a corresponding dynamic resource for
        # every static resource because dynamic resources are based on the
        # available resources in the heartbeat, which does not exist if it is
        # zero. Thus, we have to update dynamic resources here.
        dynamic_resources_update = dynamic_resources.copy()
        for resource_name, capacity in static_resources.items():
            if resource_name not in dynamic_resources_update:
                dynamic_resources_update[resource_name] = 0.0
        self.dynamic_resources_by_ip[ip] = dynamic_resources_update
        now = time.time()
        # A node counts as "used" if any of its resources are in use
        # (available != total); that resets its idle timer.
        if ip not in self.last_used_time_by_ip or \
                static_resources != dynamic_resources:
            self.last_used_time_by_ip[ip] = now
        self.last_heartbeat_time_by_ip[ip] = now

    def mark_active(self, ip):
        """Treat a freshly set-up node as having just heartbeated."""
        assert ip is not None, "IP should be known at this time"
        logger.info("Node {} is newly setup, treating as active".format(ip))
        self.last_heartbeat_time_by_ip[ip] = time.time()

    def prune_active_ips(self, active_ips):
        """Drop metrics for IPs no longer in the cluster (keeps local IP)."""
        active_ips = set(active_ips)
        active_ips.add(self.local_ip)

        def prune(mapping):
            # Remove entries for any IP not in the active set.
            unwanted = set(mapping) - active_ips
            for unwanted_key in unwanted:
                logger.info("LoadMetrics: "
                            "Removed mapping: {} - {}".format(
                                unwanted_key, mapping[unwanted_key]))
                del mapping[unwanted_key]
            if unwanted:
                logger.info(
                    "LoadMetrics: "
                    "Removed {} stale ip mappings: {} not in {}".format(
                        len(unwanted), unwanted, active_ips))
            assert not (unwanted & set(mapping))

        prune(self.last_used_time_by_ip)
        prune(self.static_resources_by_ip)
        prune(self.dynamic_resources_by_ip)
        prune(self.resource_load_by_ip)
        prune(self.last_heartbeat_time_by_ip)

    def approx_workers_used(self):
        # Fractional count of nodes in use; see get_resource_usage.
        return self._info()["NumNodesUsed"]

    def num_workers_connected(self):
        # Number of nodes that have reported static resources.
        return self._info()["NumNodesConnected"]

    def get_resource_usage(self):
        """Aggregate per-node usage into cluster-level numbers.

        Returns:
            Tuple of (fractional nodes in use, per-resource amounts used,
            per-resource total amounts).
        """
        num_nodes = len(self.static_resources_by_ip)
        nodes_used = 0.0
        num_nonidle = 0
        has_saturated_node = False
        resources_used = {}
        resources_total = {}
        for ip, max_resources in self.static_resources_by_ip.items():
            avail_resources = self.dynamic_resources_by_ip[ip]
            resource_load = self.resource_load_by_ip[ip]
            max_frac = 0.0
            for resource_id, amount in resource_load.items():
                if amount > 0:
                    has_saturated_node = True
                    max_frac = 1.0  # the resource is saturated
            for resource_id, amount in max_resources.items():
                used = amount - avail_resources[resource_id]
                if resource_id not in resources_used:
                    resources_used[resource_id] = 0.0
                    resources_total[resource_id] = 0.0
                resources_used[resource_id] += used
                resources_total[resource_id] += amount
                used = max(0, used)
                if amount > 0:
                    frac = used / float(amount)
                    if frac > max_frac:
                        max_frac = frac
            # A node's usage is its most-utilized resource fraction.
            nodes_used += max_frac
            if max_frac > 0:
                num_nonidle += 1
        # If any nodes have a queue buildup, assume all non-idle nodes are 100%
        # busy, plus the head node. This guards against the case of not scaling
        # up due to poor task packing.
        if has_saturated_node:
            nodes_used = min(num_nonidle + 1.0, num_nodes)
        return nodes_used, resources_used, resources_total

    def info_string(self):
        """One-line `key=value` summary of _info(), sorted by key."""
        return ", ".join(
            ["{}={}".format(k, v) for k, v in sorted(self._info().items())])

    def _info(self):
        """Build the dict of human-readable metrics used by info_string."""
        nodes_used, resources_used, resources_total = self.get_resource_usage()
        now = time.time()
        idle_times = [now - t for t in self.last_used_time_by_ip.values()]
        heartbeat_times = [
            now - t for t in self.last_heartbeat_time_by_ip.values()
        ]
        # Report the five nodes whose heartbeats are most overdue.
        most_delayed_heartbeats = sorted(
            self.last_heartbeat_time_by_ip.items(),
            key=lambda pair: pair[1])[:5]
        most_delayed_heartbeats = {
            ip: (now - t)
            for ip, t in most_delayed_heartbeats
        }

        def format_resource(key, value):
            # Memory-type resources are stored in resource units; convert
            # to GiB for display.
            if key in ["object_store_memory", "memory"]:
                return "{} GiB".format(
                    round(value * MEMORY_RESOURCE_UNIT_BYTES / 1e9, 2))
            else:
                return round(value, 2)

        return {
            "ResourceUsage": ", ".join([
                "{}/{} {}".format(
                    format_resource(rid, resources_used[rid]),
                    format_resource(rid, resources_total[rid]), rid)
                for rid in sorted(resources_used)
            ]),
            "NumNodesConnected": len(self.static_resources_by_ip),
            "NumNodesUsed": round(nodes_used, 2),
            "NodeIdleSeconds": "Min={} Mean={} Max={}".format(
                int(np.min(idle_times)) if idle_times else -1,
                int(np.mean(idle_times)) if idle_times else -1,
                int(np.max(idle_times)) if idle_times else -1),
            "TimeSinceLastHeartbeat": "Min={} Mean={} Max={}".format(
                int(np.min(heartbeat_times)) if heartbeat_times else -1,
                int(np.mean(heartbeat_times)) if heartbeat_times else -1,
                int(np.max(heartbeat_times)) if heartbeat_times else -1),
            "MostDelayedHeartbeats": most_delayed_heartbeats,
        }
class NodeLauncher(threading.Thread):
    """Worker thread that consumes (config, count) launch requests from a
    shared queue and creates worker nodes via the node provider."""

    def __init__(self, provider, queue, pending, index=None, *args, **kwargs):
        self.queue = queue
        # ConcurrentCounter of in-flight launches; decremented when done.
        self.pending = pending
        self.provider = provider
        self.index = str(index) if index is not None else ""
        super(NodeLauncher, self).__init__(*args, **kwargs)

    def _launch_node(self, config, count):
        """Create `count` worker nodes tagged with the current launch hash."""
        worker_filter = {TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER}
        before = self.provider.non_terminated_nodes(tag_filters=worker_filter)
        launch_hash = hash_launch_conf(config["worker_nodes"], config["auth"])
        self.log("Launching {} nodes.".format(count))
        self.provider.create_node(
            config["worker_nodes"], {
                TAG_RAY_NODE_NAME: "ray-{}-worker".format(
                    config["cluster_name"]),
                TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER,
                TAG_RAY_NODE_STATUS: STATUS_UNINITIALIZED,
                TAG_RAY_LAUNCH_CONFIG: launch_hash,
            }, count)
        after = self.provider.non_terminated_nodes(tag_filters=worker_filter)
        # Sanity check: creation should have added at least one new node.
        if set(after).issubset(before):
            self.log("No new nodes reported after node creation.")

    def run(self):
        # Loop forever; the thread is started as a daemon by the autoscaler.
        while True:
            config, count = self.queue.get()
            self.log("Got {} nodes to launch.".format(count))
            try:
                self._launch_node(config, count)
            except Exception:
                logger.exception("Launch failed")
            finally:
                # Always release the pending count, even on failure.
                self.pending.dec(count)

    def log(self, statement):
        prefix = "NodeLauncher{}:".format(self.index)
        logger.info(prefix + " {}".format(statement))
class ConcurrentCounter:
    """A thread-safe non-negative integer counter guarded by a lock."""

    def __init__(self):
        self._count = 0
        self._mutex = threading.Lock()

    def inc(self, count):
        """Add `count` to the counter and return the new value."""
        with self._mutex:
            self._count += count
            return self._count

    def dec(self, count):
        """Subtract `count` from the counter and return the new value."""
        with self._mutex:
            assert self._count >= count, "counter cannot go negative"
            self._count -= count
            return self._count

    @property
    def value(self):
        """The current counter value, read under the lock."""
        with self._mutex:
            return self._count
class StandardAutoscaler:
    """The autoscaling control loop for a Ray cluster.

    There are two ways to start an autoscaling cluster: manually by running
    `ray start --head --autoscaling-config=/path/to/config.yaml` on a
    instance that has permission to launch other instances, or you can also use
    `ray create_or_update /path/to/config.yaml` from your laptop, which will
    configure the right AWS/Cloud roles automatically.

    StandardAutoscaler's `update` method is periodically called by `monitor.py`
    to add and remove nodes as necessary. Currently, load-based autoscaling is
    not implemented, so all this class does is try to maintain a constant
    cluster size.

    StandardAutoscaler is also used to bootstrap clusters (by adding workers
    until the target cluster size is met).
    """

    def __init__(self,
                 config_path,
                 load_metrics,
                 max_launch_batch=AUTOSCALER_MAX_LAUNCH_BATCH,
                 max_concurrent_launches=AUTOSCALER_MAX_CONCURRENT_LAUNCHES,
                 max_failures=AUTOSCALER_MAX_NUM_FAILURES,
                 process_runner=subprocess,
                 update_interval_s=AUTOSCALER_UPDATE_INTERVAL_S):
        self.config_path = config_path
        # Fail hard on the first load: a bad config should abort startup.
        self.reload_config(errors_fatal=True)
        self.load_metrics = load_metrics
        self.provider = get_node_provider(self.config["provider"],
                                          self.config["cluster_name"])
        self.max_failures = max_failures
        self.max_launch_batch = max_launch_batch
        self.max_concurrent_launches = max_concurrent_launches
        self.process_runner = process_runner
        # Map from node_id to NodeUpdater processes
        self.updaters = {}
        self.num_failed_updates = defaultdict(int)
        self.num_successful_updates = defaultdict(int)
        self.num_failures = 0
        self.last_update_time = 0.0
        self.update_interval_s = update_interval_s
        # "bringup" phase: scale toward initial_workers before load-based
        # sizing takes over (cleared in _update).
        self.bringup = True
        # Node launchers
        self.launch_queue = queue.Queue()
        self.num_launches_pending = ConcurrentCounter()
        max_batches = math.ceil(
            max_concurrent_launches / float(max_launch_batch))
        for i in range(int(max_batches)):
            node_launcher = NodeLauncher(
                provider=self.provider,
                queue=self.launch_queue,
                index=i,
                pending=self.num_launches_pending)
            node_launcher.daemon = True
            node_launcher.start()
        # Expand local file_mounts to allow ~ in the paths. This can't be done
        # earlier when the config is written since we might be on different
        # platform and the expansion would result in wrong path.
        self.config["file_mounts"] = {
            remote: os.path.expanduser(local)
            for remote, local in self.config["file_mounts"].items()
        }
        for local_path in self.config["file_mounts"].values():
            assert os.path.exists(local_path)
        self.resource_requests = defaultdict(int)
        logger.info("StandardAutoscaler: {}".format(self.config))
    def update(self):
        """Run one autoscaling iteration, tolerating transient errors.

        Reloads the config (non-fatally) and runs `_update`. Errors are
        counted; after more than `max_failures` consecutive failures the
        exception is re-raised to abort the autoscaler.
        """
        try:
            self.reload_config(errors_fatal=False)
            self._update()
        except Exception as e:
            logger.exception("StandardAutoscaler: "
                             "Error during autoscaling.")
            self.num_failures += 1
            if self.num_failures > self.max_failures:
                logger.critical("StandardAutoscaler: "
                                "Too many errors, abort.")
                raise e
    def _update(self):
        """Core autoscaling step: terminate idle/outdated/excess nodes,
        launch nodes toward the target, process updaters, recover nodes."""
        now = time.time()
        # Throttle autoscaling updates to this interval to avoid exceeding
        # rate limits on API calls.
        if now - self.last_update_time < self.update_interval_s:
            return
        self.last_update_time = now
        num_pending = self.num_launches_pending.value
        nodes = self.workers()
        self.load_metrics.prune_active_ips(
            [self.provider.internal_ip(node_id) for node_id in nodes])
        target_workers = self.target_num_workers()
        # Once the target is met, clear any outstanding CPU resource request.
        if len(nodes) >= target_workers:
            if "CPU" in self.resource_requests:
                del self.resource_requests["CPU"]
        self.log_info_string(nodes, target_workers)
        # Terminate any idle or out of date nodes
        last_used = self.load_metrics.last_used_time_by_ip
        horizon = now - (60 * self.config["idle_timeout_minutes"])
        nodes_to_terminate = []
        for node_id in nodes:
            node_ip = self.provider.internal_ip(node_id)
            # Only terminate idle nodes while we remain above the target.
            if node_ip in last_used and last_used[node_ip] < horizon and \
                    len(nodes) - len(nodes_to_terminate) > target_workers:
                logger.info("StandardAutoscaler: "
                            "{}: Terminating idle node".format(node_id))
                nodes_to_terminate.append(node_id)
            elif not self.launch_config_ok(node_id):
                logger.info("StandardAutoscaler: "
                            "{}: Terminating outdated node".format(node_id))
                nodes_to_terminate.append(node_id)
        if nodes_to_terminate:
            self.provider.terminate_nodes(nodes_to_terminate)
            nodes = self.workers()
            self.log_info_string(nodes, target_workers)
        # Terminate nodes if there are too many
        nodes_to_terminate = []
        while len(nodes) > self.config["max_workers"]:
            logger.info("StandardAutoscaler: "
                        "{}: Terminating unneeded node".format(nodes[-1]))
            nodes_to_terminate.append(nodes[-1])
            nodes = nodes[:-1]
        if nodes_to_terminate:
            self.provider.terminate_nodes(nodes_to_terminate)
            nodes = self.workers()
            self.log_info_string(nodes, target_workers)
        # Launch new nodes if needed
        num_workers = len(nodes) + num_pending
        if num_workers < target_workers:
            max_allowed = min(self.max_launch_batch,
                              self.max_concurrent_launches - num_pending)
            num_launches = min(max_allowed, target_workers - num_workers)
            self.launch_new_node(num_launches)
            nodes = self.workers()
            self.log_info_string(nodes, target_workers)
        elif self.load_metrics.num_workers_connected() >= target_workers:
            # Enough workers have connected; leave the bringup phase so
            # target_num_workers stops forcing initial_workers.
            logger.info("Ending bringup phase")
            self.bringup = False
            self.log_info_string(nodes, target_workers)
        # Process any completed updates
        completed = []
        for node_id, updater in self.updaters.items():
            if not updater.is_alive():
                completed.append(node_id)
        if completed:
            for node_id in completed:
                if self.updaters[node_id].exitcode == 0:
                    self.num_successful_updates[node_id] += 1
                else:
                    self.num_failed_updates[node_id] += 1
                del self.updaters[node_id]
                # Mark the node as active to prevent the node recovery logic
                # immediately trying to restart Ray on the new node.
                self.load_metrics.mark_active(self.provider.internal_ip(node_id))
            nodes = self.workers()
            self.log_info_string(nodes, target_workers)
        # Update nodes with out-of-date files.
        # TODO(edoakes): Spawning these threads directly seems to cause
        # problems. They should at a minimum be spawned as daemon threads.
        # See https://github.com/ray-project/ray/pull/5903 for more info.
        T = []
        for node_id, commands, ray_start in (self.should_update(node_id)
                                             for node_id in nodes):
            if node_id is not None:
                T.append(
                    threading.Thread(
                        target=self.spawn_updater,
                        args=(node_id, commands, ray_start)))
        for t in T:
            t.start()
        for t in T:
            t.join()
        # Attempt to recover unhealthy nodes
        for node_id in nodes:
            self.recover_if_needed(node_id, now)
    def reload_config(self, errors_fatal=False):
        """Reload the cluster config from self.config_path.

        Recomputes the launch and runtime hashes so that changed node
        configs or file mounts trigger node replacement/updates.

        Args:
            errors_fatal: if True, re-raise parse/validation errors;
                otherwise log them and keep the previous config.
        """
        try:
            with open(self.config_path) as f:
                new_config = yaml.safe_load(f.read())
            validate_config(new_config)
            new_launch_hash = hash_launch_conf(new_config["worker_nodes"],
                                               new_config["auth"])
            new_runtime_hash = hash_runtime_conf(new_config["file_mounts"], [
                new_config["worker_setup_commands"],
                new_config["worker_start_ray_commands"]
            ])
            # Only swap state in after everything parsed and hashed cleanly.
            self.config = new_config
            self.launch_hash = new_launch_hash
            self.runtime_hash = new_runtime_hash
        except Exception as e:
            if errors_fatal:
                raise e
            else:
                logger.exception("StandardAutoscaler: "
                                 "Error parsing config.")
    def target_num_workers(self):
        """Return the desired worker count, clamped to [min, max]_workers.

        Derived from current utilization (LoadMetrics), the configured
        target_utilization_fraction, bringup/aggressive modes, and any
        pending external CPU resource requests.
        """
        target_frac = self.config["target_utilization_fraction"]
        cur_used = self.load_metrics.approx_workers_used()
        ideal_num_nodes = int(np.ceil(cur_used / float(target_frac)))
        ideal_num_workers = ideal_num_nodes - 1  # subtract 1 for head node
        initial_workers = self.config["initial_workers"]
        aggressive = self.config["autoscaling_mode"] == "aggressive"
        if self.bringup:
            ideal_num_workers = max(ideal_num_workers, initial_workers)
        elif aggressive and cur_used > 0:
            # If we want any workers, we want at least initial_workers
            ideal_num_workers = max(ideal_num_workers, initial_workers)
        # Other resources are not supported at present.
        if "CPU" in self.resource_requests:
            try:
                cores_per_worker = self.config["worker_nodes"]["Resources"][
                    "CPU"]
            except KeyError:
                cores_per_worker = 1  # Assume the worst
            cores_desired = self.resource_requests["CPU"]
            ideal_num_workers = max(
                ideal_num_workers,
                int(np.ceil(cores_desired / cores_per_worker)))
        return min(self.config["max_workers"],
                   max(self.config["min_workers"], ideal_num_workers))
def launch_config_ok(self, node_id):
launch_conf = self.provider.node_tags(node_id).get(
TAG_RAY_LAUNCH_CONFIG)
if self.launch_hash != launch_conf:
return False
return True
def files_up_to_date(self, node_id):
applied = self.provider.node_tags(node_id).get(TAG_RAY_RUNTIME_CONFIG)
if applied != self.runtime_hash:
logger.info("StandardAutoscaler: "
"{}: Runtime state is {}, want {}".format(
node_id, applied, self.runtime_hash))
return False
return True
    def recover_if_needed(self, node_id, now):
        """Restart Ray on a node that has stopped heartbeating.

        Args:
            node_id: provider id of the node to check.
            now: current timestamp (seconds), compared against the last
                heartbeat recorded for the node's internal ip.
        """
        if not self.can_update(node_id):
            return
        key = self.provider.internal_ip(node_id)
        # Seed the heartbeat time on first sight so a freshly seen node is
        # not immediately considered dead.
        if key not in self.load_metrics.last_heartbeat_time_by_ip:
            self.load_metrics.last_heartbeat_time_by_ip[key] = now
        last_heartbeat_time = self.load_metrics.last_heartbeat_time_by_ip[key]
        delta = now - last_heartbeat_time
        if delta < AUTOSCALER_HEARTBEAT_TIMEOUT_S:
            return
        logger.warning("StandardAutoscaler: "
                       "{}: No heartbeat in {}s, "
                       "restarting Ray to recover...".format(node_id, delta))
        # Recovery only re-runs the ray start commands; no file mounts or
        # setup commands are re-applied.
        updater = NodeUpdaterThread(
            node_id=node_id,
            provider_config=self.config["provider"],
            provider=self.provider,
            auth_config=self.config["auth"],
            cluster_name=self.config["cluster_name"],
            file_mounts={},
            initialization_commands=[],
            setup_commands=[],
            ray_start_commands=with_head_node_ip(
                self.config["worker_start_ray_commands"]),
            runtime_hash=self.runtime_hash,
            process_runner=self.process_runner,
            use_internal_ip=True)
        updater.start()
        self.updaters[node_id] = updater
    def should_update(self, node_id):
        """Decide whether a node needs an update.

        Returns:
            (node_id, setup_commands, ray_start_commands) when an update is
            needed, else (None, None, None).
        """
        if not self.can_update(node_id):
            return None, None, None  # no update
        status = self.provider.node_tags(node_id).get(TAG_RAY_NODE_STATUS)
        if status == STATUS_UP_TO_DATE and self.files_up_to_date(node_id):
            return None, None, None  # no update
        # After the first successful update, restart_only/no_restart let us
        # skip setup or skip the ray restart respectively.
        successful_updated = self.num_successful_updates.get(node_id, 0) > 0
        if successful_updated and self.config.get("restart_only", False):
            init_commands = []
            ray_commands = self.config["worker_start_ray_commands"]
        elif successful_updated and self.config.get("no_restart", False):
            init_commands = self.config["worker_setup_commands"]
            ray_commands = []
        else:
            init_commands = self.config["worker_setup_commands"]
            ray_commands = self.config["worker_start_ray_commands"]
        return (node_id, init_commands, ray_commands)
    def spawn_updater(self, node_id, init_commands, ray_start_commands):
        """Start a NodeUpdaterThread that syncs files and runs the given
        setup and ray-start commands on the node, and register it in
        self.updaters."""
        updater = NodeUpdaterThread(
            node_id=node_id,
            provider_config=self.config["provider"],
            provider=self.provider,
            auth_config=self.config["auth"],
            cluster_name=self.config["cluster_name"],
            file_mounts=self.config["file_mounts"],
            initialization_commands=with_head_node_ip(
                self.config["initialization_commands"]),
            setup_commands=with_head_node_ip(init_commands),
            ray_start_commands=with_head_node_ip(ray_start_commands),
            runtime_hash=self.runtime_hash,
            process_runner=self.process_runner,
            use_internal_ip=True)
        updater.start()
        self.updaters[node_id] = updater
def can_update(self, node_id):
if node_id in self.updaters:
return False
if not self.launch_config_ok(node_id):
return False
if self.num_failed_updates.get(node_id, 0) > 0: # TODO(ekl) retry?
return False
return True
    def launch_new_node(self, count):
        """Queue `count` new nodes for launch (non-blocking)."""
        logger.info(
            "StandardAutoscaler: Queue {} new nodes for launch".format(count))
        self.num_launches_pending.inc(count)
        # Deep-copy so an in-flight launch is unaffected by config reloads.
        config = copy.deepcopy(self.config)
        self.launch_queue.put((config, count))
def workers(self):
return self.provider.non_terminated_nodes(
tag_filters={TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER})
    def log_info_string(self, nodes, target):
        """Log the autoscaler summary and the load-metrics summary."""
        logger.info("StandardAutoscaler: {}".format(
            self.info_string(nodes, target)))
        logger.info("LoadMetrics: {}".format(self.load_metrics.info_string()))
def info_string(self, nodes, target):
suffix = ""
if self.num_launches_pending:
suffix += " ({} pending)".format(self.num_launches_pending.value)
if self.updaters:
suffix += " ({} updating)".format(len(self.updaters))
if self.num_failed_updates:
suffix += " ({} failed to update)".format(
len(self.num_failed_updates))
if self.bringup:
suffix += " (bringup=True)"
return "{}/{} target nodes{}".format(len(nodes), target, suffix)
    def request_resources(self, resources):
        """Record externally requested resources, keeping the running max
        per resource type.

        # NOTE(review): indexing self.resource_requests[resource] directly
        # suggests it is a defaultdict(int) — confirm at the constructor.
        """
        for resource, count in resources.items():
            self.resource_requests[resource] = max(
                self.resource_requests[resource], count)
        logger.info("StandardAutoscaler: resource_requests={}".format(
            self.resource_requests))
    def kill_workers(self):
        """Terminate every non-terminated worker node immediately."""
        logger.error("StandardAutoscaler: kill_workers triggered")
        nodes = self.workers()
        if nodes:
            self.provider.terminate_nodes(nodes)
        logger.error("StandardAutoscaler: terminated {} node(s)".format(
            len(nodes)))
def typename(v):
    """Return v's own name if v is a class, else the name of its type."""
    cls = v if isinstance(v, type) else type(v)
    return cls.__name__
def check_required(config, schema):
    """Recursively verify that all REQUIRED schema keys exist in config.

    Args:
        config: the (sub-)config dict being validated.
        schema: dict mapping key -> (type or nested schema or None, REQUIRED/
            OPTIONAL). A value of None means the field is not validated.

    Raises:
        ValueError: if config is not a dict or a required key is missing.
    """
    # Check required schema entries
    if not isinstance(config, dict):
        raise ValueError("Config is not a dictionary")
    for k, (v, kreq) in schema.items():
        if v is None:
            continue  # None means we don't validate the field
        if kreq is REQUIRED:
            if k not in config:
                type_str = typename(v)
                raise ValueError(
                    "Missing required config key `{}` of type {}".format(
                        k, type_str))
        # Only recurse into nested schemas for keys that are present.
        # Previously an absent OPTIONAL nested key raised KeyError here.
        if not isinstance(v, type) and k in config:
            check_required(config[k], v)
def check_extraneous(config, schema):
    """Make sure all items of config are in schema"""
    if not isinstance(config, dict):
        raise ValueError("Config {} is not a dictionary".format(config))
    for k in config:
        if k not in schema:
            raise ValueError("Unexpected config key `{}` not in {}".format(
                k, list(schema.keys())))
        v, kreq = schema[k]
        if v is None:
            continue
        elif isinstance(v, type):
            if not isinstance(config[k], v):
                # Accept any string type where str is expected — presumably
                # string_types comes from six for py2/py3 compat; confirm at
                # the module imports.
                if v is str and isinstance(config[k], string_types):
                    continue
                raise ValueError(
                    "Config key `{}` has wrong type {}, expected {}".format(
                        k,
                        type(config[k]).__name__, v.__name__))
        else:
            # Nested schema: recurse into the sub-dict.
            check_extraneous(config[k], v)
def validate_config(config, schema=CLUSTER_CONFIG_SCHEMA):
    """Required Dicts indicate that no extra fields can be introduced."""
    if not isinstance(config, dict):
        raise ValueError("Config {} is not a dictionary".format(config))
    # Validate in both directions: required keys present, no unknown keys.
    check_required(config, schema)
    check_extraneous(config, schema)
def fillout_defaults(config):
    """Merge the user config over provider defaults and normalize it."""
    defaults = get_default_config(config["provider"])
    # Shallow merge: user-provided top-level keys replace defaults wholesale.
    defaults.update(config)
    merge_setup_commands(defaults)
    dockerize_if_needed(defaults)
    defaults["auth"] = defaults.get("auth", {})
    return defaults
def merge_setup_commands(config):
    """Prepend the shared setup_commands to the head- and worker-specific
    setup command lists. Mutates and returns the same config dict."""
    shared = config["setup_commands"]
    for key in ("head_setup_commands", "worker_setup_commands"):
        config[key] = shared + config[key]
    return config
def with_head_node_ip(cmds):
    """Prefix each command with an export of RAY_HEAD_IP (this node's ip)."""
    head_ip = services.get_node_ip_address()
    return [
        "export RAY_HEAD_IP={}; {}".format(head_ip, cmd) for cmd in cmds
    ]
def hash_launch_conf(node_conf, auth):
    """SHA1 fingerprint of the node launch config plus auth settings."""
    payload = json.dumps([node_conf, auth], sort_keys=True).encode("utf-8")
    return hashlib.sha1(payload).hexdigest()
# Cache the file hashes to avoid rescanning it each time. Also, this avoids
# inadvertently restarting workers if the file mount content is mutated on the
# head node.
_hash_cache = {}
def hash_runtime_conf(file_mounts, extra_objs):
    """SHA1 of file-mount contents plus extra config objects.

    Used to decide whether a worker's runtime state is out of date. The
    result is memoized in the module-level _hash_cache so file contents are
    only scanned once per (file_mounts, extra_objs) combination.
    """
    hasher = hashlib.sha1()
    def add_content_hashes(path):
        # Feed the directory layout and every file's bytes into the hasher.
        def add_hash_of_file(fpath):
            with open(fpath, "rb") as f:
                for chunk in iter(lambda: f.read(2**20), b""):
                    hasher.update(chunk)
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            # Sort directories and filenames so the hash is deterministic
            # regardless of os.walk ordering.
            dirs = []
            for dirpath, _, filenames in os.walk(path):
                dirs.append((dirpath, sorted(filenames)))
            for dirpath, filenames in sorted(dirs):
                hasher.update(dirpath.encode("utf-8"))
                for name in filenames:
                    hasher.update(name.encode("utf-8"))
                    fpath = os.path.join(dirpath, name)
                    add_hash_of_file(fpath)
        else:
            add_hash_of_file(path)
    conf_str = (json.dumps(file_mounts, sort_keys=True).encode("utf-8") +
                json.dumps(extra_objs, sort_keys=True).encode("utf-8"))
    # Important: only hash the files once. Otherwise, we can end up restarting
    # workers if the files were changed and we re-hashed them.
    if conf_str not in _hash_cache:
        hasher.update(conf_str)
        for local_path in sorted(file_mounts.values()):
            add_content_hashes(local_path)
        _hash_cache[conf_str] = hasher.hexdigest()
    return _hash_cache[conf_str]
def request_resources(num_cpus=None, num_gpus=None):
    """Remotely request some CPU or GPU resources from the autoscaler.
    This function is to be called e.g. on a node before submitting a bunch of
    ray.remote calls to ensure that resources rapidly become available.
    In the future this could be extended to do GPU cores or other custom
    resources.
    This function is non blocking.
    Args:
        num_cpus: int -- the number of CPU cores to request
        num_gpus: int -- the number of GPUs to request (Not implemented)
    """
    if num_gpus is not None:
        raise NotImplementedError(
            "GPU resource is not yet supported through request_resources")
    # Validate the argument before touching Redis so that bad input (e.g.
    # the default None) fails fast and without side effects.
    assert isinstance(num_cpus, int)
    if num_cpus > 0:
        # Only open a Redis connection when there is actually something to
        # publish; previously a client was created even for num_cpus == 0.
        r = services.create_redis_client(
            global_worker.node.redis_address,
            password=global_worker.node.redis_password)
        r.publish(AUTOSCALER_RESOURCE_REQUEST_CHANNEL,
                  json.dumps({
                      "CPU": num_cpus
                  }))
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/aws/config.py | Python | from distutils.version import StrictVersion
import json
import os
import time
import logging
import boto3
from botocore.config import Config
import botocore
from ray.ray_constants import BOTO_MAX_RETRIES
logger = logging.getLogger(__name__)
RAY = "ray-autoscaler"
DEFAULT_RAY_INSTANCE_PROFILE = RAY + "-v1"
DEFAULT_RAY_IAM_ROLE = RAY + "-v1"
SECURITY_GROUP_TEMPLATE = RAY + "-{}"
assert StrictVersion(boto3.__version__) >= StrictVersion("1.4.8"), \
"Boto3 version >= 1.4.8 required, try `pip install -U boto3`"
def key_pair(i, region):
    """Returns the ith default (aws_key_pair_name, key_pair_path)."""
    if i == 0:
        name = "{}_{}".format(RAY, region)
    else:
        name = "{}_{}_{}".format(RAY, i, region)
    # The private key file lives next to the other SSH keys, named after
    # the key pair itself.
    return (name, os.path.expanduser("~/.ssh/{}.pem".format(name)))
# Suppress excessive connection dropped logs from boto
logging.getLogger("botocore").setLevel(logging.WARNING)
def bootstrap_aws(config):
    """Resolve IAM role, SSH key pair, subnets, and security groups for the
    cluster config, creating AWS resources where needed."""
    # The head node needs to have an IAM role that allows it to create further
    # EC2 instances.
    config = _configure_iam_role(config)
    # Configure SSH access, using an existing key pair if possible.
    config = _configure_key_pair(config)
    # Pick a reasonable subnet if not specified by the user.
    config = _configure_subnet(config)
    # Cluster workers should be in a security group that permits traffic within
    # the group, and also SSH access from outside.
    config = _configure_security_group(config)
    return config
def _configure_iam_role(config):
    """Ensure the head node has an instance profile with EC2/S3 access.

    Creates the default Ray instance profile and IAM role on first use.
    No-op if the user specified IamInstanceProfile explicitly.
    """
    if "IamInstanceProfile" in config["head_node"]:
        return config
    profile = _get_instance_profile(DEFAULT_RAY_INSTANCE_PROFILE, config)
    if profile is None:
        logger.info("_configure_iam_role: "
                    "Creating new instance profile {}".format(
                        DEFAULT_RAY_INSTANCE_PROFILE))
        client = _client("iam", config)
        client.create_instance_profile(
            InstanceProfileName=DEFAULT_RAY_INSTANCE_PROFILE)
        profile = _get_instance_profile(DEFAULT_RAY_INSTANCE_PROFILE, config)
        time.sleep(15)  # wait for propagation
    assert profile is not None, "Failed to create instance profile"
    if not profile.roles:
        role = _get_role(DEFAULT_RAY_IAM_ROLE, config)
        if role is None:
            logger.info("_configure_iam_role: "
                        "Creating new role {}".format(DEFAULT_RAY_IAM_ROLE))
            iam = _resource("iam", config)
            iam.create_role(
                RoleName=DEFAULT_RAY_IAM_ROLE,
                AssumeRolePolicyDocument=json.dumps({
                    "Statement": [
                        {
                            "Effect": "Allow",
                            "Principal": {
                                "Service": "ec2.amazonaws.com"
                            },
                            "Action": "sts:AssumeRole",
                        },
                    ],
                }))
            role = _get_role(DEFAULT_RAY_IAM_ROLE, config)
            assert role is not None, "Failed to create role"
            role.attach_policy(
                PolicyArn="arn:aws:iam::aws:policy/AmazonEC2FullAccess")
            role.attach_policy(
                PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess")
        profile.add_role(RoleName=role.name)
        time.sleep(15)  # wait for propagation
    logger.info("_configure_iam_role: "
                "Role not specified for head node, using {}".format(
                    profile.arn))
    config["head_node"]["IamInstanceProfile"] = {"Arn": profile.arn}
    return config
def _configure_key_pair(config):
    """Pick or create an SSH key pair and record it in the config.

    Tries the default key names in order until one exists both on EC2 and
    locally, or a fresh one can be created. No-op if the user already
    configured ssh_private_key.
    """
    if "ssh_private_key" in config["auth"]:
        assert "KeyName" in config["head_node"]
        assert "KeyName" in config["worker_nodes"]
        return config
    ec2 = _resource("ec2", config)
    # Try a few times to get or create a good key pair.
    MAX_NUM_KEYS = 20
    for i in range(MAX_NUM_KEYS):
        key_name, key_path = key_pair(i, config["provider"]["region"])
        key = _get_key(key_name, config)
        # Found a good key.
        if key and os.path.exists(key_path):
            break
        # We can safely create a new key.
        if not key and not os.path.exists(key_path):
            logger.info("_configure_key_pair: "
                        "Creating new key pair {}".format(key_name))
            key = ec2.create_key_pair(KeyName=key_name)
            with open(key_path, "w") as f:
                f.write(key.key_material)
            # Private key must be user-readable only for ssh to accept it.
            os.chmod(key_path, 0o600)
            break
    if not key:
        raise ValueError(
            "No matching local key file for any of the key pairs in this "
            "account with ids from 0..{}. ".format(key_name) +
            "Consider deleting some unused keys pairs from your account.")
    assert os.path.exists(key_path), \
        "Private key file {} not found for {}".format(key_path, key_name)
    logger.info("_configure_key_pair: "
                "KeyName not specified for nodes, using {}".format(key_name))
    config["auth"]["ssh_private_key"] = key_path
    config["head_node"]["KeyName"] = key_name
    config["worker_nodes"]["KeyName"] = key_name
    return config
def _configure_subnet(config):
    """Fill in SubnetIds for head/worker nodes when not user-specified.

    Only subnets that are available (and map public IPs on launch, unless
    use_internal_ips is set) are considered; an optional availability_zone
    list further restricts the choice.
    """
    ec2 = _resource("ec2", config)
    use_internal_ips = config["provider"].get("use_internal_ips", False)
    subnets = sorted(
        (s for s in ec2.subnets.all() if s.state == "available" and (
            use_internal_ips or s.map_public_ip_on_launch)),
        reverse=True,  # sort from Z-A
        key=lambda subnet: subnet.availability_zone)
    if not subnets:
        raise Exception(
            "No usable subnets found, try manually creating an instance in "
            "your specified region to populate the list of subnets "
            "and trying this again. Note that the subnet must map public IPs "
            "on instance launch unless you set 'use_internal_ips': True in "
            "the 'provider' config.")
    if "availability_zone" in config["provider"]:
        azs = config["provider"]["availability_zone"].split(",")
        subnets = [s for s in subnets if s.availability_zone in azs]
        if not subnets:
            raise Exception(
                "No usable subnets matching availability zone {} "
                "found. Choose a different availability zone or try "
                "manually creating an instance in your specified region "
                "to populate the list of subnets and trying this again.".
                format(config["provider"]["availability_zone"]))
    subnet_ids = [s.subnet_id for s in subnets]
    subnet_descr = [(s.subnet_id, s.availability_zone) for s in subnets]
    if "SubnetIds" not in config["head_node"]:
        config["head_node"]["SubnetIds"] = subnet_ids
        logger.info("_configure_subnet: "
                    "SubnetIds not specified for head node, using {}".format(
                        subnet_descr))
    if "SubnetIds" not in config["worker_nodes"]:
        config["worker_nodes"]["SubnetIds"] = subnet_ids
        logger.info("_configure_subnet: "
                    "SubnetId not specified for workers,"
                    " using {}".format(subnet_descr))
    return config
def _configure_security_group(config):
    """Fill in SecurityGroupIds, creating the cluster's group if needed.

    The auto-created group allows all traffic within the group plus SSH
    (port 22) from anywhere.
    """
    if "SecurityGroupIds" in config["head_node"] and \
            "SecurityGroupIds" in config["worker_nodes"]:
        return config  # have user-defined groups
    group_name = SECURITY_GROUP_TEMPLATE.format(config["cluster_name"])
    vpc_id = _get_vpc_id_or_die(config, config["worker_nodes"]["SubnetIds"][0])
    security_group = _get_security_group(config, vpc_id, group_name)
    if security_group is None:
        logger.info("_configure_security_group: "
                    "Creating new security group {}".format(group_name))
        client = _client("ec2", config)
        client.create_security_group(
            Description="Auto-created security group for Ray workers",
            GroupName=group_name,
            VpcId=vpc_id)
        security_group = _get_security_group(config, vpc_id, group_name)
    assert security_group, "Failed to create security group"
    if not security_group.ip_permissions:
        # Intra-group all-traffic rule plus world-open SSH.
        security_group.authorize_ingress(IpPermissions=[{
            "FromPort": -1,
            "ToPort": -1,
            "IpProtocol": "-1",
            "UserIdGroupPairs": [{
                "GroupId": security_group.id
            }]
        }, {
            "FromPort": 22,
            "ToPort": 22,
            "IpProtocol": "TCP",
            "IpRanges": [{
                "CidrIp": "0.0.0.0/0"
            }]
        }])
    if "SecurityGroupIds" not in config["head_node"]:
        logger.info(
            "_configure_security_group: "
            "SecurityGroupIds not specified for head node, using {}".format(
                security_group.group_name))
        config["head_node"]["SecurityGroupIds"] = [security_group.id]
    if "SecurityGroupIds" not in config["worker_nodes"]:
        logger.info(
            "_configure_security_group: "
            "SecurityGroupIds not specified for workers, using {}".format(
                security_group.group_name))
        config["worker_nodes"]["SecurityGroupIds"] = [security_group.id]
    return config
def _get_vpc_id_or_die(config, subnet_id):
    """Look up the VPC id of the given subnet; assert exactly one match."""
    ec2 = _resource("ec2", config)
    subnet_filter = [{"Name": "subnet-id", "Values": [subnet_id]}]
    matches = list(ec2.subnets.filter(Filters=subnet_filter))
    assert len(matches) == 1, "Subnet not found"
    return matches[0].vpc_id
def _get_security_group(config, vpc_id, group_name):
    """Return the security group named group_name in vpc_id, or None."""
    ec2 = _resource("ec2", config)
    vpc_filter = [{"Name": "vpc-id", "Values": [vpc_id]}]
    for group in ec2.security_groups.filter(Filters=vpc_filter):
        if group.group_name == group_name:
            return group
    return None
def _get_role(role_name, config):
    """Load the IAM role by name, returning None if it does not exist."""
    iam = _resource("iam", config)
    role = iam.Role(role_name)
    try:
        role.load()
    except botocore.exceptions.ClientError as exc:
        # Missing roles surface as NoSuchEntity; everything else is fatal.
        if exc.response.get("Error", {}).get("Code") == "NoSuchEntity":
            return None
        raise exc
    return role
def _get_instance_profile(profile_name, config):
    """Load the IAM instance profile by name, or None if it does not exist."""
    iam = _resource("iam", config)
    profile = iam.InstanceProfile(profile_name)
    try:
        profile.load()
    except botocore.exceptions.ClientError as exc:
        # Missing profiles surface as NoSuchEntity; everything else is fatal.
        if exc.response.get("Error", {}).get("Code") == "NoSuchEntity":
            return None
        raise exc
    return profile
def _get_key(key_name, config):
    """Return the EC2 key pair with the given name, or None if absent."""
    ec2 = _resource("ec2", config)
    name_filter = [{"Name": "key-name", "Values": [key_name]}]
    for key in ec2.key_pairs.filter(Filters=name_filter):
        if key.name == key_name:
            return key
    return None
def _client(name, config):
    """Create a boto3 client for the configured region with retry policy."""
    retry_config = Config(retries={"max_attempts": BOTO_MAX_RETRIES})
    return boto3.client(
        name, config["provider"]["region"], config=retry_config)
def _resource(name, config):
    """Create a boto3 resource for the configured region with retry policy."""
    retry_config = Config(retries={"max_attempts": BOTO_MAX_RETRIES})
    return boto3.resource(
        name, config["provider"]["region"], config=retry_config)
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/aws/node_provider.py | Python | import random
import threading
from collections import defaultdict
import logging
import boto3
import botocore
from botocore.config import Config
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME, \
TAG_RAY_LAUNCH_CONFIG, TAG_RAY_NODE_TYPE
from ray.ray_constants import BOTO_MAX_RETRIES, BOTO_CREATE_MAX_RETRIES
from ray.autoscaler.log_timer import LogTimer
logger = logging.getLogger(__name__)
def to_aws_format(tags):
    """Convert the Ray node name tag to the AWS-specific 'Name' tag.

    Returns a new dict; the input mapping is left unmodified. (Previously
    the argument was mutated in place, which clobbered callers' tag dicts,
    e.g. the user-supplied tag_filters in non_terminated_nodes.)
    """
    tags = dict(tags)
    if TAG_RAY_NODE_NAME in tags:
        tags["Name"] = tags.pop(TAG_RAY_NODE_NAME)
    return tags
def from_aws_format(tags):
    """Convert the AWS-specific 'Name' tag to the Ray node name tag.

    Returns a new dict; the input mapping is left unmodified (mirrors the
    non-mutating contract of to_aws_format).
    """
    tags = dict(tags)
    if "Name" in tags:
        tags[TAG_RAY_NODE_NAME] = tags.pop("Name")
    return tags
def make_ec2_client(region, max_retries):
    """Build an EC2 resource client that retries requests up to
    `max_retries` times."""
    retry_policy = Config(retries={"max_attempts": max_retries})
    return boto3.resource("ec2", region_name=region, config=retry_policy)
class AWSNodeProvider(NodeProvider):
    """NodeProvider implementation backed by AWS EC2.

    Batches tag writes through a background thread and caches node objects
    from the last listing to limit the number of EC2 API calls.
    """
    def __init__(self, provider_config, cluster_name):
        NodeProvider.__init__(self, provider_config, cluster_name)
        # If True, terminate_node() stops instances instead of terminating
        # them so they can be reused by later create_node() calls.
        self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes",
                                                       True)
        self.ec2 = make_ec2_client(
            region=provider_config["region"], max_retries=BOTO_MAX_RETRIES)
        # Separate zero-retry client for create_instances so subnet failover
        # can happen quickly.
        self.ec2_fail_fast = make_ec2_client(
            region=provider_config["region"], max_retries=0)
        # Try availability zones round-robin, starting from random offset
        self.subnet_idx = random.randint(0, 100)
        self.tag_cache = {}  # Tags that we believe to actually be on EC2.
        self.tag_cache_pending = {}  # Tags that we will soon upload.
        self.tag_cache_lock = threading.Lock()
        self.tag_cache_update_event = threading.Event()
        self.tag_cache_kill_event = threading.Event()
        self.tag_update_thread = threading.Thread(
            target=self._node_tag_update_loop)
        self.tag_update_thread.start()
        # Cache of node objects from the last nodes() call. This avoids
        # excessive DescribeInstances requests.
        self.cached_nodes = {}
    def _node_tag_update_loop(self):
        """Update the AWS tags for a cluster periodically.
        The purpose of this loop is to avoid excessive EC2 calls when a large
        number of nodes are being launched simultaneously.
        """
        while True:
            self.tag_cache_update_event.wait()
            self.tag_cache_update_event.clear()
            # Group pending writes by (key, value) so one create_tags call
            # covers every node that needs that tag.
            batch_updates = defaultdict(list)
            with self.tag_cache_lock:
                for node_id, tags in self.tag_cache_pending.items():
                    for x in tags.items():
                        batch_updates[x].append(node_id)
                    self.tag_cache[node_id].update(tags)
                self.tag_cache_pending = {}
            for (k, v), node_ids in batch_updates.items():
                m = "Set tag {}={} on {}".format(k, v, node_ids)
                with LogTimer("AWSNodeProvider: {}".format(m)):
                    if k == TAG_RAY_NODE_NAME:
                        k = "Name"
                    self.ec2.meta.client.create_tags(
                        Resources=node_ids,
                        Tags=[{
                            "Key": k,
                            "Value": v
                        }],
                    )
            self.tag_cache_kill_event.wait(timeout=5)
            if self.tag_cache_kill_event.is_set():
                return
    def non_terminated_nodes(self, tag_filters):
        """Return ids of cluster nodes in the pending/running states.

        Also refreshes the tag cache and node-object cache as a side effect.
        """
        # Note that these filters are acceptable because they are set on
        # node initialization, and so can never be sitting in the cache.
        tag_filters = to_aws_format(tag_filters)
        filters = [
            {
                "Name": "instance-state-name",
                "Values": ["pending", "running"],
            },
            {
                "Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME),
                "Values": [self.cluster_name],
            },
        ]
        for k, v in tag_filters.items():
            filters.append({
                "Name": "tag:{}".format(k),
                "Values": [v],
            })
        nodes = list(self.ec2.instances.filter(Filters=filters))
        # Populate the tag cache with initial information if necessary
        for node in nodes:
            if node.id in self.tag_cache:
                continue
            self.tag_cache[node.id] = from_aws_format(
                {x["Key"]: x["Value"]
                 for x in node.tags})
        self.cached_nodes = {node.id: node for node in nodes}
        return [node.id for node in nodes]
    def is_running(self, node_id):
        """Return True if the node's EC2 state is 'running'."""
        node = self._get_cached_node(node_id)
        return node.state["Name"] == "running"
    def is_terminated(self, node_id):
        """Return True if the node is neither running nor pending."""
        node = self._get_cached_node(node_id)
        state = node.state["Name"]
        return state not in ["running", "pending"]
    def node_tags(self, node_id):
        """Return the node's tags, with pending writes overlaid on the
        confirmed cache."""
        with self.tag_cache_lock:
            d1 = self.tag_cache[node_id]
            d2 = self.tag_cache_pending.get(node_id, {})
            return dict(d1, **d2)
    def external_ip(self, node_id):
        """Return the node's public IP, refreshing the cache if missing."""
        node = self._get_cached_node(node_id)
        if node.public_ip_address is None:
            node = self._get_node(node_id)
        return node.public_ip_address
    def internal_ip(self, node_id):
        """Return the node's private IP, refreshing the cache if missing."""
        node = self._get_cached_node(node_id)
        if node.private_ip_address is None:
            node = self._get_node(node_id)
        return node.private_ip_address
    def set_node_tags(self, node_id, tags):
        """Queue tag writes; the background loop uploads them in batches."""
        with self.tag_cache_lock:
            try:
                self.tag_cache_pending[node_id].update(tags)
            except KeyError:
                self.tag_cache_pending[node_id] = tags
            self.tag_cache_update_event.set()
    def create_node(self, node_config, tags, count):
        """Start `count` nodes, reusing stopped instances when allowed."""
        # Try to reuse previously stopped nodes with compatible configs
        if self.cache_stopped_nodes:
            filters = [
                {
                    "Name": "instance-state-name",
                    "Values": ["stopped", "stopping"],
                },
                {
                    "Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME),
                    "Values": [self.cluster_name],
                },
                {
                    "Name": "tag:{}".format(TAG_RAY_NODE_TYPE),
                    "Values": [tags[TAG_RAY_NODE_TYPE]],
                },
                {
                    "Name": "tag:{}".format(TAG_RAY_LAUNCH_CONFIG),
                    "Values": [tags[TAG_RAY_LAUNCH_CONFIG]],
                },
            ]
            reuse_nodes = list(
                self.ec2.instances.filter(Filters=filters))[:count]
            reuse_node_ids = [n.id for n in reuse_nodes]
            if reuse_nodes:
                logger.info("AWSNodeProvider: reusing instances {}. "
                            "To disable reuse, set "
                            "'cache_stopped_nodes: False' in the provider "
                            "config.".format(reuse_node_ids))
                for node in reuse_nodes:
                    self.tag_cache[node.id] = from_aws_format(
                        {x["Key"]: x["Value"]
                         for x in node.tags})
                    # An instance still stopping cannot be started again yet.
                    if node.state["Name"] == "stopping":
                        logger.info("AWSNodeProvider: waiting for instance "
                                    "{} to fully stop...".format(node.id))
                        node.wait_until_stopped()
                self.ec2.meta.client.start_instances(
                    InstanceIds=reuse_node_ids)
                for node_id in reuse_node_ids:
                    self.set_node_tags(node_id, tags)
                count -= len(reuse_node_ids)
        if count:
            self._create_node(node_config, tags, count)
    def _create_node(self, node_config, tags, count):
        """Launch `count` fresh EC2 instances, retrying across subnets."""
        tags = to_aws_format(tags)
        conf = node_config.copy()
        # Delete unsupported keys from the node config
        try:
            del conf["Resources"]
        except KeyError:
            pass
        tag_pairs = [{
            "Key": TAG_RAY_CLUSTER_NAME,
            "Value": self.cluster_name,
        }]
        for k, v in tags.items():
            tag_pairs.append({
                "Key": k,
                "Value": v,
            })
        tag_specs = [{
            "ResourceType": "instance",
            "Tags": tag_pairs,
        }]
        user_tag_specs = conf.get("TagSpecifications", [])
        # Allow users to add tags and override values of existing
        # tags with their own. This only applies to the resource type
        # "instance". All other resource types are appended to the list of
        # tag specs.
        for user_tag_spec in user_tag_specs:
            if user_tag_spec["ResourceType"] == "instance":
                for user_tag in user_tag_spec["Tags"]:
                    exists = False
                    for tag in tag_specs[0]["Tags"]:
                        if user_tag["Key"] == tag["Key"]:
                            exists = True
                            tag["Value"] = user_tag["Value"]
                            break
                    if not exists:
                        tag_specs[0]["Tags"] += [user_tag]
            else:
                tag_specs += [user_tag_spec]
        # SubnetIds is not a real config key: we must resolve to a
        # single SubnetId before invoking the AWS API.
        subnet_ids = conf.pop("SubnetIds")
        for attempt in range(1, BOTO_CREATE_MAX_RETRIES + 1):
            try:
                # Round-robin across subnets/AZs on each attempt.
                subnet_id = subnet_ids[self.subnet_idx % len(subnet_ids)]
                logger.info("NodeProvider: calling create_instances "
                            "with {} (count={}).".format(subnet_id, count))
                self.subnet_idx += 1
                conf.update({
                    "MinCount": 1,
                    "MaxCount": count,
                    "SubnetId": subnet_id,
                    "TagSpecifications": tag_specs
                })
                created = self.ec2_fail_fast.create_instances(**conf)
                for instance in created:
                    logger.info("NodeProvider: Created instance "
                                "[id={}, name={}, info={}]".format(
                                    instance.instance_id,
                                    instance.state["Name"],
                                    instance.state_reason["Message"]))
                break
            except botocore.exceptions.ClientError as exc:
                if attempt == BOTO_CREATE_MAX_RETRIES:
                    logger.error(
                        "create_instances: Max attempts ({}) exceeded.".format(
                            BOTO_CREATE_MAX_RETRIES))
                    raise exc
                else:
                    logger.error(exc)
    def terminate_node(self, node_id):
        """Terminate (or stop, when caching is enabled) a single node."""
        node = self._get_cached_node(node_id)
        if self.cache_stopped_nodes:
            # Spot instances cannot be stopped, only terminated.
            if node.spot_instance_request_id:
                logger.info(
                    "AWSNodeProvider: terminating node {} (spot nodes cannot "
                    "be stopped, only terminated)".format(node_id))
                node.terminate()
            else:
                logger.info(
                    "AWSNodeProvider: stopping node {}. To terminate nodes "
                    "on stop, set 'cache_stopped_nodes: False' in the "
                    "provider config.".format(node_id))
                node.stop()
        else:
            node.terminate()
        self.tag_cache.pop(node_id, None)
        self.tag_cache_pending.pop(node_id, None)
    def terminate_nodes(self, node_ids):
        """Terminate (or stop, when caching is enabled) a batch of nodes."""
        if not node_ids:
            return
        if self.cache_stopped_nodes:
            # Split by billing type: spot instances can only be terminated.
            spot_ids = []
            on_demand_ids = []
            for node_id in node_ids:
                if self._get_cached_node(node_id).spot_instance_request_id:
                    spot_ids += [node_id]
                else:
                    on_demand_ids += [node_id]
            if on_demand_ids:
                logger.info(
                    "AWSNodeProvider: stopping nodes {}. To terminate nodes "
                    "on stop, set 'cache_stopped_nodes: False' in the "
                    "provider config.".format(on_demand_ids))
                self.ec2.meta.client.stop_instances(InstanceIds=on_demand_ids)
            if spot_ids:
                logger.info(
                    "AWSNodeProvider: terminating nodes {} (spot nodes cannot "
                    "be stopped, only terminated)".format(spot_ids))
                self.ec2.meta.client.terminate_instances(InstanceIds=spot_ids)
        else:
            self.ec2.meta.client.terminate_instances(InstanceIds=node_ids)
        for node_id in node_ids:
            self.tag_cache.pop(node_id, None)
            self.tag_cache_pending.pop(node_id, None)
    def _get_node(self, node_id):
        """Refresh and get info for this node, updating the cache."""
        self.non_terminated_nodes({})  # Side effect: updates cache
        if node_id in self.cached_nodes:
            return self.cached_nodes[node_id]
        # Node not in {pending, running} -- retry with a point query. This
        # usually means the node was recently preempted or terminated.
        matches = list(self.ec2.instances.filter(InstanceIds=[node_id]))
        assert len(matches) == 1, "Invalid instance id {}".format(node_id)
        return matches[0]
    def _get_cached_node(self, node_id):
        """Return node info from cache if possible, otherwise fetches it."""
        if node_id in self.cached_nodes:
            return self.cached_nodes[node_id]
        return self._get_node(node_id)
    def cleanup(self):
        """Signal the background tag-update thread to flush and exit."""
        self.tag_cache_update_event.set()
        self.tag_cache_kill_event.set()
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/commands.py | Python | import copy
import hashlib
import json
import os
import tempfile
import time
import logging
import sys
import click
import random
import yaml
try: # py3
from shlex import quote
except ImportError: # py2
from pipes import quote
from ray.autoscaler.autoscaler import validate_config, hash_runtime_conf, \
hash_launch_conf, fillout_defaults
from ray.autoscaler.node_provider import get_node_provider, NODE_PROVIDERS
from ray.autoscaler.tags import TAG_RAY_NODE_TYPE, TAG_RAY_LAUNCH_CONFIG, \
TAG_RAY_NODE_NAME, NODE_TYPE_WORKER, NODE_TYPE_HEAD
from ray.autoscaler.updater import NodeUpdaterThread
from ray.autoscaler.log_timer import LogTimer
from ray.autoscaler.docker import with_docker_exec
logger = logging.getLogger(__name__)
def create_or_update_cluster(config_file, override_min_workers,
                             override_max_workers, no_restart, restart_only,
                             yes, override_cluster_name):
    """Create or updates an autoscaling Ray cluster from a config json.

    Args:
        config_file: path to the cluster yaml config.
        override_min_workers: optional override for config min_workers.
        override_max_workers: optional override for config max_workers.
        no_restart: don't restart Ray services on already-updated nodes.
        restart_only: skip setup commands and only restart Ray.
        yes: skip the interactive confirmation prompt.
        override_cluster_name: optional override for the cluster name.
    """
    # Use a context manager so the config file handle is closed promptly
    # (the previous open(...).read() leaked the descriptor until GC).
    with open(config_file) as f:
        config = yaml.safe_load(f.read())
    if override_min_workers is not None:
        config["min_workers"] = override_min_workers
    if override_max_workers is not None:
        config["max_workers"] = override_max_workers
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)
    get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
                            override_cluster_name)
def _bootstrap_config(config):
    """Fill defaults, validate, and provider-bootstrap a cluster config.

    The bootstrapped result is cached on disk under a hash of the input
    config, so repeated invocations with the same config skip the
    cloud-provider calls.
    """
    config = fillout_defaults(config)
    hasher = hashlib.sha1()
    hasher.update(json.dumps([config], sort_keys=True).encode("utf-8"))
    cache_key = os.path.join(tempfile.gettempdir(),
                             "ray-config-{}".format(hasher.hexdigest()))
    if os.path.exists(cache_key):
        # Close the cache file promptly instead of leaking the handle.
        with open(cache_key) as f:
            return json.loads(f.read())
    validate_config(config)
    importer = NODE_PROVIDERS.get(config["provider"]["type"])
    if not importer:
        raise NotImplementedError("Unsupported provider {}".format(
            config["provider"]))
    bootstrap_config, _ = importer()
    resolved_config = bootstrap_config(config)
    with open(cache_key, "w") as f:
        f.write(json.dumps(resolved_config))
    return resolved_config
def teardown_cluster(config_file, yes, workers_only, override_cluster_name):
    """Destroys all nodes of a Ray cluster described by a config json.

    Arguments:
        config_file: path to the cluster yaml
        yes: skip the interactive confirmation prompt
        workers_only: keep the head node, terminate only workers
        override_cluster_name: set the name of the cluster
    """
    config = yaml.safe_load(open(config_file).read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = fillout_defaults(config)
    validate_config(config)
    confirm("This will destroy your cluster", yes)
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        # Returns the list of head + worker node ids still alive (head is
        # excluded when workers_only is set).
        def remaining_nodes():
            if workers_only:
                A = []
            else:
                A = provider.non_terminated_nodes({
                    TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD
                })
            A += provider.non_terminated_nodes({
                TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
            })
            return A
        # Loop here to check that both the head and worker nodes are actually
        # really gone
        A = remaining_nodes()
        with LogTimer("teardown_cluster: done."):
            while A:
                logger.info("teardown_cluster: "
                            "Shutting down {} nodes...".format(len(A)))
                provider.terminate_nodes(A)
                time.sleep(1)
                A = remaining_nodes()
    finally:
        # Always release provider resources, even if termination failed.
        provider.cleanup()
def kill_node(config_file, yes, hard, override_cluster_name):
    """Kills a random Raylet worker.

    Arguments:
        config_file: path to the cluster yaml
        yes: skip the interactive confirmation prompt
        hard: terminate the instance itself instead of running `ray stop`
        override_cluster_name: set the name of the cluster

    Returns:
        The IP address of the killed worker node.
    """
    config = yaml.safe_load(open(config_file).read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)
    confirm("This will kill a node in your cluster", yes)
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        nodes = provider.non_terminated_nodes({
            TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
        })
        # Pick an arbitrary worker to kill (used for fault-tolerance tests).
        node = random.choice(nodes)
        logger.info("kill_node: Shutdown worker {}".format(node))
        if hard:
            provider.terminate_node(node)
        else:
            # Soft kill: stop the ray processes but keep the instance alive.
            updater = NodeUpdaterThread(
                node_id=node,
                provider_config=config["provider"],
                provider=provider,
                auth_config=config["auth"],
                cluster_name=config["cluster_name"],
                file_mounts=config["file_mounts"],
                initialization_commands=[],
                setup_commands=[],
                ray_start_commands=[],
                runtime_hash="")
            _exec(updater, "ray stop", False, False)
        # Give the cluster a moment to observe the node going away.
        time.sleep(5)
        if config.get("provider", {}).get("use_internal_ips", False) is True:
            node_ip = provider.internal_ip(node)
        else:
            node_ip = provider.external_ip(node)
    finally:
        provider.cleanup()
    return node_ip
def monitor_cluster(cluster_config_file, num_lines, override_cluster_name):
    """Tails the autoscaler monitor logs on the cluster head node.

    NOTE(review): the previous docstring ("Kills a random Raylet worker")
    was a copy-paste error from kill_node; this function only tails logs.
    """
    cmd = "tail -n {} -f /tmp/ray/session_*/logs/monitor*".format(num_lines)
    exec_cluster(cluster_config_file, cmd, False, False, False, False, False,
                 override_cluster_name, None)
def get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
                            override_cluster_name):
    """Create the cluster head node, which in turn creates the workers.

    Reuses an existing head node when its launch-config hash matches;
    otherwise terminates it and launches a fresh one. Then syncs files and
    runs the configured setup / ray-start commands on the head.
    """
    provider = get_node_provider(config["provider"], config["cluster_name"])
    config_file = os.path.abspath(config_file)
    try:
        head_node_tags = {
            TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD,
        }
        nodes = provider.non_terminated_nodes(head_node_tags)
        if len(nodes) > 0:
            head_node = nodes[0]
        else:
            head_node = None
        if not head_node:
            confirm("This will create a new cluster", yes)
        elif not no_restart:
            confirm("This will restart cluster services", yes)
        # Launch hash covers the instance config + auth; a mismatch means
        # the running head was launched with an outdated config.
        launch_hash = hash_launch_conf(config["head_node"], config["auth"])
        if head_node is None or provider.node_tags(head_node).get(
                TAG_RAY_LAUNCH_CONFIG) != launch_hash:
            if head_node is not None:
                confirm("Head node config out-of-date. It will be terminated",
                        yes)
                logger.info(
                    "get_or_create_head_node: "
                    "Shutting down outdated head node {}".format(head_node))
                provider.terminate_node(head_node)
            logger.info("get_or_create_head_node: Launching new head node...")
            head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
            head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
                config["cluster_name"])
            provider.create_node(config["head_node"], head_node_tags, 1)
            # Poll until the provider reports the new head (5s budget).
            start = time.time()
            head_node = None
            while True:
                if time.time() - start > 5:
                    raise RuntimeError("Failed to create head node.")
                nodes = provider.non_terminated_nodes(head_node_tags)
                if len(nodes) == 1:
                    head_node = nodes[0]
                    break
                time.sleep(1)
        # TODO(ekl) right now we always update the head node even if the hash
        # matches. We could prompt the user for what they want to do here.
        runtime_hash = hash_runtime_conf(config["file_mounts"], config)
        logger.info("get_or_create_head_node: Updating files on head node...")
        # Rewrite the auth config so that the head node can update the workers
        remote_config = copy.deepcopy(config)
        if config["provider"]["type"] != "kubernetes":
            remote_key_path = "~/ray_bootstrap_key.pem"
            remote_config["auth"]["ssh_private_key"] = remote_key_path
        # Adjust for new file locations
        new_mounts = {}
        for remote_path in config["file_mounts"]:
            new_mounts[remote_path] = remote_path
        remote_config["file_mounts"] = new_mounts
        remote_config["no_restart"] = no_restart
        # Now inject the rewritten config and SSH key into the head node
        remote_config_file = tempfile.NamedTemporaryFile(
            "w", prefix="ray-bootstrap-")
        remote_config_file.write(json.dumps(remote_config))
        remote_config_file.flush()
        config["file_mounts"].update({
            "~/ray_bootstrap_config.yaml": remote_config_file.name
        })
        if config["provider"]["type"] != "kubernetes":
            config["file_mounts"].update({
                remote_key_path: config["auth"]["ssh_private_key"],
            })
        # Select which commands to run based on the restart flags.
        if restart_only:
            init_commands = []
            ray_start_commands = config["head_start_ray_commands"]
        elif no_restart:
            init_commands = config["head_setup_commands"]
            ray_start_commands = []
        else:
            init_commands = config["head_setup_commands"]
            ray_start_commands = config["head_start_ray_commands"]
        updater = NodeUpdaterThread(
            node_id=head_node,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=config["initialization_commands"],
            setup_commands=init_commands,
            ray_start_commands=ray_start_commands,
            runtime_hash=runtime_hash,
        )
        updater.start()
        updater.join()
        # Refresh the node cache so we see the external ip if available
        provider.non_terminated_nodes(head_node_tags)
        if config.get("provider", {}).get("use_internal_ips", False) is True:
            head_node_ip = provider.internal_ip(head_node)
        else:
            head_node_ip = provider.external_ip(head_node)
        if updater.exitcode != 0:
            logger.error("get_or_create_head_node: "
                         "Updating {} failed".format(head_node_ip))
            sys.exit(1)
        logger.info(
            "get_or_create_head_node: "
            "Head node up-to-date, IP address is: {}".format(head_node_ip))
        # Print follow-up commands for the user.
        monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
        use_docker = "docker" in config and bool(
            config["docker"]["container_name"])
        if override_cluster_name:
            modifiers = " --cluster-name={}".format(
                quote(override_cluster_name))
        else:
            modifiers = ""
        print("To monitor auto-scaling activity, you can run:\n\n"
              "  ray exec {} {}{}{}\n".format(
                  config_file, "--docker " if use_docker else "",
                  quote(monitor_str), modifiers))
        print("To open a console on the cluster:\n\n"
              "  ray attach {}{}\n".format(config_file, modifiers))
        print("To get a remote shell to the cluster manually, run:\n\n"
              "  {}\n".format(updater.cmd_runner.remote_shell_command_str()))
    finally:
        provider.cleanup()
def attach_cluster(config_file, start, use_screen, use_tmux,
                   override_cluster_name, new):
    """Attaches to a screen for the specified cluster.

    Arguments:
        config_file: path to the cluster yaml
        start: whether to start the cluster if it isn't up
        use_screen: whether to use screen as multiplexer
        use_tmux: whether to use tmux as multiplexer
        override_cluster_name: set the name of the cluster
        new: whether to force a new screen
    """
    if use_tmux:
        cmd = "tmux new" if new else "tmux attach || tmux new"
    elif use_screen:
        cmd = "screen -L" if new else "screen -L -xRR"
    elif new:
        raise ValueError(
            "--new only makes sense if passing --screen or --tmux")
    else:
        # No multiplexer requested: just open an interactive shell.
        cmd = "$SHELL"
    exec_cluster(config_file, cmd, False, False, False, False, start,
                 override_cluster_name, None)
def exec_cluster(config_file, cmd, docker, screen, tmux, stop, start,
                 override_cluster_name, port_forward):
    """Runs a command on the specified cluster.

    Arguments:
        config_file: path to the cluster yaml
        cmd: command to run
        docker: whether to run command in docker container of config
        screen: whether to run in a screen
        tmux: whether to run in a tmux session
        stop: whether to stop the cluster after command run
        start: whether to start the cluster if it isn't up
        override_cluster_name: set the name of the cluster
        port_forward (int or list[int]): port(s) to forward
    """
    assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
    config = yaml.safe_load(open(config_file).read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)
    head_node = _get_head_node(
        config, config_file, override_cluster_name, create_if_needed=start)
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        # Updater is used only as a command runner here (no file sync or
        # setup commands are passed).
        updater = NodeUpdaterThread(
            node_id=head_node,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=[],
            setup_commands=[],
            ray_start_commands=[],
            runtime_hash="",
        )
        # Wraps a shell command so it executes inside the configured
        # docker container on the head node.
        def wrap_docker(command):
            container_name = config["docker"]["container_name"]
            if not container_name:
                raise ValueError("Docker container not specified in config.")
            return with_docker_exec(
                [command], container_name=container_name)[0]
        cmd = wrap_docker(cmd) if docker else cmd
        if stop:
            # After the user command, tear down workers and power off the
            # head node itself.
            shutdown_cmd = (
                "ray stop; ray teardown ~/ray_bootstrap_config.yaml "
                "--yes --workers-only")
            if docker:
                shutdown_cmd = wrap_docker(shutdown_cmd)
            cmd += ("; {}; sudo shutdown -h now".format(shutdown_cmd))
        _exec(updater, cmd, screen, tmux, port_forward=port_forward)
        if tmux or screen:
            # Tell the user how to re-attach to the detached session.
            attach_command_parts = ["ray attach", config_file]
            if override_cluster_name is not None:
                attach_command_parts.append(
                    "--cluster-name={}".format(override_cluster_name))
            if tmux:
                attach_command_parts.append("--tmux")
            elif screen:
                attach_command_parts.append("--screen")
            attach_command = " ".join(attach_command_parts)
            attach_info = "Use `{}` to check on command status.".format(
                attach_command)
            logger.info(attach_info)
    finally:
        provider.cleanup()
def _exec(updater, cmd, screen, tmux, port_forward=None):
    """Run ``cmd`` on the updater's node, optionally inside screen/tmux."""
    if cmd:
        if screen:
            wrapped = [
                "screen", "-L", "-dm", "bash", "-c",
                quote(cmd + "; exec bash")
            ]
            cmd = " ".join(wrapped)
        elif tmux:
            # TODO: Consider providing named session functionality
            wrapped = [
                "tmux", "new", "-d", "bash", "-c",
                quote(cmd + "; exec bash")
            ]
            cmd = " ".join(wrapped)
    updater.cmd_runner.run(
        cmd,
        allocate_tty=True,
        exit_on_fail=True,
        port_forward=port_forward)
def rsync(config_file, source, target, override_cluster_name, down):
    """Rsyncs files.

    Arguments:
        config_file: path to the cluster yaml
        source: source dir
        target: target dir
        override_cluster_name: set the name of the cluster
        down: whether we're syncing remote -> local
    """
    assert bool(source) == bool(target), (
        "Must either provide both or neither source and target.")
    config = yaml.safe_load(open(config_file).read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)
    head_node = _get_head_node(
        config, config_file, override_cluster_name, create_if_needed=False)
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        # Updater is used only for its rsync helpers here.
        updater = NodeUpdaterThread(
            node_id=head_node,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=[],
            setup_commands=[],
            ray_start_commands=[],
            runtime_hash="",
        )
        if down:
            rsync = updater.rsync_down
        else:
            rsync = updater.rsync_up
        if source and target:
            rsync(source, target)
        else:
            # No explicit paths: sync all of the configured file mounts.
            updater.sync_file_mounts(rsync)
    finally:
        provider.cleanup()
def get_head_node_ip(config_file, override_cluster_name):
    """Returns head node IP for given configuration file if exists."""
    with open(config_file) as f:
        config = yaml.safe_load(f.read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        head_node = _get_head_node(config, config_file, override_cluster_name)
        use_internal = config.get("provider", {}).get("use_internal_ips",
                                                      False) is True
        if use_internal:
            head_node_ip = provider.internal_ip(head_node)
        else:
            head_node_ip = provider.external_ip(head_node)
    finally:
        provider.cleanup()
    return head_node_ip
def get_worker_node_ips(config_file, override_cluster_name):
    """Returns worker node IPs for given configuration file."""
    with open(config_file) as f:
        config = yaml.safe_load(f.read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        workers = provider.non_terminated_nodes({
            TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
        })
        use_internal = config.get("provider", {}).get("use_internal_ips",
                                                      False) is True
        ip_of = provider.internal_ip if use_internal else provider.external_ip
        return [ip_of(node) for node in workers]
    finally:
        provider.cleanup()
def _get_head_node(config,
                   config_file,
                   override_cluster_name,
                   create_if_needed=False):
    """Return the node id of the cluster head, optionally creating it.

    Raises:
        RuntimeError: if no head node exists and create_if_needed is False.
    """
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        head_node_tags = {
            TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD,
        }
        nodes = provider.non_terminated_nodes(head_node_tags)
    finally:
        provider.cleanup()
    if len(nodes) > 0:
        head_node = nodes[0]
        return head_node
    elif create_if_needed:
        get_or_create_head_node(
            config,
            config_file,
            restart_only=False,
            no_restart=False,
            yes=True,
            override_cluster_name=override_cluster_name)
        # Recurse once with create_if_needed=False; the head should now
        # exist, otherwise the RuntimeError below fires.
        return _get_head_node(
            config, config_file, override_cluster_name, create_if_needed=False)
    else:
        raise RuntimeError("Head node of cluster ({}) not found!".format(
            config["cluster_name"]))
def confirm(msg, yes):
    """Prompt the user unless --yes was passed; aborts on rejection."""
    if yes:
        return None
    return click.confirm(msg, abort=True)
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/docker.py | Python | import os
import logging
try: # py3
from shlex import quote
except ImportError: # py2
from pipes import quote
logger = logging.getLogger(__name__)
def dockerize_if_needed(config):
    """Rewrite a cluster config so all commands run inside docker.

    If the config has a "docker" section with image(s), every setup and
    ray-start command list is wrapped with `docker exec`, and a container
    start command is prepended. Returns the (mutated) config.
    """
    if "docker" not in config:
        return config
    docker_image = config["docker"].get("image")
    docker_pull = config["docker"].get("pull_before_run", True)
    cname = config["docker"].get("container_name")
    run_options = config["docker"].get("run_options", [])
    # Head/worker images fall back to the shared image when unset.
    head_docker_image = config["docker"].get("head_image", docker_image)
    head_run_options = config["docker"].get("head_run_options", [])
    worker_docker_image = config["docker"].get("worker_image", docker_image)
    worker_run_options = config["docker"].get("worker_run_options", [])
    ssh_user = config["auth"]["ssh_user"]
    if not docker_image and not (head_docker_image and worker_docker_image):
        if cname:
            logger.warning(
                "dockerize_if_needed: "
                "Container name given but no Docker image(s) - continuing...")
        return config
    else:
        assert cname, "Must provide container name!"
    # Mount every file_mounts destination at the same path in-container.
    docker_mounts = {dst: dst for dst in config["file_mounts"]}
    if docker_pull:
        docker_pull_cmd = "docker pull {}".format(docker_image)
        config["initialization_commands"].append(docker_pull_cmd)
    head_docker_start = docker_start_cmds(ssh_user, head_docker_image,
                                          docker_mounts, cname,
                                          run_options + head_run_options)
    worker_docker_start = docker_start_cmds(ssh_user, worker_docker_image,
                                            docker_mounts, cname,
                                            run_options + worker_run_options)
    config["head_setup_commands"] = head_docker_start + (with_docker_exec(
        config["head_setup_commands"], container_name=cname))
    config["head_start_ray_commands"] = (
        docker_autoscaler_setup(cname) + with_docker_exec(
            config["head_start_ray_commands"], container_name=cname))
    config["worker_setup_commands"] = worker_docker_start + (with_docker_exec(
        config["worker_setup_commands"], container_name=cname))
    # RAY_HEAD_IP must be forwarded into the container for workers.
    config["worker_start_ray_commands"] = with_docker_exec(
        config["worker_start_ray_commands"],
        container_name=cname,
        env_vars=["RAY_HEAD_IP"])
    return config
def with_docker_exec(cmds, container_name, env_vars=None):
    """Wrap each shell command so it runs inside the named container.

    env_vars, when given, are forwarded from the host environment via
    `-e NAME=$NAME` flags.
    """
    env_str = ""
    if env_vars:
        env_str = " ".join(
            "-e {env}=${env}".format(env=name) for name in env_vars)
    wrapped = []
    for cmd in cmds:
        wrapped.append("docker exec {} {} /bin/sh -c {} ".format(
            env_str, container_name, quote(cmd)))
    return wrapped
def aptwait_cmd():
    """Shell snippet that blocks while the dpkg/apt lock files are held."""
    return ("while sudo fuser /var/{lib/{dpkg,apt/lists},cache/apt/archives}"
            "/lock >/dev/null 2>&1; do echo 'Waiting for release of dpkg/apt"
            " locks'; sleep 5; done")
def docker_start_cmds(user, image, mount, cname, user_options):
    """Build the shell command that starts the docker container.

    The `docker run` only fires when no container with this name is
    already running; the container gets host networking plus the ports
    ray needs (redis, object manager, tune client).
    """
    port_flags = " ".join(
        "-p {port}:{port}".format(port=p) for p in ("6379", "8076", "4321"))
    mount_flags = " ".join(
        "-v {}:{}".format(src, dst) for src, dst in mount.items())
    # LC_ALL/LANG are required by click, which the ray CLI uses.
    env_vars = {"LC_ALL": "C.UTF-8", "LANG": "C.UTF-8"}
    env_flags = " ".join(
        "-e {}={}".format(name, value) for name, value in env_vars.items())
    user_options_str = " ".join(user_options)
    # Skip `docker run` if the container is already up.
    check_part = " ".join(
        ["docker", "inspect", "-f", "'{{.State.Running}}'", cname, "||"])
    run_part = " ".join([
        "docker", "run", "--rm", "--name {}".format(cname), "-d", "-it",
        port_flags, mount_flags, env_flags, user_options_str, "--net=host",
        image, "bash"
    ])
    return [check_part + " " + run_part]
def docker_autoscaler_setup(cname):
    """Commands that copy the bootstrap config/key into the container.

    The autoscaler running inside the container needs these two files;
    docker cp goes via the container root because docker doesn't allow
    relative destination paths.
    """
    cmds = []
    for path in ["~/ray_bootstrap_config.yaml", "~/ray_bootstrap_key.pem"]:
        # needed because docker doesn't allow relative paths
        base_path = os.path.basename(path)
        cmds.append("docker cp {path} {cname}:{dpath}".format(
            path=path, dpath=base_path, cname=cname))
        # Then move the file to its expected home-relative location
        # from inside the container.
        cmds.extend(
            with_docker_exec(
                ["cp {} {}".format("/" + base_path, path)],
                container_name=cname))
    return cmds
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/gcp/config.py | Python | import os
import logging
import time
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from googleapiclient import discovery, errors
logger = logging.getLogger(__name__)
# Module-level API clients shared by all helpers in this file.
crm = discovery.build("cloudresourcemanager", "v1")
iam = discovery.build("iam", "v1")
compute = discovery.build("compute", "v1")
VERSION = "v1"
RAY = "ray-autoscaler"
# Identity of the service account ray creates/uses for head nodes.
DEFAULT_SERVICE_ACCOUNT_ID = RAY + "-sa-" + VERSION
SERVICE_ACCOUNT_EMAIL_TEMPLATE = (
    "{account_id}@{project_id}.iam.gserviceaccount.com")
DEFAULT_SERVICE_ACCOUNT_CONFIG = {
    "displayName": "Ray Autoscaler Service Account ({})".format(VERSION),
}
DEFAULT_SERVICE_ACCOUNT_ROLES = ("roles/storage.objectAdmin",
                                 "roles/compute.admin")
# Polling budget for long-running GCP operations (12 * 5s = ~60s).
MAX_POLLS = 12
POLL_INTERVAL = 5
def wait_for_crm_operation(operation):
    """Poll for cloud resource manager operation until finished.

    Raises on API-reported errors; gives up silently after MAX_POLLS
    (the last poll's result is returned either way).
    """
    logger.info("wait_for_crm_operation: "
                "Waiting for operation {} to finish...".format(operation))
    for _ in range(MAX_POLLS):
        result = crm.operations().get(name=operation["name"]).execute()
        if "error" in result:
            raise Exception(result["error"])
        if "done" in result and result["done"]:
            logger.info("wait_for_crm_operation: Operation done.")
            break
        time.sleep(POLL_INTERVAL)
    return result
def wait_for_compute_global_operation(project_name, operation):
    """Poll for global compute operation until finished.

    Raises on API-reported errors; returns the last polled result even
    if the MAX_POLLS budget runs out before status reaches DONE.
    """
    logger.info("wait_for_compute_global_operation: "
                "Waiting for operation {} to finish...".format(
                    operation["name"]))
    for _ in range(MAX_POLLS):
        result = compute.globalOperations().get(
            project=project_name,
            operation=operation["name"],
        ).execute()
        if "error" in result:
            raise Exception(result["error"])
        if result["status"] == "DONE":
            logger.info("wait_for_compute_global_operation: "
                        "Operation done.")
            break
        time.sleep(POLL_INTERVAL)
    return result
def key_pair_name(i, region, project_id, ssh_user):
    """Returns the ith default gcp_key_pair_name.

    Fix: the previous format string had only four placeholders for five
    arguments, so str.format silently dropped ``i`` and every candidate
    name produced by the retry loop in _configure_key_pair collided on
    the same value. The index is now part of the name, matching the
    docstring's contract.
    """
    key_name = "{}_gcp_{}_{}_{}_{}".format(RAY, region, project_id, ssh_user,
                                           i)
    return key_name
def key_pair_paths(key_name):
    """Returns public and private key paths for a given key_name."""
    def ssh_path(extension):
        return os.path.expanduser("~/.ssh/{}.{}".format(key_name, extension))
    return ssh_path("pub"), ssh_path("pem")
def generate_rsa_key_pair():
    """Create public and private ssh-keys.

    Returns:
        (public_key, pem): OpenSSH-formatted public key string and the
        unencrypted PEM-encoded private key string.
    """
    # 2048-bit RSA with the standard public exponent.
    key = rsa.generate_private_key(
        backend=default_backend(), public_exponent=65537, key_size=2048)
    public_key = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH).decode("utf-8")
    # NOTE: private key is written without passphrase protection; the
    # caller is responsible for chmod 600 on the file it saves to.
    pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()).decode("utf-8")
    return public_key, pem
def bootstrap_gcp(config):
    """Prepare a config for GCP: project, IAM role, SSH keys, subnet."""
    for step in (_configure_project, _configure_iam_role,
                 _configure_key_pair, _configure_subnet):
        config = step(config)
    return config
def _configure_project(config):
    """Setup a Google Cloud Platform Project.

    Google Compute Platform organizes all the resources, such as storage
    buckets, users, and instances under projects. This is different from
    aws ec2 where everything is global.
    """
    project_id = config["provider"].get("project_id")
    assert config["provider"]["project_id"] is not None, (
        "'project_id' must be set in the 'provider' section of the autoscaler"
        " config. Notice that the project id must be globally unique.")
    project = _get_project(project_id)
    if project is None:
        # Project not found, try creating it
        _create_project(project_id)
        project = _get_project(project_id)
    assert project is not None, "Failed to create project"
    assert project["lifecycleState"] == "ACTIVE", (
        "Project status needs to be ACTIVE, got {}".format(
            project["lifecycleState"]))
    # Store the canonical projectId reported by the API back in the config.
    config["provider"]["project_id"] = project["projectId"]
    return config
def _configure_iam_role(config):
    """Setup a gcp service account with IAM roles.

    Creates a gcp service acconut and binds IAM roles which allow it to
    control storage/compute services. Specifically, the head node needs to
    have an IAM role that allows it to create further gce instances and
    store items in google cloud storage.

    TODO: Allow the name/id of the service account to be configured
    """
    email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
        account_id=DEFAULT_SERVICE_ACCOUNT_ID,
        project_id=config["provider"]["project_id"])
    service_account = _get_service_account(email, config)
    if service_account is None:
        logger.info("_configure_iam_role: "
                    "Creating new service account {}".format(
                        DEFAULT_SERVICE_ACCOUNT_ID))
        service_account = _create_service_account(
            DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config)
    assert service_account is not None, "Failed to create service account"
    _add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)
    config["head_node"]["serviceAccounts"] = [{
        "email": service_account["email"],
        # NOTE: The amount of access is determined by the scope + IAM
        # role of the service account. Even if the cloud-platform scope
        # gives (scope) access to the whole cloud-platform, the service
        # account is limited by the IAM rights specified below.
        "scopes": ["https://www.googleapis.com/auth/cloud-platform"]
    }]
    return config
def _configure_key_pair(config):
    """Configure SSH access, using an existing key pair if possible.

    Creates a project-wide ssh key that can be used to access all the
    instances unless explicitly prohibited by instance config.

    The ssh-keys created by ray are of format:

      [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]

    where:

      [USERNAME] is the user for the SSH key, specified in the config.
      [KEY_VALUE] is the public SSH key value.
    """
    if "ssh_private_key" in config["auth"]:
        # User supplied their own key; nothing to configure.
        return config
    ssh_user = config["auth"]["ssh_user"]
    project = compute.projects().get(
        project=config["provider"]["project_id"]).execute()
    # Key pairs associated with project meta data. The key pairs are general,
    # and not just ssh keys.
    ssh_keys_str = next(
        (item for item in project["commonInstanceMetadata"].get("items", [])
         if item["key"] == "ssh-keys"), {}).get("value", "")
    ssh_keys = ssh_keys_str.split("\n") if ssh_keys_str else []
    # Try a few times to get or create a good key pair.
    key_found = False
    for i in range(10):
        key_name = key_pair_name(i, config["provider"]["region"],
                                 config["provider"]["project_id"], ssh_user)
        public_key_path, private_key_path = key_pair_paths(key_name)
        # Reuse a key that exists both in GCP metadata and on local disk.
        for ssh_key in ssh_keys:
            key_parts = ssh_key.split(" ")
            if len(key_parts) != 3:
                continue
            if key_parts[2] == ssh_user and os.path.exists(private_key_path):
                # Found a key
                key_found = True
                break
        # Create a key since it doesn't exist locally or in GCP
        if not key_found and not os.path.exists(private_key_path):
            logger.info("_configure_key_pair: "
                        "Creating new key pair {}".format(key_name))
            public_key, private_key = generate_rsa_key_pair()
            _create_project_ssh_key_pair(project, public_key, ssh_user)
            with open(private_key_path, "w") as f:
                f.write(private_key)
            # Private keys must not be world-readable for ssh to accept them.
            os.chmod(private_key_path, 0o600)
            with open(public_key_path, "w") as f:
                f.write(public_key)
            key_found = True
            break
        if key_found:
            break
    assert key_found, "SSH keypair for user {} not found for {}".format(
        ssh_user, private_key_path)
    assert os.path.exists(private_key_path), (
        "Private key file {} not found for user {}"
        "".format(private_key_path, ssh_user))
    logger.info("_configure_key_pair: "
                "Private key not specified in config, using"
                "{}".format(private_key_path))
    config["auth"]["ssh_private_key"] = private_key_path
    return config
def _configure_subnet(config):
    """Pick a reasonable subnet if not specified by the config."""
    # Rationale: avoid subnet lookup if the network is already
    # completely manually configured
    if ("networkInterfaces" in config["head_node"]
            and "networkInterfaces" in config["worker_nodes"]):
        return config
    subnets = _list_subnets(config)
    if not subnets:
        raise NotImplementedError("Should be able to create subnet.")
    # TODO: make sure that we have usable subnet. Maybe call
    # compute.subnetworks().listUsable? For some reason it didn't
    # work out-of-the-box
    default_subnet = subnets[0]
    # Give both node types an external NAT so they are reachable over ssh.
    if "networkInterfaces" not in config["head_node"]:
        config["head_node"]["networkInterfaces"] = [{
            "subnetwork": default_subnet["selfLink"],
            "accessConfigs": [{
                "name": "External NAT",
                "type": "ONE_TO_ONE_NAT",
            }],
        }]
    if "networkInterfaces" not in config["worker_nodes"]:
        config["worker_nodes"]["networkInterfaces"] = [{
            "subnetwork": default_subnet["selfLink"],
            "accessConfigs": [{
                "name": "External NAT",
                "type": "ONE_TO_ONE_NAT",
            }],
        }]
    return config
def _list_subnets(config):
    """List all subnetworks in the configured project/region."""
    response = compute.subnetworks().list(
        project=config["provider"]["project_id"],
        region=config["provider"]["region"]).execute()
    # NOTE(review): assumes "items" is always present in the response;
    # an empty region would raise KeyError here — confirm against the API.
    return response["items"]
def _get_subnet(config, subnet_id):
    """Fetch a single subnetwork resource by its id/name."""
    subnet = compute.subnetworks().get(
        project=config["provider"]["project_id"],
        region=config["provider"]["region"],
        subnetwork=subnet_id,
    ).execute()
    return subnet
def _get_project(project_id):
    """Fetch a project resource, or None if inaccessible/nonexistent.

    A 403 is treated as "not found" because GCP returns 403 (not 404)
    for projects the caller cannot see; other HTTP errors propagate.
    """
    try:
        project = crm.projects().get(projectId=project_id).execute()
    except errors.HttpError as e:
        if e.resp.status != 403:
            raise
        project = None
    return project
def _create_project(project_id):
    """Create a new GCP project and block until the operation completes."""
    operation = crm.projects().create(body={
        "projectId": project_id,
        "name": project_id
    }).execute()
    result = wait_for_crm_operation(operation)
    return result
def _get_service_account(account, config):
    """Fetch a service account by email, or None if it does not exist."""
    project_id = config["provider"]["project_id"]
    full_name = ("projects/{project_id}/serviceAccounts/{account}"
                 "".format(project_id=project_id, account=account))
    try:
        service_account = iam.projects().serviceAccounts().get(
            name=full_name).execute()
    except errors.HttpError as e:
        # 404 simply means the account doesn't exist yet.
        if e.resp.status != 404:
            raise
        service_account = None
    return service_account
def _create_service_account(account_id, account_config, config):
    """Create a service account in the configured project."""
    project_id = config["provider"]["project_id"]
    service_account = iam.projects().serviceAccounts().create(
        name="projects/{project_id}".format(project_id=project_id),
        body={
            "accountId": account_id,
            "serviceAccount": account_config,
        }).execute()
    return service_account
def _add_iam_policy_binding(service_account, roles):
    """Add new IAM roles for the service account.

    Reads the current project policy, adds the account to every missing
    role binding, and writes the policy back only if something changed.
    """
    project_id = service_account["projectId"]
    email = service_account["email"]
    member_id = "serviceAccount:" + email
    policy = crm.projects().getIamPolicy(
        resource=project_id, body={}).execute()
    already_configured = True
    for role in roles:
        role_exists = False
        for binding in policy["bindings"]:
            if binding["role"] == role:
                if member_id not in binding["members"]:
                    binding["members"].append(member_id)
                    already_configured = False
                role_exists = True
        if not role_exists:
            already_configured = False
            policy["bindings"].append({
                "members": [member_id],
                "role": role,
            })
    if already_configured:
        # In some managed environments, an admin needs to grant the
        # roles, so only call setIamPolicy if needed.
        return
    result = crm.projects().setIamPolicy(
        resource=project_id, body={
            "policy": policy,
        }).execute()
    return result
def _create_project_ssh_key_pair(project, public_key, ssh_user):
    """Inserts an ssh-key into project commonInstanceMetadata"""
    key_parts = public_key.split(" ")
    # Sanity checks to make sure that the generated key matches expectation
    assert len(key_parts) == 2, key_parts
    assert key_parts[0] == "ssh-rsa", key_parts
    # Project-wide ssh keys use the "user:ssh-rsa key user" format.
    new_ssh_meta = "{ssh_user}:ssh-rsa {key_value} {ssh_user}".format(
        ssh_user=ssh_user, key_value=key_parts[1])
    common_instance_metadata = project["commonInstanceMetadata"]
    items = common_instance_metadata.get("items", [])
    ssh_keys_i = next(
        (i for i, item in enumerate(items) if item["key"] == "ssh-keys"), None)
    if ssh_keys_i is None:
        # No ssh-keys entry yet: create it.
        items.append({"key": "ssh-keys", "value": new_ssh_meta})
    else:
        # Append to the existing newline-separated key list.
        ssh_keys = items[ssh_keys_i]
        ssh_keys["value"] += "\n" + new_ssh_meta
        items[ssh_keys_i] = ssh_keys
    common_instance_metadata["items"] = items
    operation = compute.projects().setCommonInstanceMetadata(
        project=project["name"], body=common_instance_metadata).execute()
    response = wait_for_compute_global_operation(project["name"], operation)
    return response
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/gcp/node_provider.py | Python | from uuid import uuid4
from threading import RLock
import time
import logging
from googleapiclient import discovery
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME
from ray.autoscaler.gcp.config import MAX_POLLS, POLL_INTERVAL
logger = logging.getLogger(__name__)
INSTANCE_NAME_MAX_LEN = 64
INSTANCE_NAME_UUID_LEN = 8
def wait_for_compute_zone_operation(compute, project_name, operation, zone):
    """Poll for compute zone operation until finished.

    Raises on API-reported errors; returns the last polled result even if
    the MAX_POLLS budget is exhausted before the status reaches DONE.
    """
    logger.info("wait_for_compute_zone_operation: "
                "Waiting for operation {} to finish...".format(
                    operation["name"]))
    for _ in range(MAX_POLLS):
        result = compute.zoneOperations().get(
            project=project_name, operation=operation["name"],
            zone=zone).execute()
        if "error" in result:
            raise Exception(result["error"])
        if result["status"] == "DONE":
            logger.info("wait_for_compute_zone_operation: "
                        "Operation {} finished.".format(operation["name"]))
            break
        time.sleep(POLL_INTERVAL)
    return result
class GCPNodeProvider(NodeProvider):
    def __init__(self, provider_config, cluster_name):
        """Create a compute API client; all calls are serialized by a lock."""
        NodeProvider.__init__(self, provider_config, cluster_name)
        self.lock = RLock()
        self.compute = discovery.build("compute", "v1")
        # Cache of node objects from the last nodes() call. This avoids
        # excessive DescribeInstances requests.
        self.cached_nodes = {}
    def non_terminated_nodes(self, tag_filters):
        """List alive instance names matching the given label filters.

        Also refreshes self.cached_nodes with the fetched instances.
        """
        with self.lock:
            # Build a GCE filter expression: label filters AND alive-state
            # filter AND cluster-name filter.
            if tag_filters:
                label_filter_expr = "(" + " AND ".join([
                    "(labels.{key} = {value})".format(key=key, value=value)
                    for key, value in tag_filters.items()
                ]) + ")"
            else:
                label_filter_expr = ""
            instance_state_filter_expr = "(" + " OR ".join([
                "(status = {status})".format(status=status)
                for status in {"PROVISIONING", "STAGING", "RUNNING"}
            ]) + ")"
            cluster_name_filter_expr = ("(labels.{key} = {value})"
                                        "".format(
                                            key=TAG_RAY_CLUSTER_NAME,
                                            value=self.cluster_name))
            not_empty_filters = [
                f for f in [
                    label_filter_expr,
                    instance_state_filter_expr,
                    cluster_name_filter_expr,
                ] if f
            ]
            filter_expr = " AND ".join(not_empty_filters)
            response = self.compute.instances().list(
                project=self.provider_config["project_id"],
                zone=self.provider_config["availability_zone"],
                filter=filter_expr,
            ).execute()
            instances = response.get("items", [])
            # Note: All the operations use "name" as the unique instance id
            self.cached_nodes = {i["name"]: i for i in instances}
            return [i["name"] for i in instances]
def is_running(self, node_id):
with self.lock:
node = self._get_cached_node(node_id)
return node["status"] == "RUNNING"
def is_terminated(self, node_id):
with self.lock:
node = self._get_cached_node(node_id)
return node["status"] not in {"PROVISIONING", "STAGING", "RUNNING"}
def node_tags(self, node_id):
with self.lock:
node = self._get_cached_node(node_id)
labels = node.get("labels", {})
return labels
def set_node_tags(self, node_id, tags):
with self.lock:
labels = tags
project_id = self.provider_config["project_id"]
availability_zone = self.provider_config["availability_zone"]
node = self._get_node(node_id)
operation = self.compute.instances().setLabels(
project=project_id,
zone=availability_zone,
instance=node_id,
body={
"labels": dict(node["labels"], **labels),
"labelFingerprint": node["labelFingerprint"]
}).execute()
result = wait_for_compute_zone_operation(
self.compute, project_id, operation, availability_zone)
return result
def external_ip(self, node_id):
with self.lock:
node = self._get_cached_node(node_id)
def get_external_ip(node):
return node.get("networkInterfaces", [{}])[0].get(
"accessConfigs", [{}])[0].get("natIP", None)
ip = get_external_ip(node)
if ip is None:
node = self._get_node(node_id)
ip = get_external_ip(node)
return ip
def internal_ip(self, node_id):
with self.lock:
node = self._get_cached_node(node_id)
def get_internal_ip(node):
return node.get("networkInterfaces", [{}])[0].get("networkIP")
ip = get_internal_ip(node)
if ip is None:
node = self._get_node(node_id)
ip = get_internal_ip(node)
return ip
def create_node(self, base_config, tags, count):
with self.lock:
labels = tags # gcp uses "labels" instead of aws "tags"
project_id = self.provider_config["project_id"]
availability_zone = self.provider_config["availability_zone"]
config = base_config.copy()
name_label = labels[TAG_RAY_NODE_NAME]
assert (len(name_label) <=
(INSTANCE_NAME_MAX_LEN - INSTANCE_NAME_UUID_LEN - 1)), (
name_label, len(name_label))
machine_type = ("zones/{zone}/machineTypes/{machine_type}"
"".format(
zone=availability_zone,
machine_type=base_config["machineType"]))
labels = dict(config.get("labels", {}), **labels)
config.update({
"machineType": machine_type,
"labels": dict(labels,
**{TAG_RAY_CLUSTER_NAME: self.cluster_name}),
})
operations = [
self.compute.instances().insert(
project=project_id,
zone=availability_zone,
body=dict(
config, **{
"name": ("{name_label}-{uuid}".format(
name_label=name_label,
uuid=uuid4().hex[:INSTANCE_NAME_UUID_LEN]))
})).execute() for i in range(count)
]
results = [
wait_for_compute_zone_operation(self.compute, project_id,
operation, availability_zone)
for operation in operations
]
return results
def terminate_node(self, node_id):
with self.lock:
project_id = self.provider_config["project_id"]
availability_zone = self.provider_config["availability_zone"]
operation = self.compute.instances().delete(
project=project_id,
zone=availability_zone,
instance=node_id,
).execute()
result = wait_for_compute_zone_operation(
self.compute, project_id, operation, availability_zone)
return result
def _get_node(self, node_id):
self.non_terminated_nodes({}) # Side effect: updates cache
with self.lock:
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
instance = self.compute.instances().get(
project=self.provider_config["project_id"],
zone=self.provider_config["availability_zone"],
instance=node_id,
).execute()
return instance
def _get_cached_node(self, node_id):
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/kubernetes/__init__.py | Python | import kubernetes
from kubernetes.config.config_exception import ConfigException
_configured = False
_core_api = None
_auth_api = None
def _load_config():
    """Load Kubernetes client configuration exactly once.

    Prefers the in-cluster service-account config; falls back to the
    local kubeconfig file when not running inside a pod.
    """
    global _configured
    if not _configured:
        try:
            kubernetes.config.load_incluster_config()
        except ConfigException:
            kubernetes.config.load_kube_config()
        _configured = True
def core_api():
    """Return a lazily constructed, module-cached CoreV1Api client."""
    global _core_api
    if _core_api is not None:
        return _core_api
    _load_config()
    _core_api = kubernetes.client.CoreV1Api()
    return _core_api
def auth_api():
    """Return a lazily constructed, module-cached RbacAuthorizationV1Api."""
    global _auth_api
    if _auth_api is not None:
        return _auth_api
    _load_config()
    _auth_api = kubernetes.client.RbacAuthorizationV1Api()
    return _auth_api
log_prefix = "KubernetesNodeProvider: "
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/kubernetes/config.py | Python | import logging
from kubernetes import client
from ray.autoscaler.kubernetes import auth_api, core_api, log_prefix
logger = logging.getLogger(__name__)
class InvalidNamespaceError(ValueError):
    """Raised when a config section's namespace disagrees with the provider's.

    Args:
        field_name: Name of the offending provider-config field.
        namespace: The namespace the provider config requires.
    """

    def __init__(self, field_name, namespace):
        self.message = ("Namespace of {} config doesn't match provided "
                        "namespace '{}'. Either set it to {} or remove the "
                        "field".format(field_name, namespace, namespace))
        # Pass the message to ValueError so args/repr/pickling carry it
        # (the original left args empty).
        super().__init__(self.message)

    def __str__(self):
        return self.message
def using_existing_msg(resource_type, name):
    """Log line for reusing a pre-existing Kubernetes resource."""
    return "using existing %s '%s'" % (resource_type, name)
def not_found_msg(resource_type, name):
    """Log line for a missing resource that is about to be created."""
    return "%s '%s' not found, attempting to create it" % (resource_type, name)
def created_msg(resource_type, name):
    """Log line for a freshly created Kubernetes resource."""
    return "successfully created %s '%s'" % (resource_type, name)
def not_provided_msg(resource_type):
    """Log line for a resource omitted from the config entirely."""
    return "no %s config provided, must already exist" % (resource_type,)
def bootstrap_kubernetes(config):
    """Validate a Kubernetes provider config and ensure its RBAC objects.

    Ensures the namespace plus the autoscaler service account, role, and
    role binding exist, creating any that are missing.

    Args:
        config: Full autoscaler config dict with a "provider" section.

    Returns:
        The config dict, unchanged.

    Raises:
        ValueError: If `use_internal_ips` is false — external pod IPs are
            unsupported.
    """
    if not config["provider"]["use_internal_ips"]:
        # Bug fix: this ValueError used to be *returned* instead of raised,
        # so the invalid config passed through silently. The message also
        # told users to set the flag to "false" when "true" is required.
        raise ValueError("Exposing external IP addresses for ray pods isn't "
                         "currently supported. Please set "
                         "'use_internal_ips' to true.")
    namespace = _configure_namespace(config["provider"])
    _configure_autoscaler_service_account(namespace, config["provider"])
    _configure_autoscaler_role(namespace, config["provider"])
    _configure_autoscaler_role_binding(namespace, config["provider"])
    return config
def _configure_namespace(provider_config):
    """Ensure the configured namespace exists, creating it when absent.

    Returns the namespace name; raises ValueError if it is not configured.
    """
    if "namespace" not in provider_config:
        raise ValueError("Must specify namespace in Kubernetes config.")
    namespace = provider_config["namespace"]

    selector = "metadata.name={}".format(namespace)
    matches = core_api().list_namespace(field_selector=selector).items
    if matches:
        assert len(matches) == 1
        logger.info(log_prefix + using_existing_msg("namespace", namespace))
        return namespace

    logger.info(log_prefix + not_found_msg("namespace", namespace))
    body = client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace))
    core_api().create_namespace(body)
    logger.info(log_prefix + created_msg("namespace", namespace))
    return namespace
def _configure_autoscaler_service_account(namespace, provider_config):
    """Ensure the autoscaler's service account exists in the namespace."""
    field = "autoscaler_service_account"
    if field not in provider_config:
        logger.info(log_prefix + not_provided_msg(field))
        return
    account = provider_config[field]

    metadata = account["metadata"]
    if "namespace" not in metadata:
        metadata["namespace"] = namespace
    elif metadata["namespace"] != namespace:
        raise InvalidNamespaceError(field, namespace)
    name = metadata["name"]

    existing = core_api().list_namespaced_service_account(
        namespace, field_selector="metadata.name={}".format(name)).items
    if existing:
        assert len(existing) == 1
        logger.info(log_prefix + using_existing_msg(field, name))
        return

    logger.info(log_prefix + not_found_msg(field, name))
    core_api().create_namespaced_service_account(namespace, account)
    logger.info(log_prefix + created_msg(field, name))
def _configure_autoscaler_role(namespace, provider_config):
    """Ensure the autoscaler's RBAC role exists in the namespace."""
    field = "autoscaler_role"
    if field not in provider_config:
        logger.info(log_prefix + not_provided_msg(field))
        return
    role = provider_config[field]

    metadata = role["metadata"]
    if "namespace" not in metadata:
        metadata["namespace"] = namespace
    elif metadata["namespace"] != namespace:
        raise InvalidNamespaceError(field, namespace)
    name = metadata["name"]

    existing = auth_api().list_namespaced_role(
        namespace, field_selector="metadata.name={}".format(name)).items
    if existing:
        assert len(existing) == 1
        logger.info(log_prefix + using_existing_msg(field, name))
        return

    logger.info(log_prefix + not_found_msg(field, name))
    auth_api().create_namespaced_role(namespace, role)
    logger.info(log_prefix + created_msg(field, name))
def _configure_autoscaler_role_binding(namespace, provider_config):
    """Ensure the autoscaler's RBAC role binding exists in the namespace."""
    field = "autoscaler_role_binding"
    if field not in provider_config:
        logger.info(log_prefix + not_provided_msg(field))
        return
    binding = provider_config[field]

    metadata = binding["metadata"]
    if "namespace" not in metadata:
        metadata["namespace"] = namespace
    elif metadata["namespace"] != namespace:
        raise InvalidNamespaceError(field, namespace)

    # Each subject must also live in (or default to) the same namespace.
    for subject in binding["subjects"]:
        if "namespace" not in subject:
            subject["namespace"] = namespace
        elif subject["namespace"] != namespace:
            raise InvalidNamespaceError(
                field + " subject '{}'".format(subject["name"]), namespace)
    name = metadata["name"]

    existing = auth_api().list_namespaced_role_binding(
        namespace, field_selector="metadata.name={}".format(name)).items
    if existing:
        assert len(existing) == 1
        logger.info(log_prefix + using_existing_msg(field, name))
        return

    logger.info(log_prefix + not_found_msg(field, name))
    auth_api().create_namespaced_role_binding(namespace, binding)
    logger.info(log_prefix + created_msg(field, name))
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
#!/bin/bash
# Helper script to use kubectl as a remote shell for rsync to sync files
# to/from pods that have rsync installed. Taken from:
# https://serverfault.com/questions/741670/rsync-files-to-a-kubernetes-pod/746352

if [ -z "$KRSYNC_STARTED" ]; then
    export KRSYNC_STARTED=true
    # Bug fix: quote "$@" so arguments containing spaces survive re-exec.
    exec rsync --blocking-io --rsh "$0" "$@"
fi

# Running as --rsh
namespace=''
pod=$1
shift

# If user uses pod@namespace, rsync passes as: {us} -l pod namespace ...
if [ "X$pod" = "X-l" ]; then
    pod=$1
    shift
    namespace="-n $1"
    shift
fi

# $namespace is intentionally unquoted: it is either empty or the two
# words "-n <name>" that must split into separate kubectl arguments.
exec kubectl $namespace exec -i "$pod" -- "$@"
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/kubernetes/node_provider.py | Python | import logging
from ray.autoscaler.kubernetes import core_api, log_prefix
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME
logger = logging.getLogger(__name__)
def to_label_selector(tags):
    """Render a tag dict as a comma-separated Kubernetes label selector."""
    return ",".join("{}={}".format(k, v) for k, v in tags.items())
class KubernetesNodeProvider(NodeProvider):
    """NodeProvider that maps Ray nodes onto Kubernetes pods.

    `node_id` is the pod name. All pods live in the configured namespace
    and carry the cluster name as a label.
    """

    def __init__(self, provider_config, cluster_name):
        NodeProvider.__init__(self, provider_config, cluster_name)
        self.cluster_name = cluster_name
        self.namespace = provider_config["namespace"]

    def non_terminated_nodes(self, tag_filters):
        """Return names of pods in the Pending or Running phase."""
        # Match pods that are in the 'Pending' or 'Running' phase.
        # Unfortunately there is no OR operator in field selectors, so we
        # have to match on NOT any of the other phases.
        field_selector = ",".join([
            "status.phase!=Failed",
            "status.phase!=Unknown",
            "status.phase!=Succeeded",
            "status.phase!=Terminating",
        ])
        # Bug fix: copy before adding the cluster-name label so the
        # caller's tag_filters dict is not mutated as a side effect.
        tag_filters = dict(tag_filters)
        tag_filters[TAG_RAY_CLUSTER_NAME] = self.cluster_name
        label_selector = to_label_selector(tag_filters)
        pod_list = core_api().list_namespaced_pod(
            self.namespace,
            field_selector=field_selector,
            label_selector=label_selector)
        return [pod.metadata.name for pod in pod_list.items]

    def is_running(self, node_id):
        """Return True iff the pod is in the Running phase."""
        pod = core_api().read_namespaced_pod_status(node_id, self.namespace)
        return pod.status.phase == "Running"

    def is_terminated(self, node_id):
        """Return True iff the pod is neither running nor pending."""
        pod = core_api().read_namespaced_pod_status(node_id, self.namespace)
        return pod.status.phase not in ["Running", "Pending"]

    def node_tags(self, node_id):
        """Return the pod's labels, which double as node tags."""
        pod = core_api().read_namespaced_pod_status(node_id, self.namespace)
        return pod.metadata.labels

    def external_ip(self, node_id):
        raise NotImplementedError("Must use internal IPs with Kubernetes.")

    def internal_ip(self, node_id):
        """Return the pod's cluster-internal IP."""
        pod = core_api().read_namespaced_pod_status(node_id, self.namespace)
        return pod.status.pod_ip

    def set_node_tags(self, node_id, tags):
        """Patch the given labels onto the pod."""
        body = {"metadata": {"labels": tags}}
        core_api().patch_namespaced_pod(node_id, self.namespace, body)

    def create_node(self, node_config, tags, count):
        """Launch `count` pods built from node_config, labeled with tags."""
        # Bug fix: deep-copy the spec and copy the tags so the caller's
        # node_config (whose nested "metadata" dict was previously shared
        # through the shallow copy) and tags dict are not mutated.
        from copy import deepcopy
        pod_spec = deepcopy(node_config)
        tags = dict(tags)
        tags[TAG_RAY_CLUSTER_NAME] = self.cluster_name
        pod_spec["metadata"]["namespace"] = self.namespace
        pod_spec["metadata"]["labels"] = tags
        logger.info(log_prefix + "calling create_namespaced_pod "
                    "(count={}).".format(count))
        for _ in range(count):
            core_api().create_namespaced_pod(self.namespace, pod_spec)

    def terminate_node(self, node_id):
        """Delete the pod."""
        core_api().delete_namespaced_pod(node_id, self.namespace)

    def terminate_nodes(self, node_ids):
        """Delete each pod in turn."""
        for node_id in node_ids:
            self.terminate_node(node_id)
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
def bootstrap_local(config):
    """Local clusters need no extra bootstrapping; return config as-is."""
    return config
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/local/node_provider.py | Python | from filelock import FileLock
from threading import RLock
import json
import os
import socket
import logging
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_NODE_TYPE, NODE_TYPE_WORKER, \
NODE_TYPE_HEAD
logger = logging.getLogger(__name__)
filelock_logger = logging.getLogger("filelock")
filelock_logger.setLevel(logging.WARNING)
class ClusterState:
    """On-disk record of which local-cluster IPs are in use.

    State is a JSON dict mapping node ip -> {"tags": ..., "state": ...},
    guarded by both a thread RLock and an inter-process FileLock.
    """

    def __init__(self, lock_path, save_path, provider_config):
        self.lock = RLock()
        self.file_lock = FileLock(lock_path)
        self.save_path = save_path

        with self.lock:
            with self.file_lock:
                if os.path.exists(self.save_path):
                    # Bug fix: read via a context manager; the original
                    # `open(...).read()` leaked the file handle.
                    with open(self.save_path) as f:
                        workers = json.loads(f.read())
                    head_config = workers.get(provider_config["head_ip"])
                    # Discard stale state if the head node changed.
                    if (not head_config or
                            head_config.get("tags", {}).get(TAG_RAY_NODE_TYPE)
                            != NODE_TYPE_HEAD):
                        workers = {}
                        logger.info("Head IP changed - recreating cluster.")
                else:
                    workers = {}
                logger.info("ClusterState: "
                            "Loaded cluster state: {}".format(list(workers)))
                # Seed a terminated entry for every configured worker ip.
                for worker_ip in provider_config["worker_ips"]:
                    if worker_ip not in workers:
                        workers[worker_ip] = {
                            "tags": {
                                TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER
                            },
                            "state": "terminated",
                        }
                    else:
                        assert workers[worker_ip]["tags"][
                            TAG_RAY_NODE_TYPE] == NODE_TYPE_WORKER
                if provider_config["head_ip"] not in workers:
                    workers[provider_config["head_ip"]] = {
                        "tags": {
                            TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD
                        },
                        "state": "terminated",
                    }
                else:
                    assert workers[provider_config["head_ip"]]["tags"][
                        TAG_RAY_NODE_TYPE] == NODE_TYPE_HEAD
                # Every configured ip (workers + head) must be present.
                assert len(workers) == len(provider_config["worker_ips"]) + 1
                with open(self.save_path, "w") as f:
                    logger.debug("ClusterState: "
                                 "Writing cluster state: {}".format(workers))
                    f.write(json.dumps(workers))

    def get(self):
        """Return the current worker dict, reading under both locks."""
        with self.lock:
            with self.file_lock:
                # Bug fix: context manager instead of a leaked open().
                with open(self.save_path) as f:
                    return json.loads(f.read())

    def put(self, worker_id, info):
        """Persist `info` (requires "tags" and "state") for `worker_id`."""
        assert "tags" in info
        assert "state" in info
        with self.lock:
            with self.file_lock:
                workers = self.get()
                workers[worker_id] = info
                with open(self.save_path, "w") as f:
                    logger.info("ClusterState: "
                                "Writing cluster state: {}".format(
                                    list(workers)))
                    f.write(json.dumps(workers))
class LocalNodeProvider(NodeProvider):
    """NodeProvider for private/local clusters.

    `node_id` is overloaded to also be `node_ip` in this class.
    """

    def __init__(self, provider_config, cluster_name):
        NodeProvider.__init__(self, provider_config, cluster_name)
        self.state = ClusterState("/tmp/cluster-{}.lock".format(cluster_name),
                                  "/tmp/cluster-{}.state".format(cluster_name),
                                  provider_config)

    def non_terminated_nodes(self, tag_filters):
        """Return ips of non-terminated nodes whose tags match tag_filters."""
        workers = self.state.get()
        matching_ips = []
        for worker_ip, info in workers.items():
            if info["state"] == "terminated":
                continue
            ok = True
            for k, v in tag_filters.items():
                if info["tags"].get(k) != v:
                    ok = False
                    break
            if ok:
                matching_ips.append(worker_ip)
        return matching_ips

    def is_running(self, node_id):
        return self.state.get()[node_id]["state"] == "running"

    def is_terminated(self, node_id):
        return not self.is_running(node_id)

    def node_tags(self, node_id):
        return self.state.get()[node_id]["tags"]

    def external_ip(self, node_id):
        return socket.gethostbyname(node_id)

    def internal_ip(self, node_id):
        return socket.gethostbyname(node_id)

    def set_node_tags(self, node_id, tags):
        with self.state.file_lock:
            info = self.state.get()[node_id]
            info["tags"].update(tags)
            self.state.put(node_id, info)

    def create_node(self, node_config, tags, count):
        """Revive up to `count` terminated nodes of the requested type.

        Bug fix: the original returned after reviving a single node, so
        any request with count > 1 silently under-provisioned.
        """
        node_type = tags[TAG_RAY_NODE_TYPE]
        with self.state.file_lock:
            launched = 0
            workers = self.state.get()
            for node_id, info in workers.items():
                if launched >= count:
                    break
                if (info["state"] == "terminated"
                        and info["tags"][TAG_RAY_NODE_TYPE] == node_type):
                    info["tags"] = tags
                    info["state"] = "running"
                    self.state.put(node_id, info)
                    launched += 1

    def terminate_node(self, node_id):
        """Mark the node as terminated in the shared state file."""
        workers = self.state.get()
        info = workers[node_id]
        info["state"] = "terminated"
        self.state.put(node_id, info)
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/log_timer.py | Python | import datetime
import logging
logger = logging.getLogger(__name__)
class LogTimer:
    """Context manager that logs the wall-clock duration of its block."""

    def __init__(self, message):
        self._message = message

    def __enter__(self):
        self._start_time = datetime.datetime.utcnow()

    def __exit__(self, *_):
        elapsed = datetime.datetime.utcnow() - self._start_time
        millis = elapsed.total_seconds() * 1000
        logger.info(self._message + " [LogTimer=%.0fms]" % (millis,))
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/node_provider.py | Python | import importlib
import logging
import os
import yaml
logger = logging.getLogger(__name__)
def import_aws():
    """Return (bootstrap_fn, provider_cls) for AWS.

    Imports lazily — presumably so AWS SDK dependencies are only needed
    when this provider is actually selected; TODO confirm.
    """
    from ray.autoscaler.aws.config import bootstrap_aws
    from ray.autoscaler.aws.node_provider import AWSNodeProvider
    return bootstrap_aws, AWSNodeProvider
def import_gcp():
    """Return (bootstrap_fn, provider_cls) for GCP; imports lazily."""
    from ray.autoscaler.gcp.config import bootstrap_gcp
    from ray.autoscaler.gcp.node_provider import GCPNodeProvider
    return bootstrap_gcp, GCPNodeProvider
def import_local():
    """Return (bootstrap_fn, provider_cls) for local clusters; imports lazily."""
    from ray.autoscaler.local.config import bootstrap_local
    from ray.autoscaler.local.node_provider import LocalNodeProvider
    return bootstrap_local, LocalNodeProvider
def import_kubernetes():
    """Return (bootstrap_fn, provider_cls) for Kubernetes; imports lazily."""
    from ray.autoscaler.kubernetes.config import bootstrap_kubernetes
    from ray.autoscaler.kubernetes.node_provider import KubernetesNodeProvider
    return bootstrap_kubernetes, KubernetesNodeProvider
def load_local_example_config():
    """Return the path to the bundled local-provider example config."""
    import ray.autoscaler.local as ray_local
    package_dir = os.path.dirname(ray_local.__file__)
    return os.path.join(package_dir, "example-full.yaml")
def load_kubernetes_example_config():
    """Return the path to the bundled Kubernetes example config."""
    import ray.autoscaler.kubernetes as ray_kubernetes
    package_dir = os.path.dirname(ray_kubernetes.__file__)
    return os.path.join(package_dir, "example-full.yaml")
def load_aws_example_config():
    """Return the path to the bundled AWS example config."""
    import ray.autoscaler.aws as ray_aws
    package_dir = os.path.dirname(ray_aws.__file__)
    return os.path.join(package_dir, "example-full.yaml")
def load_gcp_example_config():
    """Return the path to the bundled GCP example config."""
    import ray.autoscaler.gcp as ray_gcp
    package_dir = os.path.dirname(ray_gcp.__file__)
    return os.path.join(package_dir, "example-full.yaml")
def import_external():
    """Mock a normal provider importer.

    Returns an identity bootstrap function and no provider class; the
    actual class is loaded separately from the config's "module" path.
    """

    def passthrough(config):
        return config

    return passthrough, None
# Maps provider type -> zero-arg importer returning
# (bootstrap_fn, NodeProvider subclass). None entries are unsupported.
NODE_PROVIDERS = {
    "local": import_local,
    "aws": import_aws,
    "gcp": import_gcp,
    "azure": None,  # TODO: support more node providers
    "kubernetes": import_kubernetes,
    "docker": None,
    "external": import_external  # Import an external module
}

# Maps provider type -> zero-arg function returning the path of that
# provider's bundled example-full.yaml config.
DEFAULT_CONFIGS = {
    "local": load_local_example_config,
    "aws": load_aws_example_config,
    "gcp": load_gcp_example_config,
    "azure": None,  # TODO: support more node providers
    "kubernetes": load_kubernetes_example_config,
    "docker": None,
}
def load_class(path):
    """Load a class (or any attribute) at runtime given a full dotted path.

    Example of the path: mypkg.mysubpkg.myclass

    Args:
        path: Fully qualified "module.submodule.ClassName" string.

    Returns:
        The attribute named by the final path component.

    Raises:
        ValueError: If the path contains no module component.
    """
    if "." not in path:
        raise ValueError(
            "You need to pass a valid path like mymodule.provider_class")
    # rsplit keeps everything before the last dot as the module path,
    # replacing the original split/join round-trip.
    module_path, class_str = path.rsplit(".", 1)
    module = importlib.import_module(module_path)
    return getattr(module, class_str)
def get_node_provider(provider_config, cluster_name):
    """Instantiate the NodeProvider matching provider_config["type"]."""
    provider_type = provider_config["type"]
    if provider_type == "external":
        # External providers name their class via a dotted module path.
        provider_cls = load_class(path=provider_config["module"])
        return provider_cls(provider_config, cluster_name)
    importer = NODE_PROVIDERS.get(provider_type)
    if importer is None:
        raise NotImplementedError(
            "Unsupported node provider: {}".format(provider_type))
    _, provider_cls = importer()
    return provider_cls(provider_config, cluster_name)
def get_default_config(provider_config):
    """Load the bundled example config for the given provider type."""
    provider_type = provider_config["type"]
    if provider_type == "external":
        # External providers supply their own config; no defaults exist.
        return {}
    load_config = DEFAULT_CONFIGS.get(provider_type)
    if load_config is None:
        raise NotImplementedError(
            "Unsupported node provider: {}".format(provider_type))
    with open(load_config()) as f:
        return yaml.safe_load(f)
class NodeProvider:
    """Interface for getting and returning nodes from a Cloud.

    NodeProviders are namespaced by the `cluster_name` parameter; they only
    operate on nodes within that namespace.

    Nodes may be in one of three states: {pending, running, terminated}.
    Nodes appear immediately once started by `create_node`, and transition
    immediately to terminated when `terminate_node` is called.
    """

    def __init__(self, provider_config, cluster_name):
        self.provider_config = provider_config
        self.cluster_name = cluster_name

    def non_terminated_nodes(self, tag_filters):
        """Return the ids of all non-terminated nodes matching tag_filters.

        Providers may cache this listing to answer cheap single-node
        queries such as is_running(node_id); call again to refresh.

        Examples:
            >>> provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: "worker"})
            ["node-1", "node-2"]
        """
        raise NotImplementedError

    def is_running(self, node_id):
        """Return whether the specified node is running."""
        raise NotImplementedError

    def is_terminated(self, node_id):
        """Return whether the specified node is terminated."""
        raise NotImplementedError

    def node_tags(self, node_id):
        """Return the string-to-string tag dict of the given node."""
        raise NotImplementedError

    def external_ip(self, node_id):
        """Return the externally reachable ip of the given node."""
        raise NotImplementedError

    def internal_ip(self, node_id):
        """Return the internal (Ray) ip of the given node."""
        raise NotImplementedError

    def create_node(self, node_config, tags, count):
        """Create `count` nodes with the given config and tags."""
        raise NotImplementedError

    def set_node_tags(self, node_id, tags):
        """Set the given tag values (string dict) on the node."""
        raise NotImplementedError

    def terminate_node(self, node_id):
        """Terminate the specified node."""
        raise NotImplementedError

    def terminate_nodes(self, node_ids):
        """Terminate several nodes; subclasses may batch this."""
        for node_id in node_ids:
            logger.info("NodeProvider: "
                        "{}: Terminating node".format(node_id))
            self.terminate_node(node_id)

    def cleanup(self):
        """Clean-up when a Provider is no longer required."""
        pass
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta | |
python/ray/autoscaler/tags.py | Python | """The Ray autoscaler uses tags/labels to associate metadata with instances."""
# Tag for the name of the node
TAG_RAY_NODE_NAME = "ray-node-name"

# Tag for the type of node (e.g. Head, Worker)
TAG_RAY_NODE_TYPE = "ray-node-type"
NODE_TYPE_HEAD = "head"
NODE_TYPE_WORKER = "worker"

# Tag that reports the current state of the node (e.g. Updating, Up-to-date)
TAG_RAY_NODE_STATUS = "ray-node-status"
STATUS_UNINITIALIZED = "uninitialized"
STATUS_WAITING_FOR_SSH = "waiting-for-ssh"
STATUS_SYNCING_FILES = "syncing-files"
STATUS_SETTING_UP = "setting-up"
STATUS_UPDATE_FAILED = "update-failed"
STATUS_UP_TO_DATE = "up-to-date"

# Tag uniquely identifying all nodes of a cluster. Providers apply it as
# a cloud label/pod label when creating and filtering nodes.
TAG_RAY_CLUSTER_NAME = "ray-cluster-name"

# Hash of the node launch config, used to identify out-of-date nodes
TAG_RAY_LAUNCH_CONFIG = "ray-launch-config"

# Hash of the node runtime config, used to determine if updates are needed
TAG_RAY_RUNTIME_CONFIG = "ray-runtime-config"
| zhuohan123/hoplite-rllib | 3 | Python | zhuohan123 | Zhuohan Li | vLLM / Meta |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.