file_path
stringlengths 3
280
| file_language
stringclasses 66
values | content
stringlengths 1
1.04M
| repo_name
stringlengths 5
92
| repo_stars
int64 0
154k
| repo_description
stringlengths 0
402
| repo_primary_language
stringclasses 108
values | developer_username
stringlengths 1
25
| developer_name
stringlengths 0
30
| developer_company
stringlengths 0
82
|
|---|---|---|---|---|---|---|---|---|---|
rllib/offline/json_writer.py
|
Python
|
from datetime import datetime
import json
import logging
import numpy as np
import os
from six.moves.urllib.parse import urlparse
import time
try:
from smart_open import smart_open
except ImportError:
smart_open = None
from ray.rllib.policy.sample_batch import MultiAgentBatch
from ray.rllib.offline.io_context import IOContext
from ray.rllib.offline.output_writer import OutputWriter
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.compression import pack, compression_supported
logger = logging.getLogger(__name__)
@PublicAPI
class JsonWriter(OutputWriter):
    """Writer object that saves experiences in JSON file chunks."""

    @PublicAPI
    def __init__(self,
                 path,
                 ioctx=None,
                 max_file_size=64 * 1024 * 1024,
                 compress_columns=frozenset(["obs", "new_obs"])):
        """Initialize a JsonWriter.

        Arguments:
            path (str): a path/URI of the output directory to save files in.
            ioctx (IOContext): current IO context object.
            max_file_size (int): max size of single files before rolling over.
            compress_columns (list|frozenset): sample batch columns to
                compress before serialization (default: observations).
        """
        self.ioctx = ioctx or IOContext()
        self.max_file_size = max_file_size
        self.compress_columns = compress_columns
        # A non-empty URL scheme (e.g. "s3://bucket/...") means the target is
        # a remote URI written via smart_open, not a local directory.
        if urlparse(path).scheme:
            self.path_is_uri = True
        else:
            path = os.path.abspath(os.path.expanduser(path))
            # Try to create local dirs if they don't exist
            try:
                os.makedirs(path)
            except OSError:
                pass  # already exists
            assert os.path.exists(path), "Failed to create {}".format(path)
            self.path_is_uri = False
        self.path = path
        # Rolling-file state: index for naming the next chunk and byte count
        # of the currently open chunk (drives rollover in _get_file()).
        self.file_index = 0
        self.bytes_written = 0
        self.cur_file = None

    @override(OutputWriter)
    def write(self, sample_batch):
        """Append one sample batch to the current chunk as a JSON line."""
        start = time.time()
        data = _to_json(sample_batch, self.compress_columns)
        f = self._get_file()
        f.write(data)
        f.write("\n")
        if hasattr(f, "flush"):  # legacy smart_open impls
            f.flush()
        # NOTE(review): the trailing newline byte is not counted here, so a
        # chunk can exceed max_file_size by one byte per record — confirm
        # whether exact sizing matters for consumers.
        self.bytes_written += len(data)
        logger.debug("Wrote {} bytes to {} in {}s".format(
            len(data), f,
            time.time() - start))

    def _get_file(self):
        # Lazily open the first chunk, and roll over to a new timestamped
        # file once the current one reaches max_file_size.
        if not self.cur_file or self.bytes_written >= self.max_file_size:
            if self.cur_file:
                self.cur_file.close()
            timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
            path = os.path.join(
                self.path, "output-{}_worker-{}_{}.json".format(
                    timestr, self.ioctx.worker_index, self.file_index))
            if self.path_is_uri:
                if smart_open is None:
                    raise ValueError(
                        "You must install the `smart_open` module to write "
                        "to URIs like {}".format(path))
                self.cur_file = smart_open(path, "w")
            else:
                self.cur_file = open(path, "w")
            self.file_index += 1
            self.bytes_written = 0
            logger.info("Writing to new output file {}".format(self.cur_file))
        return self.cur_file
def _to_jsonable(v, compress):
if compress and compression_supported():
return str(pack(v))
elif isinstance(v, np.ndarray):
return v.tolist()
return v
def _to_json(batch, compress_columns):
    """Serialize a SampleBatch or MultiAgentBatch to a JSON string.

    Columns listed in `compress_columns` are compressed via _to_jsonable.
    The "type" key records which batch class produced the record.
    """
    if isinstance(batch, MultiAgentBatch):
        out = {
            "type": "MultiAgentBatch",
            "count": batch.count,
            "policy_batches": {
                policy_id: {
                    col: _to_jsonable(val, compress=col in compress_columns)
                    for col, val in sub_batch.data.items()
                }
                for policy_id, sub_batch in batch.policy_batches.items()
            },
        }
    else:
        out = {"type": "SampleBatch"}
        for col, val in batch.data.items():
            out[col] = _to_jsonable(val, compress=col in compress_columns)
    return json.dumps(out)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/mixed_input.py
|
Python
|
import numpy as np
from ray.rllib.offline.input_reader import InputReader
from ray.rllib.offline.json_reader import JsonReader
from ray.rllib.utils.annotations import override, DeveloperAPI
@DeveloperAPI
class MixedInput(InputReader):
    """Mixes input from a number of other input sources.

    Examples:
        >>> MixedInput({
            "sampler": 0.4,
            "/tmp/experiences/*.json": 0.4,
            "s3://bucket/expert.json": 0.2,
        }, ioctx)
    """

    @DeveloperAPI
    def __init__(self, dist, ioctx):
        """Initialize a MixedInput.

        Arguments:
            dist (dict): dict mapping JSONReader paths or "sampler" to
                probabilities. The probabilities must sum to 1.0.
            ioctx (IOContext): current IO context object.

        Raises:
            ValueError: if the probabilities do not sum to 1.0.
        """
        # Use a tolerance-based comparison: exact float equality would
        # spuriously reject distributions whose sum is off by one ulp
        # (e.g. sums of repeating binary fractions like 0.1).
        if not np.isclose(sum(dist.values()), 1.0):
            raise ValueError("Values must sum to 1.0: {}".format(dist))
        self.choices = []
        self.p = []
        for k, v in dist.items():
            if k == "sampler":
                # The special "sampler" key reads from the env sampler.
                self.choices.append(ioctx.default_sampler_input())
            else:
                self.choices.append(JsonReader(k))
            self.p.append(v)

    @override(InputReader)
    def next(self):
        """Draw one source according to the mixture weights and read from it."""
        source = np.random.choice(self.choices, p=self.p)
        return source.next()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/off_policy_estimator.py
|
Python
|
from collections import namedtuple
import logging
from ray.rllib.policy.sample_batch import MultiAgentBatch
from ray.rllib.utils.annotations import DeveloperAPI
logger = logging.getLogger(__name__)
OffPolicyEstimate = namedtuple("OffPolicyEstimate",
["estimator_name", "metrics"])
@DeveloperAPI
class OffPolicyEstimator:
    """Interface for an off policy reward estimator."""

    @DeveloperAPI
    def __init__(self, policy, gamma):
        """Creates an off-policy estimator.

        Arguments:
            policy (Policy): Policy to evaluate.
            gamma (float): Discount of the MDP.
        """
        self.policy = policy
        self.gamma = gamma
        # Estimates accumulated by process(); drained by get_metrics().
        self.new_estimates = []

    @classmethod
    def create(cls, ioctx):
        """Create an off-policy estimator from a IOContext."""
        gamma = ioctx.worker.policy_config["gamma"]
        # Grab a reference to the current model
        keys = list(ioctx.worker.policy_map.keys())
        if len(keys) > 1:
            # Only single-policy (single-agent) setups are supported.
            raise NotImplementedError(
                "Off-policy estimation is not implemented for multi-agent. "
                "You can set `input_evaluation: []` to resolve this.")
        policy = ioctx.worker.get_policy(keys[0])
        return cls(policy, gamma)

    @DeveloperAPI
    def estimate(self, batch):
        """Returns an estimate for the given batch of experiences.

        The batch will only contain data from one episode, but it may only be
        a fragment of an episode.
        """
        raise NotImplementedError

    @DeveloperAPI
    def action_prob(self, batch):
        """Returns the probs for the batch actions for the current policy."""
        # Collect any RNN state columns ("state_in_0", "state_in_1", ...) so
        # recurrent policies can recompute their action distribution.
        num_state_inputs = 0
        for k in batch.keys():
            if k.startswith("state_in_"):
                num_state_inputs += 1
        state_keys = ["state_in_{}".format(i) for i in range(num_state_inputs)]
        _, _, info = self.policy.compute_actions(
            obs_batch=batch["obs"],
            state_batches=[batch[k] for k in state_keys],
            prev_action_batch=batch.data.get("prev_action"),
            prev_reward_batch=batch.data.get("prev_reward"),
            info_batch=batch.data.get("info"))
        if "action_prob" not in info:
            raise ValueError(
                "Off-policy estimation is not possible unless the policy "
                "returns action probabilities when computing actions (i.e., "
                "the 'action_prob' key is output by the policy). You "
                "can set `input_evaluation: []` to resolve this.")
        return info["action_prob"]

    @DeveloperAPI
    def process(self, batch):
        # Compute an estimate and queue it for retrieval via get_metrics().
        self.new_estimates.append(self.estimate(batch))

    @DeveloperAPI
    def check_can_estimate_for(self, batch):
        """Returns whether we can support OPE for this batch."""
        # Note: despite the docstring, this raises ValueError on unsupported
        # batches rather than returning False; on success it returns None.
        if isinstance(batch, MultiAgentBatch):
            raise ValueError(
                "IS-estimation is not implemented for multi-agent batches. "
                "You can set `input_evaluation: []` to resolve this.")

        if "action_prob" not in batch:
            raise ValueError(
                "Off-policy estimation is not possible unless the inputs "
                "include action probabilities (i.e., the policy is stochastic "
                "and emits the 'action_prob' key). For DQN this means using "
                "`soft_q: True`. You can also set `input_evaluation: []` to "
                "disable estimation.")

    @DeveloperAPI
    def get_metrics(self):
        """Return a list of new episode metric estimates since the last call.

        Returns:
            list of OffPolicyEstimate objects.
        """
        # Hand back and reset the accumulator in one step.
        out = self.new_estimates
        self.new_estimates = []
        return out
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/output_writer.py
|
Python
|
from ray.rllib.utils.annotations import override
from ray.rllib.utils.annotations import PublicAPI
@PublicAPI
class OutputWriter:
    """Writer object for saving experiences from policy evaluation."""

    @PublicAPI
    def write(self, sample_batch):
        """Save a batch of experiences.

        Arguments:
            sample_batch: SampleBatch or MultiAgentBatch to save.

        Raises:
            NotImplementedError: always; subclasses must override this.
        """
        raise NotImplementedError
class NoopOutput(OutputWriter):
    """Output writer that discards its outputs."""

    @override(OutputWriter)
    def write(self, sample_batch):
        # Intentionally a no-op: used when experience output is disabled.
        pass
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/shuffled_input.py
|
Python
|
import logging
import random
from ray.rllib.offline.input_reader import InputReader
from ray.rllib.utils.annotations import override, DeveloperAPI
logger = logging.getLogger(__name__)
@DeveloperAPI
class ShuffledInput(InputReader):
    """Randomizes data over a sliding window buffer of N batches.

    This increases the randomization of the data, which is useful if the
    batches were not in random order to start with.
    """

    @DeveloperAPI
    def __init__(self, child, n=0):
        """Initialize a ShuffledInput.

        Arguments:
            child (InputReader): child input reader to shuffle.
            n (int): if positive, shuffle input over this many batches.
        """
        self.n = n
        self.child = child
        self.buffer = []

    @override(InputReader)
    def next(self):
        # With a window of one or less, shuffling is a no-op: pass through.
        if self.n <= 1:
            return self.child.next()

        # Lazily fill the window the first time we are asked for a batch.
        if len(self.buffer) < self.n:
            logger.info("Filling shuffle buffer to {} batches".format(self.n))
            while len(self.buffer) < self.n:
                self.buffer.append(self.child.next())
            logger.info("Shuffle buffer filled")

        # Overwrite one random slot with a fresh batch, then emit a
        # uniformly random element of the window.
        victim = random.randint(0, len(self.buffer) - 1)
        self.buffer[victim] = self.child.next()
        return random.choice(self.buffer)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/wis_estimator.py
|
Python
|
from ray.rllib.offline.off_policy_estimator import OffPolicyEstimator, \
OffPolicyEstimate
from ray.rllib.utils.annotations import override
class WeightedImportanceSamplingEstimator(OffPolicyEstimator):
    """The weighted step-wise IS estimator.

    Step-wise WIS estimator in https://arxiv.org/pdf/1511.03722.pdf"""

    def __init__(self, policy, gamma):
        OffPolicyEstimator.__init__(self, policy, gamma)
        # Running sums and counts of the cumulative importance ratios at each
        # time step, accumulated across all episodes seen so far; used to
        # normalize the per-step weights (the "weighted" part of WIS).
        self.filter_values = []
        self.filter_counts = []

    @override(OffPolicyEstimator)
    def estimate(self, batch):
        """Return the step-wise WIS estimate for one (partial) episode batch."""
        self.check_can_estimate_for(batch)
        rewards, old_prob = batch["rewards"], batch["action_prob"]
        new_prob = self.action_prob(batch)

        # calculate importance ratios
        # p[t] is the cumulative product of per-step ratios
        # new_prob / old_prob up to and including time t.
        # NOTE(review): all loops below run to batch.count - 1, so the last
        # timestep of the batch never contributes — confirm this off-by-one
        # is intended rather than range(batch.count).
        p = []
        for t in range(batch.count - 1):
            if t == 0:
                pt_prev = 1.0
            else:
                pt_prev = p[t - 1]
            p.append(pt_prev * new_prob[t] / old_prob[t])

        # Fold this episode's ratios into the running normalizers.
        for t, v in enumerate(p):
            if t >= len(self.filter_values):
                self.filter_values.append(v)
                self.filter_counts.append(1.0)
            else:
                self.filter_values[t] += v
                self.filter_counts[t] += 1.0

        # calculate stepwise weighted IS estimate
        # V_prev: discounted return under the behavior policy's data;
        # V_step_WIS: the same rewards reweighted by normalized ratios.
        V_prev, V_step_WIS = 0.0, 0.0
        for t in range(batch.count - 1):
            V_prev += rewards[t] * self.gamma**t
            w_t = self.filter_values[t] / self.filter_counts[t]
            V_step_WIS += p[t] / w_t * rewards[t] * self.gamma**t

        estimation = OffPolicyEstimate(
            "wis", {
                "V_prev": V_prev,
                "V_step_WIS": V_step_WIS,
                # max() guards against division by ~zero returns.
                "V_gain_est": V_step_WIS / max(1e-8, V_prev),
            })
        return estimation
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/__init__.py
|
Python
|
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.optimizers.async_replay_optimizer import AsyncReplayOptimizer
from ray.rllib.optimizers.async_samples_optimizer import AsyncSamplesOptimizer
from ray.rllib.optimizers.async_gradients_optimizer import \
AsyncGradientsOptimizer
from ray.rllib.optimizers.sync_samples_optimizer import SyncSamplesOptimizer
from ray.rllib.optimizers.sync_replay_optimizer import SyncReplayOptimizer
from ray.rllib.optimizers.sync_batch_replay_optimizer import \
SyncBatchReplayOptimizer
from ray.rllib.optimizers.microbatch_optimizer import MicrobatchOptimizer
from ray.rllib.optimizers.multi_gpu_optimizer import LocalMultiGPUOptimizer
__all__ = [
"PolicyOptimizer",
"AsyncReplayOptimizer",
"AsyncSamplesOptimizer",
"AsyncGradientsOptimizer",
"MicrobatchOptimizer",
"SyncSamplesOptimizer",
"SyncReplayOptimizer",
"LocalMultiGPUOptimizer",
"SyncBatchReplayOptimizer",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/aso_aggregator.py
|
Python
|
"""Helper class for AsyncSamplesOptimizer."""
import numpy as np
import random
import ray
from ray.rllib.utils.actors import TaskPool
from ray.rllib.utils.annotations import override
from ray.rllib.utils.memory import ray_get_and_free
class Aggregator:
    """Collects and processes sample batches gathered from rollout workers.

    Implementations of this interface encapsulate the sample-collection
    strategy. For example, a tree of actors may be used to collect samples,
    which can offload expensive work such as concatenating and decompressing
    sample batches.

    Attributes:
        local_worker: local RolloutWorker copy
    """

    def iter_train_batches(self):
        """Return a generator yielding batches that are ready to learn on.

        Advancing the generator may also push weight updates out to remote
        workers as needed, and may block until results become available.
        """
        raise NotImplementedError

    def broadcast_new_weights(self):
        """Broadcast a new set of weights from the local workers."""
        raise NotImplementedError

    def should_broadcast(self):
        """Return True if broadcast_new_weights() should now be called."""
        raise NotImplementedError

    def stats(self):
        """Return a dict of runtime statistics for debugging."""
        raise NotImplementedError

    def reset(self, remote_workers):
        """Swap in a new set of remote workers to pull samples from."""
        raise NotImplementedError
class AggregationWorkerBase:
    """Aggregators should extend from this class."""

    def __init__(self, initial_weights_obj_id, remote_workers,
                 max_sample_requests_in_flight_per_worker, replay_proportion,
                 replay_buffer_num_slots, train_batch_size, sample_batch_size):
        """Initialize an aggregator.

        Arguments:
            initial_weights_obj_id (ObjectID): initial worker weights
            remote_workers (list): set of remote workers assigned to this agg
            max_sample_request_in_flight_per_worker (int): max queue size per
                worker
            replay_proportion (float): ratio of replay to sampled outputs
            replay_buffer_num_slots (int): max number of sample batches to
                store in the replay buffer
            train_batch_size (int): size of batches to learn on
            sample_batch_size (int): size of batches to sample from workers
        """
        self.broadcasted_weights = initial_weights_obj_id
        self.remote_workers = remote_workers
        self.sample_batch_size = sample_batch_size
        self.train_batch_size = train_batch_size

        if replay_proportion:
            # Replay is only sound if the buffer can hold at least a full
            # train batch worth of samples.
            if replay_buffer_num_slots * sample_batch_size <= train_batch_size:
                raise ValueError(
                    "Replay buffer size is too small to produce train, "
                    "please increase replay_buffer_num_slots.",
                    replay_buffer_num_slots, sample_batch_size,
                    train_batch_size)

        # Kick off async background sampling
        self.sample_tasks = TaskPool()
        for ev in self.remote_workers:
            ev.set_weights.remote(self.broadcasted_weights)
            for _ in range(max_sample_requests_in_flight_per_worker):
                self.sample_tasks.add(ev, ev.sample.remote())

        self.batch_buffer = []  # accumulates batches up to train_batch_size

        self.replay_proportion = replay_proportion
        self.replay_buffer_num_slots = replay_buffer_num_slots
        self.replay_batches = []  # fixed-size ring of replayable batches
        self.replay_index = 0

        # Counters exposed via stats() and used for weight broadcasting.
        self.num_sent_since_broadcast = 0
        self.num_weight_syncs = 0
        self.num_replayed = 0

    @override(Aggregator)
    def iter_train_batches(self, max_yield=999):
        """Iterate over train batches.

        Arguments:
            max_yield (int): Max number of batches to iterate over in this
                cycle. Setting this avoids iter_train_batches returning too
                much data at once.
        """
        for ev, sample_batch in self._augment_with_replay(
                self.sample_tasks.completed_prefetch(
                    blocking_wait=True, max_yield=max_yield)):
            sample_batch.decompress_if_needed()
            self.batch_buffer.append(sample_batch)
            # Emit a train batch once enough samples have accumulated.
            if sum(b.count
                   for b in self.batch_buffer) >= self.train_batch_size:
                if len(self.batch_buffer) == 1:
                    # make a defensive copy to avoid sharing plasma memory
                    # across multiple threads
                    train_batch = self.batch_buffer[0].copy()
                else:
                    train_batch = self.batch_buffer[0].concat_samples(
                        self.batch_buffer)
                yield train_batch
                self.batch_buffer = []

            # If the batch was replayed, skip the update below.
            if ev is None:
                continue

            # Put in replay buffer if enabled
            if self.replay_buffer_num_slots > 0:
                if len(self.replay_batches) < self.replay_buffer_num_slots:
                    self.replay_batches.append(sample_batch)
                else:
                    # Buffer full: overwrite slots in ring-buffer order.
                    self.replay_batches[self.replay_index] = sample_batch
                    self.replay_index += 1
                    self.replay_index %= self.replay_buffer_num_slots

            # Refresh this worker's weights and track sync counters.
            ev.set_weights.remote(self.broadcasted_weights)
            self.num_weight_syncs += 1
            self.num_sent_since_broadcast += 1

            # Kick off another sample request
            self.sample_tasks.add(ev, ev.sample.remote())

    @override(Aggregator)
    def stats(self):
        """Return debugging counters for this aggregator."""
        return {
            "num_weight_syncs": self.num_weight_syncs,
            "num_steps_replayed": self.num_replayed,
        }

    @override(Aggregator)
    def reset(self, remote_workers):
        """Point the background sample tasks at a new set of workers."""
        self.sample_tasks.reset_workers(remote_workers)

    def _augment_with_replay(self, sample_futures):
        # Interleave replayed batches with freshly sampled ones at an average
        # rate of `replay_proportion` replays per fresh sample.
        def can_replay():
            # Only replay once the buffer holds more than enough batches to
            # form a full train batch on its own.
            num_needed = int(
                np.ceil(self.train_batch_size / self.sample_batch_size))
            return len(self.replay_batches) > num_needed

        for ev, sample_batch in sample_futures:
            sample_batch = ray_get_and_free(sample_batch)
            yield ev, sample_batch

            if can_replay():
                f = self.replay_proportion
                # e.g. f == 1.5 always yields one replay plus another with
                # probability 0.5; replayed batches carry ev is None so the
                # caller skips worker bookkeeping for them.
                while random.random() < f:
                    f -= 1
                    replay_batch = random.choice(self.replay_batches)
                    self.num_replayed += replay_batch.count
                    yield None, replay_batch
class SimpleAggregator(AggregationWorkerBase, Aggregator):
    """Simple single-threaded implementation of an Aggregator."""

    def __init__(self,
                 workers,
                 max_sample_requests_in_flight_per_worker=2,
                 replay_proportion=0.0,
                 replay_buffer_num_slots=0,
                 train_batch_size=500,
                 sample_batch_size=50,
                 broadcast_interval=5):
        """Initialize a simple aggregator.

        Arguments:
            workers (WorkerSet): set of local and remote rollout workers.
            max_sample_requests_in_flight_per_worker (int): max queue size per
                worker
            replay_proportion (float): ratio of replay to sampled outputs
            replay_buffer_num_slots (int): max number of sample batches to
                store in the replay buffer
            train_batch_size (int): size of batches to learn on
            sample_batch_size (int): size of batches to sample from workers
            broadcast_interval (int): number of batches to send out before
                re-broadcasting fresh weights
        """
        self.workers = workers
        self.local_worker = workers.local_worker()
        self.broadcast_interval = broadcast_interval
        # Publish the initial weights before background sampling starts.
        self.broadcast_new_weights()
        AggregationWorkerBase.__init__(
            self, self.broadcasted_weights, self.workers.remote_workers(),
            max_sample_requests_in_flight_per_worker, replay_proportion,
            replay_buffer_num_slots, train_batch_size, sample_batch_size)

    @override(Aggregator)
    def broadcast_new_weights(self):
        # Put the latest local weights into the object store so remote
        # workers can fetch them by reference.
        self.broadcasted_weights = ray.put(self.local_worker.get_weights())
        self.num_sent_since_broadcast = 0

    @override(Aggregator)
    def should_broadcast(self):
        # Re-broadcast after enough batches have gone out with old weights.
        return self.num_sent_since_broadcast >= self.broadcast_interval
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/aso_learner.py
|
Python
|
"""Helper class for AsyncSamplesOptimizer."""
import threading
from six.moves import queue
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.optimizers.aso_minibatch_buffer import MinibatchBuffer
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.window_stat import WindowStat
class LearnerThread(threading.Thread):
    """Background thread that updates the local model from sample trajectories.

    This is for use with AsyncSamplesOptimizer.

    The learner thread communicates with the main thread through Queues. This
    is needed since Ray operations can only be run on the main thread. In
    addition, moving heavyweight gradient ops session runs off the main thread
    improves overall throughput.
    """

    def __init__(self, local_worker, minibatch_buffer_size, num_sgd_iter,
                 learner_queue_size, learner_queue_timeout):
        """Initialize the learner thread.

        Arguments:
            local_worker (RolloutWorker): process local rollout worker holding
                policies this thread will call learn_on_batch() on
            minibatch_buffer_size (int): max number of train batches to store
                in the minibatching buffer
            num_sgd_iter (int): number of passes to learn on per train batch
            learner_queue_size (int): max size of queue of inbound
                train batches to this thread
            learner_queue_timeout (int): raise an exception if the queue has
                been empty for this long in seconds
        """
        threading.Thread.__init__(self)
        self.learner_queue_size = WindowStat("size", 50)
        self.local_worker = local_worker
        # Inbound train batches from the main thread; outbound counts of
        # timesteps learned on, reported back per step().
        self.inqueue = queue.Queue(maxsize=learner_queue_size)
        self.outqueue = queue.Queue()
        # The minibatch buffer re-emits each inbound batch num_sgd_iter times.
        self.minibatch_buffer = MinibatchBuffer(
            inqueue=self.inqueue,
            size=minibatch_buffer_size,
            timeout=learner_queue_timeout,
            num_passes=num_sgd_iter,
            init_num_passes=num_sgd_iter)
        self.queue_timer = TimerStat()
        self.grad_timer = TimerStat()
        self.load_timer = TimerStat()
        self.load_wait_timer = TimerStat()
        self.daemon = True  # don't block process exit on this thread
        self.weights_updated = False
        self.stats = {}
        self.stopped = False
        self.num_steps = 0

    def run(self):
        # Loop until a controller sets self.stopped = True.
        while not self.stopped:
            self.step()

    def step(self):
        # Pull one train batch (possibly a repeated pass over a recent one).
        with self.queue_timer:
            batch, _ = self.minibatch_buffer.get()

        with self.grad_timer:
            fetches = self.local_worker.learn_on_batch(batch)
            self.weights_updated = True
            self.stats = get_learner_stats(fetches)

        self.num_steps += 1
        # Report the number of timesteps learned on to the main thread.
        self.outqueue.put(batch.count)
        self.learner_queue_size.push(self.inqueue.qsize())
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/aso_minibatch_buffer.py
|
Python
|
"""Helper class for AsyncSamplesOptimizer."""
class MinibatchBuffer:
    """Ring buffer of recent data batches for minibatch SGD.

    This is for use with AsyncSamplesOptimizer.
    """

    def __init__(self, inqueue, size, timeout, num_passes, init_num_passes=1):
        """Initialize a minibatch buffer.

        Arguments:
            inqueue: Queue to populate the internal ring buffer from.
            size: Max number of data items to buffer.
            timeout: Queue timeout
            num_passes: Max num times each data item should be emitted.
            init_num_passes: Initial max passes for each data item
        """
        self.inqueue = inqueue
        self.size = size
        self.timeout = timeout
        # Each slot may be handed out at most max_ttl times; we start at a
        # lower TTL and ramp up so the earliest items are not over-reused.
        self.max_ttl = num_passes
        self.cur_max_ttl = init_num_passes
        self.buffers = [None for _ in range(size)]
        self.ttl = [0 for _ in range(size)]
        self.idx = 0

    def get(self):
        """Get a new batch from the internal ring buffer.

        Returns:
            buf: Data item saved from inqueue.
            released: True if the item is now removed from the ring buffer.
        """
        slot = self.idx
        if self.ttl[slot] <= 0:
            # Slot exhausted: refill it from the queue and reset its TTL,
            # ramping the per-slot pass budget toward its maximum.
            self.buffers[slot] = self.inqueue.get(timeout=self.timeout)
            self.ttl[slot] = self.cur_max_ttl
            if self.cur_max_ttl < self.max_ttl:
                self.cur_max_ttl += 1
        item = self.buffers[slot]
        self.ttl[slot] -= 1
        done = self.ttl[slot] <= 0
        if done:
            # Drop our reference so the item can be garbage collected.
            self.buffers[slot] = None
        self.idx = (slot + 1) % len(self.buffers)
        return item, done
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/aso_multi_gpu_learner.py
|
Python
|
"""Helper class for AsyncSamplesOptimizer."""
import logging
import threading
import math
from six.moves import queue
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.optimizers.aso_learner import LearnerThread
from ray.rllib.optimizers.aso_minibatch_buffer import MinibatchBuffer
from ray.rllib.optimizers.multi_gpu_impl import LocalSyncParallelOptimizer
from ray.rllib.utils.annotations import override
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
class TFMultiGPULearner(LearnerThread):
    """Learner that can use multiple GPUs and parallel loading.

    This is for use with AsyncSamplesOptimizer.
    """

    def __init__(self,
                 local_worker,
                 num_gpus=1,
                 lr=0.0005,
                 train_batch_size=500,
                 num_data_loader_buffers=1,
                 minibatch_buffer_size=1,
                 num_sgd_iter=1,
                 learner_queue_size=16,
                 learner_queue_timeout=300,
                 num_data_load_threads=16,
                 _fake_gpus=False):
        """Initialize a multi-gpu learner thread.

        Arguments:
            local_worker (RolloutWorker): process local rollout worker holding
                policies this thread will call learn_on_batch() on
            num_gpus (int): number of GPUs to use for data-parallel SGD
            lr (float): learning rate
            train_batch_size (int): size of batches to learn on
            num_data_loader_buffers (int): number of buffers to load data into
                in parallel. Each buffer is of size of train_batch_size and
                increases GPU memory usage proportionally.
            minibatch_buffer_size (int): max number of train batches to store
                in the minibatching buffer
            num_sgd_iter (int): number of passes to learn on per train batch
            learner_queue_size (int): max size of queue of inbound
                train batches to this thread
            num_data_loader_threads (int): number of threads to use to load
                data into GPU memory in parallel
        """
        LearnerThread.__init__(self, local_worker, minibatch_buffer_size,
                               num_sgd_iter, learner_queue_size,
                               learner_queue_timeout)
        self.lr = lr
        self.train_batch_size = train_batch_size
        # Pick the device list; _fake_gpus maps "GPUs" onto CPU devices so
        # the multi-GPU code path can be tested without physical GPUs.
        if not num_gpus:
            self.devices = ["/cpu:0"]
        elif _fake_gpus:
            self.devices = [
                "/cpu:{}".format(i) for i in range(int(math.ceil(num_gpus)))
            ]
        else:
            self.devices = [
                "/gpu:{}".format(i) for i in range(int(math.ceil(num_gpus)))
            ]
        logger.info("TFMultiGPULearner devices {}".format(self.devices))
        # The train batch is split evenly across devices, so it must divide.
        assert self.train_batch_size % len(self.devices) == 0
        assert self.train_batch_size >= len(self.devices), "batch too small"

        if set(self.local_worker.policy_map.keys()) != {DEFAULT_POLICY_ID}:
            raise NotImplementedError("Multi-gpu mode for multi-agent")
        self.policy = self.local_worker.policy_map[DEFAULT_POLICY_ID]

        # per-GPU graph copies created below must share vars with the policy
        # reuse is set to AUTO_REUSE because Adam nodes are created after
        # all of the device copies are created.
        self.par_opt = []
        with self.local_worker.tf_sess.graph.as_default():
            with self.local_worker.tf_sess.as_default():
                with tf.variable_scope(DEFAULT_POLICY_ID, reuse=tf.AUTO_REUSE):
                    if self.policy._state_inputs:
                        rnn_inputs = self.policy._state_inputs + [
                            self.policy._seq_lens
                        ]
                    else:
                        rnn_inputs = []
                    adam = tf.train.AdamOptimizer(self.lr)
                    for _ in range(num_data_loader_buffers):
                        self.par_opt.append(
                            LocalSyncParallelOptimizer(
                                adam,
                                self.devices,
                                [v for _, v in self.policy._loss_inputs],
                                rnn_inputs,
                                999999,  # it will get rounded down
                                self.policy.copy))

        self.sess = self.local_worker.tf_sess
        self.sess.run(tf.global_variables_initializer())

        # Parallel optimizers cycle between "idle" (awaiting a data load)
        # and "ready" (loaded, awaiting SGD) queues.
        self.idle_optimizers = queue.Queue()
        self.ready_optimizers = queue.Queue()
        for opt in self.par_opt:
            self.idle_optimizers.put(opt)
        # NOTE(review): only the last created loader thread is kept in
        # self.loader_thread (it's the one checked by step()'s liveness
        # assert) — confirm the earlier threads needn't be tracked.
        for i in range(num_data_load_threads):
            self.loader_thread = _LoaderThread(self, share_stats=(i == 0))
            self.loader_thread.start()

        # Replace the base-class buffer: minibatches are now drawn from the
        # ready-optimizer queue instead of the raw inbound batch queue.
        self.minibatch_buffer = MinibatchBuffer(
            self.ready_optimizers, minibatch_buffer_size,
            learner_queue_timeout, num_sgd_iter)

    @override(LearnerThread)
    def step(self):
        assert self.loader_thread.is_alive()
        with self.load_wait_timer:
            opt, released = self.minibatch_buffer.get()

        with self.grad_timer:
            fetches = opt.optimize(self.sess, 0)
            self.weights_updated = True
            self.stats = get_learner_stats(fetches)

        if released:
            # All SGD passes done: recycle this optimizer for reloading.
            self.idle_optimizers.put(opt)

        self.outqueue.put(opt.num_tuples_loaded)
        self.learner_queue_size.push(self.inqueue.qsize())
class _LoaderThread(threading.Thread):
    """Background thread that loads train batches into optimizer buffers."""

    def __init__(self, learner, share_stats):
        threading.Thread.__init__(self)
        self.learner = learner
        self.daemon = True
        # Only one loader thread shares its timers with the learner so that
        # reported stats are not double counted across loader threads.
        if share_stats:
            self.queue_timer = learner.queue_timer
            self.load_timer = learner.load_timer
        else:
            self.queue_timer = TimerStat()
            self.load_timer = TimerStat()

    def run(self):
        while True:
            self._step()

    def _step(self):
        # Move one batch from the learner's inqueue into an idle parallel
        # optimizer, then mark that optimizer ready for the learner thread.
        s = self.learner
        with self.queue_timer:
            batch = s.inqueue.get()

        opt = s.idle_optimizers.get()

        with self.load_timer:
            tuples = s.policy._get_loss_inputs_dict(batch, shuffle=False)
            data_keys = [ph for _, ph in s.policy._loss_inputs]
            if s.policy._state_inputs:
                # Recurrent policies also need their state and seq-len inputs.
                state_keys = s.policy._state_inputs + [s.policy._seq_lens]
            else:
                state_keys = []
            opt.load_data(s.sess, [tuples[k] for k in data_keys],
                          [tuples[k] for k in state_keys])

        s.ready_optimizers.put(opt)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/aso_tree_aggregator.py
|
Python
|
"""Helper class for AsyncSamplesOptimizer."""
import collections
import logging
import os
import time
import ray
from ray.rllib.utils.actors import TaskPool, create_colocated
from ray.rllib.utils.annotations import override
from ray.rllib.optimizers.aso_aggregator import Aggregator, \
AggregationWorkerBase
from ray.rllib.utils.memory import ray_get_and_free
logger = logging.getLogger(__name__)
class TreeAggregator(Aggregator):
"""A hierarchical experiences aggregator.
The given set of remote workers is divided into subsets and assigned to
one of several aggregation workers. These aggregation workers collate
experiences into batches of size `train_batch_size` and we collect them
in this class when `iter_train_batches` is called.
"""
def __init__(self,
workers,
num_aggregation_workers,
max_sample_requests_in_flight_per_worker=2,
replay_proportion=0.0,
replay_buffer_num_slots=0,
train_batch_size=500,
sample_batch_size=50,
broadcast_interval=5):
"""Initialize a tree aggregator.
Arguments:
workers (WorkerSet): set of all workers
num_aggregation_workers (int): number of intermediate actors to
use for data aggregation
max_sample_request_in_flight_per_worker (int): max queue size per
worker
replay_proportion (float): ratio of replay to sampled outputs
replay_buffer_num_slots (int): max number of sample batches to
store in the replay buffer
train_batch_size (int): size of batches to learn on
sample_batch_size (int): size of batches to sample from workers
broadcast_interval (int): max number of workers to send the
same set of weights to
"""
self.workers = workers
self.num_aggregation_workers = num_aggregation_workers
self.max_sample_requests_in_flight_per_worker = \
max_sample_requests_in_flight_per_worker
self.replay_proportion = replay_proportion
self.replay_buffer_num_slots = replay_buffer_num_slots
self.sample_batch_size = sample_batch_size
self.train_batch_size = train_batch_size
self.broadcast_interval = broadcast_interval
self.broadcasted_weights = ray.put(
workers.local_worker().get_weights())
self.num_batches_processed = 0
self.num_broadcasts = 0
self.num_sent_since_broadcast = 0
self.initialized = False
def init(self, aggregators):
"""Deferred init so that we can pass in previously created workers."""
assert len(aggregators) == self.num_aggregation_workers, aggregators
if len(self.workers.remote_workers()) < self.num_aggregation_workers:
raise ValueError(
"The number of aggregation workers should not exceed the "
"number of total evaluation workers ({} vs {})".format(
self.num_aggregation_workers,
len(self.workers.remote_workers())))
assigned_workers = collections.defaultdict(list)
for i, ev in enumerate(self.workers.remote_workers()):
assigned_workers[i % self.num_aggregation_workers].append(ev)
self.aggregators = aggregators
for i, agg in enumerate(self.aggregators):
agg.init.remote(self.broadcasted_weights, assigned_workers[i],
self.max_sample_requests_in_flight_per_worker,
self.replay_proportion,
self.replay_buffer_num_slots,
self.train_batch_size, self.sample_batch_size)
self.agg_tasks = TaskPool()
for agg in self.aggregators:
agg.set_weights.remote(self.broadcasted_weights)
self.agg_tasks.add(agg, agg.get_train_batches.remote())
self.initialized = True
@override(Aggregator)
def iter_train_batches(self):
assert self.initialized, "Must call init() before using this class."
for agg, batches in self.agg_tasks.completed_prefetch():
for b in ray_get_and_free(batches):
self.num_sent_since_broadcast += 1
yield b
agg.set_weights.remote(self.broadcasted_weights)
self.agg_tasks.add(agg, agg.get_train_batches.remote())
self.num_batches_processed += 1
@override(Aggregator)
def broadcast_new_weights(self):
self.broadcasted_weights = ray.put(
self.workers.local_worker().get_weights())
self.num_sent_since_broadcast = 0
self.num_broadcasts += 1
@override(Aggregator)
def should_broadcast(self):
return self.num_sent_since_broadcast >= self.broadcast_interval
@override(Aggregator)
def stats(self):
return {
"num_broadcasts": self.num_broadcasts,
"num_batches_processed": self.num_batches_processed,
}
@override(Aggregator)
def reset(self, remote_workers):
raise NotImplementedError("changing number of remote workers")
    @staticmethod
    def precreate_aggregators(n):
        # Create `n` AggregationWorker actors up front (presumably colocated
        # on one node, per create_colocated — TODO confirm); they must later
        # be passed to init() for their deferred setup.
        return create_colocated(AggregationWorker, [], n)
@ray.remote(num_cpus=1)
class AggregationWorker(AggregationWorkerBase):
    """Remote actor that aggregates sample batches from a set of sub-workers.

    The actor is constructed empty; real setup is deferred to init() so that
    actors can be created (and placed) before their sub-workers are assigned.
    """

    def __init__(self):
        self.initialized = False

    def init(self, initial_weights_obj_id, remote_workers,
             max_sample_requests_in_flight_per_worker, replay_proportion,
             replay_buffer_num_slots, train_batch_size, sample_batch_size):
        """Deferred init that hands this aggregator its sub-workers."""
        logger.info("Assigned workers {} to aggregation worker {}".format(
            remote_workers, self))
        assert remote_workers
        AggregationWorkerBase.__init__(
            self, initial_weights_obj_id, remote_workers,
            max_sample_requests_in_flight_per_worker, replay_proportion,
            replay_buffer_num_slots, train_batch_size, sample_batch_size)
        self.initialized = True

    def set_weights(self, weights):
        # Cache the newest broadcasted weights object for future syncs.
        self.broadcasted_weights = weights

    def get_train_batches(self):
        """Block until at least one train batch is ready, then return all."""
        assert self.initialized, "Must call init() before using this class."
        t0 = time.time()
        collected = []
        while True:
            collected.extend(self.iter_train_batches(max_yield=5))
            if collected:
                break
            # Nothing ready yet; back off briefly before polling again.
            time.sleep(0.01)
        logger.debug("Returning {} train batches, {}s".format(
            len(collected),
            time.time() - t0))
        return collected

    def get_host(self):
        """Return this node's hostname."""
        return os.uname()[1]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/async_gradients_optimizer.py
|
Python
|
import ray
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.utils.annotations import override
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.memory import ray_get_and_free
class AsyncGradientsOptimizer(PolicyOptimizer):
    """An asynchronous RL optimizer, e.g. for implementing A3C.

    This optimizer asynchronously pulls and applies gradients from remote
    workers, sending updated weights back as needed. This pipelines the
    gradient computations on the remote workers.
    """

    def __init__(self, workers, grads_per_step=100):
        """Initialize an async gradients optimizer.

        Arguments:
            workers (WorkerSet): the local and remote rollout workers.
            grads_per_step (int): The number of gradients to collect and apply
                per each call to step(). This number should be sufficiently
                high to amortize the overhead of calling step().
        """
        PolicyOptimizer.__init__(self, workers)

        self.apply_timer = TimerStat()
        self.wait_timer = TimerStat()
        self.dispatch_timer = TimerStat()
        self.grads_per_step = grads_per_step
        self.learner_stats = {}
        if not self.workers.remote_workers():
            raise ValueError(
                "Async optimizer requires at least 1 remote workers")

    @override(PolicyOptimizer)
    def step(self):
        # Snapshot current weights once for the initial wave; re-dispatched
        # tasks below pull fresh weights after each local apply.
        weights = ray.put(self.workers.local_worker().get_weights())
        pending_gradients = {}
        num_gradients = 0

        # Kick off the first wave of async tasks
        for e in self.workers.remote_workers():
            e.set_weights.remote(weights)
            future = e.compute_gradients.remote(e.sample.remote())
            pending_gradients[future] = e
            num_gradients += 1

        while pending_gradients:
            # Wait for any one gradient to complete, then apply it locally.
            with self.wait_timer:
                wait_results = ray.wait(
                    list(pending_gradients.keys()), num_returns=1)
                ready_list = wait_results[0]
                future = ready_list[0]
                gradient, info = ray_get_and_free(future)
                e = pending_gradients.pop(future)

            self.learner_stats = get_learner_stats(info)

            if gradient is not None:
                with self.apply_timer:
                    self.workers.local_worker().apply_gradients(gradient)
                self.num_steps_sampled += info["batch_count"]
                self.num_steps_trained += info["batch_count"]

            # Keep the pipeline full until the per-step gradient budget is hit.
            if num_gradients < self.grads_per_step:
                with self.dispatch_timer:
                    e.set_weights.remote(
                        self.workers.local_worker().get_weights())
                    future = e.compute_gradients.remote(e.sample.remote())
                    pending_gradients[future] = e
                    num_gradients += 1

    @override(PolicyOptimizer)
    def stats(self):
        # Timings are reported in milliseconds.
        return dict(
            PolicyOptimizer.stats(self), **{
                "wait_time_ms": round(1000 * self.wait_timer.mean, 3),
                "apply_time_ms": round(1000 * self.apply_timer.mean, 3),
                "dispatch_time_ms": round(1000 * self.dispatch_timer.mean, 3),
                "learner": self.learner_stats,
            })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/async_replay_optimizer.py
|
Python
|
"""Implements Distributed Prioritized Experience Replay.
https://arxiv.org/abs/1803.00933"""
import collections
import os
import random
import time
import threading
import numpy as np
from six.moves import queue
import ray
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.optimizers.replay_buffer import PrioritizedReplayBuffer
from ray.rllib.utils.annotations import override
from ray.rllib.utils.actors import TaskPool, create_colocated
from ray.rllib.utils.memory import ray_get_and_free
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.window_stat import WindowStat
SAMPLE_QUEUE_DEPTH = 2
REPLAY_QUEUE_DEPTH = 4
LEARNER_QUEUE_MAX_SIZE = 16
class AsyncReplayOptimizer(PolicyOptimizer):
    """Main event loop of the Ape-X optimizer (async sampling with replay).

    This class coordinates the data transfers between the learner thread,
    remote workers (Ape-X actors), and replay buffer actors.

    This has two modes of operation:
        - normal replay: replays independent samples.
        - batch replay: simplified mode where entire sample batches are
          replayed. This supports RNNs, but not prioritization.

    This optimizer requires that rollout workers return an additional
    "td_error" array in the info return of compute_gradients(). This error
    term will be used for sample prioritization."""

    def __init__(self,
                 workers,
                 learning_starts=1000,
                 buffer_size=10000,
                 prioritized_replay=True,
                 prioritized_replay_alpha=0.6,
                 prioritized_replay_beta=0.4,
                 prioritized_replay_eps=1e-6,
                 train_batch_size=512,
                 sample_batch_size=50,
                 num_replay_buffer_shards=1,
                 max_weight_sync_delay=400,
                 debug=False,
                 batch_replay=False):
        """Initialize an async replay optimizer.

        Arguments:
            workers (WorkerSet): all workers
            learning_starts (int): wait until this many steps have been sampled
                before starting optimization.
            buffer_size (int): max size of the replay buffer
            prioritized_replay (bool): whether to enable prioritized replay
            prioritized_replay_alpha (float): replay alpha hyperparameter
            prioritized_replay_beta (float): replay beta hyperparameter
            prioritized_replay_eps (float): replay eps hyperparameter
            train_batch_size (int): size of batches to learn on
            sample_batch_size (int): size of batches to sample from workers
            num_replay_buffer_shards (int): number of actors to use to store
                replay samples
            max_weight_sync_delay (int): update the weights of a rollout worker
                after collecting this number of timesteps from it
            debug (bool): return extra debug stats
            batch_replay (bool): replay entire sequential batches of
                experiences instead of sampling steps individually
        """
        PolicyOptimizer.__init__(self, workers)

        self.debug = debug
        self.batch_replay = batch_replay
        self.replay_starts = learning_starts
        self.prioritized_replay_beta = prioritized_replay_beta
        self.prioritized_replay_eps = prioritized_replay_eps
        self.max_weight_sync_delay = max_weight_sync_delay

        # Background thread that performs the actual SGD updates.
        self.learner = LearnerThread(self.workers.local_worker())
        self.learner.start()

        # Batch-replay mode stores whole batches (RNN-safe, no priorities).
        if self.batch_replay:
            replay_cls = BatchReplayActor
        else:
            replay_cls = ReplayActor
        self.replay_actors = create_colocated(replay_cls, [
            num_replay_buffer_shards,
            learning_starts,
            buffer_size,
            train_batch_size,
            prioritized_replay_alpha,
            prioritized_replay_beta,
            prioritized_replay_eps,
        ], num_replay_buffer_shards)

        # Stats
        self.timers = {
            k: TimerStat()
            for k in [
                "put_weights", "get_samples", "sample_processing",
                "replay_processing", "update_priorities", "train", "sample"
            ]
        }
        self.num_weight_syncs = 0
        self.num_samples_dropped = 0
        self.learning_started = False

        # Number of worker steps since the last weight update
        self.steps_since_update = {}

        # Otherwise kick of replay tasks for local gradient updates
        self.replay_tasks = TaskPool()
        for ra in self.replay_actors:
            for _ in range(REPLAY_QUEUE_DEPTH):
                self.replay_tasks.add(ra, ra.replay.remote())

        # Kick off async background sampling
        self.sample_tasks = TaskPool()
        if self.workers.remote_workers():
            self._set_workers(self.workers.remote_workers())

    @override(PolicyOptimizer)
    def step(self):
        assert self.learner.is_alive()
        assert len(self.workers.remote_workers()) > 0
        start = time.time()
        sample_timesteps, train_timesteps = self._step()
        time_delta = time.time() - start
        self.timers["sample"].push(time_delta)
        self.timers["sample"].push_units_processed(sample_timesteps)
        if train_timesteps > 0:
            self.learning_started = True
        # Only count train throughput after the first train step, so warmup
        # time does not skew the reported rate.
        if self.learning_started:
            self.timers["train"].push(time_delta)
            self.timers["train"].push_units_processed(train_timesteps)
        self.num_steps_sampled += sample_timesteps
        self.num_steps_trained += train_timesteps

    @override(PolicyOptimizer)
    def stop(self):
        # Tear down replay shards and signal the learner thread to exit.
        for r in self.replay_actors:
            r.__ray_terminate__.remote()
        self.learner.stopped = True

    @override(PolicyOptimizer)
    def reset(self, remote_workers):
        self.workers.reset(remote_workers)
        self.sample_tasks.reset_workers(remote_workers)

    @override(PolicyOptimizer)
    def stats(self):
        # Shard 0 is reported as representative of all replay shards.
        replay_stats = ray_get_and_free(self.replay_actors[0].stats.remote(
            self.debug))
        timing = {
            "{}_time_ms".format(k): round(1000 * self.timers[k].mean, 3)
            for k in self.timers
        }
        timing["learner_grad_time_ms"] = round(
            1000 * self.learner.grad_timer.mean, 3)
        timing["learner_dequeue_time_ms"] = round(
            1000 * self.learner.queue_timer.mean, 3)
        stats = {
            "sample_throughput": round(self.timers["sample"].mean_throughput,
                                       3),
            "train_throughput": round(self.timers["train"].mean_throughput, 3),
            "num_weight_syncs": self.num_weight_syncs,
            "num_samples_dropped": self.num_samples_dropped,
            "learner_queue": self.learner.learner_queue_size.stats(),
            "replay_shard_0": replay_stats,
        }
        debug_stats = {
            "timing_breakdown": timing,
            "pending_sample_tasks": self.sample_tasks.count,
            "pending_replay_tasks": self.replay_tasks.count,
        }
        if self.debug:
            stats.update(debug_stats)
        if self.learner.stats:
            stats["learner"] = self.learner.stats
        return dict(PolicyOptimizer.stats(self), **stats)

    # For https://github.com/ray-project/ray/issues/2541 only
    def _set_workers(self, remote_workers):
        self.workers.reset(remote_workers)
        weights = self.workers.local_worker().get_weights()
        for ev in self.workers.remote_workers():
            ev.set_weights.remote(weights)
            self.steps_since_update[ev] = 0
            for _ in range(SAMPLE_QUEUE_DEPTH):
                self.sample_tasks.add(ev, ev.sample_with_count.remote())

    def _step(self):
        # One driver iteration: route finished sample tasks into the replay
        # shards, feed finished replay tasks to the learner, and push the
        # learner's priority updates back to the shards.
        sample_timesteps, train_timesteps = 0, 0
        weights = None

        with self.timers["sample_processing"]:
            completed = list(self.sample_tasks.completed())
            counts = ray_get_and_free([c[1][1] for c in completed])
            for i, (ev, (sample_batch, count)) in enumerate(completed):
                sample_timesteps += counts[i]

                # Send the data to the replay buffer
                random.choice(
                    self.replay_actors).add_batch.remote(sample_batch)

                # Update weights if needed
                self.steps_since_update[ev] += counts[i]
                if self.steps_since_update[ev] >= self.max_weight_sync_delay:
                    # Note that it's important to pull new weights once
                    # updated to avoid excessive correlation between actors
                    if weights is None or self.learner.weights_updated:
                        self.learner.weights_updated = False
                        with self.timers["put_weights"]:
                            weights = ray.put(
                                self.workers.local_worker().get_weights())
                    ev.set_weights.remote(weights)
                    self.num_weight_syncs += 1
                    self.steps_since_update[ev] = 0

                # Kick off another sample request
                self.sample_tasks.add(ev, ev.sample_with_count.remote())

        with self.timers["replay_processing"]:
            for ra, replay in self.replay_tasks.completed():
                self.replay_tasks.add(ra, ra.replay.remote())
                if self.learner.inqueue.full():
                    # Learner is backed up; drop rather than block the driver.
                    self.num_samples_dropped += 1
                else:
                    with self.timers["get_samples"]:
                        samples = ray_get_and_free(replay)
                    # Defensive copy against plasma crashes, see #2610 #3452
                    self.learner.inqueue.put((ra, samples and samples.copy()))

        with self.timers["update_priorities"]:
            while not self.learner.outqueue.empty():
                ra, prio_dict, count = self.learner.outqueue.get()
                ra.update_priorities.remote(prio_dict)
                train_timesteps += count

        return sample_timesteps, train_timesteps
@ray.remote(num_cpus=0)
class ReplayActor:
    """A replay buffer shard.

    Ray actors are single-threaded, so for scalability multiple replay actors
    may be created to increase parallelism."""

    def __init__(self, num_shards, learning_starts, buffer_size,
                 train_batch_size, prioritized_replay_alpha,
                 prioritized_replay_beta, prioritized_replay_eps):
        # Divide the global budgets evenly across shards so they hold in
        # aggregate over all shards.
        self.replay_starts = learning_starts // num_shards
        self.buffer_size = buffer_size // num_shards
        self.train_batch_size = train_batch_size
        self.prioritized_replay_beta = prioritized_replay_beta
        self.prioritized_replay_eps = prioritized_replay_eps

        def new_buffer():
            return PrioritizedReplayBuffer(
                self.buffer_size, alpha=prioritized_replay_alpha)

        # One prioritized buffer per policy id, created lazily on first add.
        self.replay_buffers = collections.defaultdict(new_buffer)

        # Metrics
        self.add_batch_timer = TimerStat()
        self.replay_timer = TimerStat()
        self.update_priorities_timer = TimerStat()
        self.num_added = 0

    def get_host(self):
        # Hostname of the node this actor runs on.
        return os.uname()[1]

    def add_batch(self, batch):
        """Add a batch row-by-row into the per-policy replay buffers."""
        # Handle everything as if multiagent
        if isinstance(batch, SampleBatch):
            batch = MultiAgentBatch({DEFAULT_POLICY_ID: batch}, batch.count)
        with self.add_batch_timer:
            for policy_id, s in batch.policy_batches.items():
                for row in s.rows():
                    self.replay_buffers[policy_id].add(
                        row["obs"], row["actions"], row["rewards"],
                        row["new_obs"], row["dones"], row["weights"])
        self.num_added += batch.count

    def replay(self):
        """Sample one train batch per policy; None until warmup completes."""
        if self.num_added < self.replay_starts:
            return None

        with self.replay_timer:
            samples = {}
            for policy_id, replay_buffer in self.replay_buffers.items():
                (obses_t, actions, rewards, obses_tp1, dones, weights,
                 batch_indexes) = replay_buffer.sample(
                     self.train_batch_size, beta=self.prioritized_replay_beta)
                samples[policy_id] = SampleBatch({
                    "obs": obses_t,
                    "actions": actions,
                    "rewards": rewards,
                    "new_obs": obses_tp1,
                    "dones": dones,
                    "weights": weights,
                    "batch_indexes": batch_indexes
                })
            return MultiAgentBatch(samples, self.train_batch_size)

    def update_priorities(self, prio_dict):
        """Apply new priorities derived from the learner's td_error values."""
        with self.update_priorities_timer:
            for policy_id, (batch_indexes, td_errors) in prio_dict.items():
                # eps keeps every priority strictly positive.
                new_priorities = (
                    np.abs(td_errors) + self.prioritized_replay_eps)
                self.replay_buffers[policy_id].update_priorities(
                    batch_indexes, new_priorities)

    def stats(self, debug=False):
        """Return timing metrics (ms) plus per-policy buffer stats."""
        stat = {
            "add_batch_time_ms": round(1000 * self.add_batch_timer.mean, 3),
            "replay_time_ms": round(1000 * self.replay_timer.mean, 3),
            "update_priorities_time_ms": round(
                1000 * self.update_priorities_timer.mean, 3),
        }
        for policy_id, replay_buffer in self.replay_buffers.items():
            stat.update({
                "policy_{}".format(policy_id): replay_buffer.stats(debug=debug)
            })
        return stat
# note: we set num_cpus=0 to avoid failing to create replay actors when
# resources are fragmented. This isn't ideal.
@ray.remote(num_cpus=0)
class BatchReplayActor:
    """Replay shard that stores and replays whole sample batches.

    Replaying intact batches keeps sequences in order (RNN-friendly); the
    prioritization parameters are accepted for interface parity but unused.
    """

    def __init__(self, num_shards, learning_starts, buffer_size,
                 train_batch_size, prioritized_replay_alpha,
                 prioritized_replay_beta, prioritized_replay_eps):
        # The global budgets are split evenly across shards.
        self.replay_starts = learning_starts // num_shards
        self.buffer_size = buffer_size // num_shards
        self.train_batch_size = train_batch_size
        self.buffer = []

        # Metrics
        self.num_added = 0
        self.cur_size = 0

    def get_host(self):
        """Return this node's hostname."""
        return os.uname()[1]

    def add_batch(self, batch):
        """Append a batch, evicting oldest entries once over capacity."""
        # Normalize single-agent input into the multi-agent container.
        if isinstance(batch, SampleBatch):
            batch = MultiAgentBatch({DEFAULT_POLICY_ID: batch}, batch.count)
        self.buffer.append(batch)
        self.cur_size += batch.count
        self.num_added += batch.count
        # FIFO eviction keeps cur_size within the per-shard cap.
        while self.cur_size > self.buffer_size:
            evicted = self.buffer.pop(0)
            self.cur_size -= evicted.count

    def replay(self):
        """Return a uniformly sampled stored batch, or None during warmup."""
        if self.num_added < self.replay_starts:
            return None
        return random.choice(self.buffer)

    def update_priorities(self, prio_dict):
        # Prioritization is a no-op in batch-replay mode.
        pass

    def stats(self, debug=False):
        """Return current occupancy counters for this shard."""
        return {
            "cur_size": self.cur_size,
            "num_added": self.num_added,
        }
class LearnerThread(threading.Thread):
    """Background thread that updates the local model from replay data.

    The learner thread communicates with the main thread through Queues. This
    is needed since Ray operations can only be run on the main thread. In
    addition, moving heavyweight gradient ops session runs off the main thread
    improves overall throughput.
    """

    def __init__(self, local_worker):
        threading.Thread.__init__(self)
        self.learner_queue_size = WindowStat("size", 50)
        self.local_worker = local_worker
        # Bounded: the driver drops samples instead of blocking when full.
        self.inqueue = queue.Queue(maxsize=LEARNER_QUEUE_MAX_SIZE)
        self.outqueue = queue.Queue()
        self.queue_timer = TimerStat()
        self.grad_timer = TimerStat()
        self.daemon = True
        self.weights_updated = False
        self.stopped = False
        self.stats = {}

    def run(self):
        # Loop until the driver flips self.stopped (see optimizer stop()).
        while not self.stopped:
            self.step()

    def step(self):
        with self.queue_timer:
            ra, replay = self.inqueue.get()
        if replay is not None:
            prio_dict = {}
            with self.grad_timer:
                grad_out = self.local_worker.learn_on_batch(replay)
                for pid, info in grad_out.items():
                    # Collect (indexes, td_error) per policy so the driver can
                    # push priority updates back to replay shard `ra`.
                    prio_dict[pid] = (
                        replay.policy_batches[pid].data.get("batch_indexes"),
                        info.get("td_error"))
                    self.stats[pid] = get_learner_stats(info)
            self.outqueue.put((ra, prio_dict, replay.count))
        self.learner_queue_size.push(self.inqueue.qsize())
        # NOTE(review): set even when replay was None and no SGD step ran;
        # the driver reads this only as a hint to pull fresh weights.
        self.weights_updated = True
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/async_samples_optimizer.py
|
Python
|
"""Implements the IMPALA asynchronous sampling architecture.
https://arxiv.org/abs/1802.01561"""
import logging
import time
from ray.rllib.optimizers.aso_aggregator import SimpleAggregator
from ray.rllib.optimizers.aso_tree_aggregator import TreeAggregator
from ray.rllib.optimizers.aso_learner import LearnerThread
from ray.rllib.optimizers.aso_multi_gpu_learner import TFMultiGPULearner
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.utils.annotations import override
from ray.rllib.utils.timer import TimerStat
logger = logging.getLogger(__name__)
class AsyncSamplesOptimizer(PolicyOptimizer):
    """Main event loop of the IMPALA architecture.

    This class coordinates the data transfers between the learner thread
    and remote workers (IMPALA actors).
    """

    def __init__(self,
                 workers,
                 train_batch_size=500,
                 sample_batch_size=50,
                 num_envs_per_worker=1,
                 num_gpus=0,
                 lr=0.0005,
                 replay_buffer_num_slots=0,
                 replay_proportion=0.0,
                 num_data_loader_buffers=1,
                 max_sample_requests_in_flight_per_worker=2,
                 broadcast_interval=1,
                 num_sgd_iter=1,
                 minibatch_buffer_size=1,
                 learner_queue_size=16,
                 learner_queue_timeout=300,
                 num_aggregation_workers=0,
                 _fake_gpus=False):
        PolicyOptimizer.__init__(self, workers)

        self._stats_start_time = time.time()
        self._last_stats_time = {}
        self._last_stats_sum = {}

        # Multi-GPU learner when more than one GPU or loader is requested.
        if num_gpus > 1 or num_data_loader_buffers > 1:
            logger.info(
                "Enabling multi-GPU mode, {} GPUs, {} parallel loaders".format(
                    num_gpus, num_data_loader_buffers))
            if num_data_loader_buffers < minibatch_buffer_size:
                raise ValueError(
                    "In multi-gpu mode you must have at least as many "
                    "parallel data loader buffers as minibatch buffers: "
                    "{} vs {}".format(num_data_loader_buffers,
                                      minibatch_buffer_size))
            self.learner = TFMultiGPULearner(
                self.workers.local_worker(),
                lr=lr,
                num_gpus=num_gpus,
                train_batch_size=train_batch_size,
                num_data_loader_buffers=num_data_loader_buffers,
                minibatch_buffer_size=minibatch_buffer_size,
                num_sgd_iter=num_sgd_iter,
                learner_queue_size=learner_queue_size,
                learner_queue_timeout=learner_queue_timeout,
                _fake_gpus=_fake_gpus)
        else:
            self.learner = LearnerThread(
                self.workers.local_worker(),
                minibatch_buffer_size=minibatch_buffer_size,
                num_sgd_iter=num_sgd_iter,
                learner_queue_size=learner_queue_size,
                learner_queue_timeout=learner_queue_timeout)
        self.learner.start()

        # Stats
        self._optimizer_step_timer = TimerStat()
        # NOTE(review): _stats_start_time and _last_stats_time were already
        # initialized above; these reassignments are redundant but harmless.
        self._stats_start_time = time.time()
        self._last_stats_time = {}

        # Tree aggregation offloads batch concatenation to extra actors.
        if num_aggregation_workers > 0:
            self.aggregator = TreeAggregator(
                workers,
                num_aggregation_workers,
                replay_proportion=replay_proportion,
                max_sample_requests_in_flight_per_worker=(
                    max_sample_requests_in_flight_per_worker),
                replay_buffer_num_slots=replay_buffer_num_slots,
                train_batch_size=train_batch_size,
                sample_batch_size=sample_batch_size,
                broadcast_interval=broadcast_interval)
        else:
            self.aggregator = SimpleAggregator(
                workers,
                replay_proportion=replay_proportion,
                max_sample_requests_in_flight_per_worker=(
                    max_sample_requests_in_flight_per_worker),
                replay_buffer_num_slots=replay_buffer_num_slots,
                train_batch_size=train_batch_size,
                sample_batch_size=sample_batch_size,
                broadcast_interval=broadcast_interval)

    def add_stat_val(self, key, val):
        # Accumulate a counter; first sight of a key anchors its time window
        # at the optimizer's start time.
        if key not in self._last_stats_sum:
            self._last_stats_sum[key] = 0
            self._last_stats_time[key] = self._stats_start_time
        self._last_stats_sum[key] += val

    def get_mean_stats_and_reset(self):
        # Convert accumulated counts into per-second rates, then restart each
        # key's accumulation window.
        now = time.time()
        mean_stats = {
            key: round(val / (now - self._last_stats_time[key]), 3)
            for key, val in self._last_stats_sum.items()
        }
        for key in self._last_stats_sum.keys():
            self._last_stats_sum[key] = 0
            self._last_stats_time[key] = time.time()
        return mean_stats

    @override(PolicyOptimizer)
    def step(self):
        if len(self.workers.remote_workers()) == 0:
            raise ValueError("Config num_workers=0 means training will hang!")
        assert self.learner.is_alive()
        with self._optimizer_step_timer:
            sample_timesteps, train_timesteps = self._step()

        if sample_timesteps > 0:
            self.add_stat_val("sample_throughput", sample_timesteps)
        if train_timesteps > 0:
            self.add_stat_val("train_throughput", train_timesteps)

        self.num_steps_sampled += sample_timesteps
        self.num_steps_trained += train_timesteps

    @override(PolicyOptimizer)
    def stop(self):
        self.learner.stopped = True

    @override(PolicyOptimizer)
    def reset(self, remote_workers):
        self.workers.reset(remote_workers)
        self.aggregator.reset(remote_workers)

    @override(PolicyOptimizer)
    def stats(self):
        def timer_to_ms(timer):
            return round(1000 * timer.mean, 3)

        stats = self.aggregator.stats()
        stats.update(self.get_mean_stats_and_reset())
        stats["timing_breakdown"] = {
            "optimizer_step_time_ms": timer_to_ms(self._optimizer_step_timer),
            "learner_grad_time_ms": timer_to_ms(self.learner.grad_timer),
            "learner_load_time_ms": timer_to_ms(self.learner.load_timer),
            "learner_load_wait_time_ms": timer_to_ms(
                self.learner.load_wait_timer),
            "learner_dequeue_time_ms": timer_to_ms(self.learner.queue_timer),
        }
        stats["learner_queue"] = self.learner.learner_queue_size.stats()
        if self.learner.stats:
            stats["learner"] = self.learner.stats
        return dict(PolicyOptimizer.stats(self), **stats)

    def _step(self):
        # Feed aggregated train batches to the learner, broadcasting fresh
        # weights whenever the aggregator's interval allows; then drain the
        # learner's completed-train counts.
        sample_timesteps, train_timesteps = 0, 0

        for train_batch in self.aggregator.iter_train_batches():
            sample_timesteps += train_batch.count
            self.learner.inqueue.put(train_batch)
            if (self.learner.weights_updated
                    and self.aggregator.should_broadcast()):
                self.aggregator.broadcast_new_weights()

        while not self.learner.outqueue.empty():
            count = self.learner.outqueue.get()
            train_timesteps += count

        return sample_timesteps, train_timesteps
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/microbatch_optimizer.py
|
Python
|
import logging
import ray
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.filter import RunningStat
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.memory import ray_get_and_free
logger = logging.getLogger(__name__)
class MicrobatchOptimizer(PolicyOptimizer):
    """A microbatching synchronous RL optimizer.

    This optimizer pulls sample batches from workers until the target
    microbatch size is reached. Then, it computes and accumulates the policy
    gradient in a local buffer. This process is repeated until the number of
    samples collected equals the train batch size. Then, an accumulated
    gradient update is made.

    This allows for training with effective batch sizes much larger than can
    fit in GPU or host memory.
    """

    def __init__(self, workers, train_batch_size=10000, microbatch_size=1000):
        PolicyOptimizer.__init__(self, workers)
        if train_batch_size <= microbatch_size:
            raise ValueError(
                "The microbatch size must be smaller than the train batch "
                "size, got {} vs {}".format(microbatch_size, train_batch_size))
        self.update_weights_timer = TimerStat()
        self.sample_timer = TimerStat()
        self.grad_timer = TimerStat()
        self.throughput = RunningStat()
        self.train_batch_size = train_batch_size
        self.microbatch_size = microbatch_size
        self.learner_stats = {}
        # Map of policy_id -> policy for the trainable policies.
        self.policies = dict(self.workers.local_worker()
                             .foreach_trainable_policy(lambda p, i: (i, p)))
        logger.debug("Policies to train: {}".format(self.policies))

    @override(PolicyOptimizer)
    def step(self):
        # Sync the latest local weights to all remote workers before sampling.
        with self.update_weights_timer:
            if self.workers.remote_workers():
                weights = ray.put(self.workers.local_worker().get_weights())
                for e in self.workers.remote_workers():
                    e.set_weights.remote(weights)

        fetches = {}
        accumulated_gradients = {}
        samples_so_far = 0

        # Accumulate minibatches.
        i = 0
        while samples_so_far < self.train_batch_size:
            i += 1
            with self.sample_timer:
                samples = []
                # Collect at least microbatch_size samples (may overshoot,
                # since each pass gathers one batch per remote worker).
                while sum(s.count for s in samples) < self.microbatch_size:
                    if self.workers.remote_workers():
                        samples.extend(
                            ray_get_and_free([
                                e.sample.remote()
                                for e in self.workers.remote_workers()
                            ]))
                    else:
                        samples.append(self.workers.local_worker().sample())
                samples = SampleBatch.concat_samples(samples)
                self.sample_timer.push_units_processed(samples.count)
                samples_so_far += samples.count

            logger.info(
                "Computing gradients for microbatch {} ({}/{} samples)".format(
                    i, samples_so_far, self.train_batch_size))

            # Handle everything as if multiagent
            if isinstance(samples, SampleBatch):
                samples = MultiAgentBatch({
                    DEFAULT_POLICY_ID: samples
                }, samples.count)

            # Compute gradients per trainable policy and sum them element-wise
            # into the accumulator.
            with self.grad_timer:
                for policy_id, policy in self.policies.items():
                    if policy_id not in samples.policy_batches:
                        continue
                    batch = samples.policy_batches[policy_id]
                    grad_out, info_out = (
                        self.workers.local_worker().compute_gradients(
                            MultiAgentBatch({
                                policy_id: batch
                            }, batch.count)))
                    grad = grad_out[policy_id]
                    fetches.update(info_out)
                    if policy_id not in accumulated_gradients:
                        accumulated_gradients[policy_id] = grad
                    else:
                        grad_size = len(accumulated_gradients[policy_id])
                        assert grad_size == len(grad), (grad_size, len(grad))
                        c = []
                        for a, b in zip(accumulated_gradients[policy_id],
                                        grad):
                            c.append(a + b)
                        accumulated_gradients[policy_id] = c
                self.grad_timer.push_units_processed(samples.count)

        # Apply the accumulated gradient
        logger.info("Applying accumulated gradients ({} samples)".format(
            samples_so_far))
        self.workers.local_worker().apply_gradients(accumulated_gradients)

        if len(fetches) == 1 and DEFAULT_POLICY_ID in fetches:
            self.learner_stats = fetches[DEFAULT_POLICY_ID]
        else:
            self.learner_stats = fetches
        self.num_steps_sampled += samples_so_far
        self.num_steps_trained += samples_so_far
        return self.learner_stats

    @override(PolicyOptimizer)
    def stats(self):
        # Timings in ms; throughputs in units (timesteps) per second.
        return dict(
            PolicyOptimizer.stats(self), **{
                "sample_time_ms": round(1000 * self.sample_timer.mean, 3),
                "grad_time_ms": round(1000 * self.grad_timer.mean, 3),
                "update_time_ms": round(1000 * self.update_weights_timer.mean,
                                        3),
                "opt_peak_throughput": round(self.grad_timer.mean_throughput,
                                             3),
                "sample_peak_throughput": round(
                    self.sample_timer.mean_throughput, 3),
                "opt_samples": round(self.grad_timer.mean_units_processed, 3),
                "learner": self.learner_stats,
            })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/multi_gpu_impl.py
|
Python
|
from collections import namedtuple
import logging
from ray.rllib.utils.debug import log_once, summarize
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
# Variable scope in which created variables will be placed under
TOWER_SCOPE_NAME = "tower"
logger = logging.getLogger(__name__)
class LocalSyncParallelOptimizer:
"""Optimizer that runs in parallel across multiple local devices.
LocalSyncParallelOptimizer automatically splits up and loads training data
onto specified local devices (e.g. GPUs) with `load_data()`. During a call
to `optimize()`, the devices compute gradients over slices of the data in
parallel. The gradients are then averaged and applied to the shared
weights.
The data loaded is pinned in device memory until the next call to
`load_data`, so you can make multiple passes (possibly in randomized order)
over the same data once loaded.
This is similar to tf.train.SyncReplicasOptimizer, but works within a
single TensorFlow graph, i.e. implements in-graph replicated training:
https://www.tensorflow.org/api_docs/python/tf/train/SyncReplicasOptimizer
Args:
optimizer: Delegate TensorFlow optimizer object.
devices: List of the names of TensorFlow devices to parallelize over.
input_placeholders: List of input_placeholders for the loss function.
Tensors of these shapes will be passed to build_graph() in order
to define the per-device loss ops.
rnn_inputs: Extra input placeholders for RNN inputs. These will have
shape [BATCH_SIZE // MAX_SEQ_LEN, ...].
max_per_device_batch_size: Number of tuples to optimize over at a time
per device. In each call to `optimize()`,
`len(devices) * per_device_batch_size` tuples of data will be
processed. If this is larger than the total data size, it will be
clipped.
build_graph: Function that takes the specified inputs and returns a
TF Policy instance.
"""
    def __init__(self,
                 optimizer,
                 devices,
                 input_placeholders,
                 rnn_inputs,
                 max_per_device_batch_size,
                 build_graph,
                 grad_norm_clipping=None):
        """Build the shared loss graph plus one tower per device.

        See the class docstring for the meaning of each argument;
        grad_norm_clipping, if set, is the global-norm clip applied to the
        averaged gradients before they are given to the optimizer.
        """
        self.optimizer = optimizer
        self.devices = devices
        self.max_per_device_batch_size = max_per_device_batch_size
        self.loss_inputs = input_placeholders + rnn_inputs
        self.build_graph = build_graph

        # First initialize the shared loss network
        with tf.name_scope(TOWER_SCOPE_NAME):
            self._shared_loss = build_graph(self.loss_inputs)
        # Remember the shared network's update ops so they can be excluded
        # from the per-tower update ops collected below.
        shared_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope=tf.get_variable_scope().name)

        # Then setup the per-device loss graphs that use the shared weights
        self._batch_index = tf.placeholder(tf.int32, name="batch_index")

        # Dynamic batch size, which may be shrunk if there isn't enough data
        self._per_device_batch_size = tf.placeholder(
            tf.int32, name="per_device_batch_size")
        self._loaded_per_device_batch_size = max_per_device_batch_size

        # When loading RNN input, we dynamically determine the max seq len
        self._max_seq_len = tf.placeholder(tf.int32, name="max_seq_len")
        self._loaded_max_seq_len = 1

        # Split on the CPU in case the data doesn't fit in GPU memory.
        with tf.device("/cpu:0"):
            data_splits = zip(
                *[tf.split(ph, len(devices)) for ph in self.loss_inputs])

        self._towers = []
        for device, device_placeholders in zip(self.devices, data_splits):
            self._towers.append(
                self._setup_device(device, device_placeholders,
                                   len(input_placeholders)))

        # Average tower gradients, optionally clipping by global norm while
        # keeping each gradient paired with its original variable.
        avg = average_gradients([t.grads for t in self._towers])
        if grad_norm_clipping:
            clipped = []
            for grad, _ in avg:
                clipped.append(grad)
            clipped, _ = tf.clip_by_global_norm(clipped, grad_norm_clipping)
            for i, (grad, var) in enumerate(avg):
                avg[i] = (clipped[i], var)

        # gather update ops for any batch norm layers. TODO(ekl) here we will
        # use all the ops found which won't work for DQN / DDPG, but those
        # aren't supported with multi-gpu right now anyways.
        self._update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS, scope=tf.get_variable_scope().name)
        for op in shared_ops:
            self._update_ops.remove(op)  # only care about tower update ops
        if self._update_ops:
            logger.debug("Update ops to run on apply gradient: {}".format(
                self._update_ops))

        with tf.control_dependencies(self._update_ops):
            self._train_op = self.optimizer.apply_gradients(avg)
    def load_data(self, sess, inputs, state_inputs):
        """Bulk loads the specified inputs into device memory.

        The shape of the inputs must conform to the shapes of the input
        placeholders this optimizer was constructed with.

        The data is split equally across all the devices. If the data is not
        evenly divisible by the batch size, excess data will be discarded.

        Args:
            sess: TensorFlow session.
            inputs: List of arrays matching the input placeholders, of shape
                [BATCH_SIZE, ...].
            state_inputs: List of RNN input arrays. These arrays have size
                [BATCH_SIZE / MAX_SEQ_LEN, ...] (one row per sequence).

        Returns:
            The number of tuples loaded per device.
        """
        if log_once("load_data"):
            logger.info(
                "Training on concatenated sample batches:\n\n{}\n".format(
                    summarize({
                        "placeholders": self.loss_inputs,
                        "inputs": inputs,
                        "state_inputs": state_inputs
                    })))
        feed_dict = {}
        assert len(self.loss_inputs) == len(inputs + state_inputs), \
            (self.loss_inputs, inputs, state_inputs)
        # Let's suppose we have the following input data, and 2 devices:
        # 1 2 3 4 5 6 7                               <- state inputs shape
        # A A A B B B C C C D D D E E E F F F G G G   <- inputs shape
        # The data is truncated and split across devices as follows:
        # |---| seq len = 3
        # |---------------------------------| seq batch size = 6 seqs
        # |----------------| per device batch size = 9 tuples
        if len(state_inputs) > 0:
            smallest_array = state_inputs[0]
            # Infer the RNN sequence length from the ratio of flat timesteps
            # to state rows (there is one state row per sequence).
            seq_len = len(inputs[0]) // len(state_inputs[0])
            self._loaded_max_seq_len = seq_len
        else:
            smallest_array = inputs[0]
            self._loaded_max_seq_len = 1
        # Number of whole sequences that fit into one minibatch across all
        # devices, given the per-device tuple budget.
        sequences_per_minibatch = (
            self.max_per_device_batch_size // self._loaded_max_seq_len * len(
                self.devices))
        if sequences_per_minibatch < 1:
            logger.warning(
                ("Target minibatch size is {}, however the rollout sequence "
                 "length is {}, hence the minibatch size will be raised to "
                 "{}.").format(self.max_per_device_batch_size,
                               self._loaded_max_seq_len,
                               self._loaded_max_seq_len * len(self.devices)))
            sequences_per_minibatch = 1
        if len(smallest_array) < sequences_per_minibatch:
            # Dynamically shrink the batch size if insufficient data
            sequences_per_minibatch = make_divisible_by(
                len(smallest_array), len(self.devices))
        if log_once("data_slicing"):
            logger.info(
                ("Divided {} rollout sequences, each of length {}, among "
                 "{} devices.").format(
                     len(smallest_array), self._loaded_max_seq_len,
                     len(self.devices)))
        if sequences_per_minibatch < len(self.devices):
            raise ValueError(
                "Must load at least 1 tuple sequence per device. Try "
                "increasing `sgd_minibatch_size` or reducing `max_seq_len` "
                "to ensure that at least one sequence fits per device.")
        self._loaded_per_device_batch_size = (sequences_per_minibatch // len(
            self.devices) * self._loaded_max_seq_len)
        if len(state_inputs) > 0:
            # First truncate the RNN state arrays to the sequences_per_minib.
            state_inputs = [
                make_divisible_by(arr, sequences_per_minibatch)
                for arr in state_inputs
            ]
            # Then truncate the data inputs to match
            inputs = [arr[:len(state_inputs[0]) * seq_len] for arr in inputs]
            assert len(state_inputs[0]) * seq_len == len(inputs[0]), \
                (len(state_inputs[0]), sequences_per_minibatch, seq_len,
                 len(inputs[0]))
            for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
                feed_dict[ph] = arr
            truncated_len = len(inputs[0])
        else:
            for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
                truncated_arr = make_divisible_by(arr, sequences_per_minibatch)
                feed_dict[ph] = truncated_arr
                truncated_len = len(truncated_arr)
        # Fill each tower's device-local input Variables with the data.
        sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)
        self.num_tuples_loaded = truncated_len
        tuples_per_device = truncated_len // len(self.devices)
        assert tuples_per_device > 0, "No data loaded?"
        assert tuples_per_device % self._loaded_per_device_batch_size == 0
        return tuples_per_device
def optimize(self, sess, batch_index):
"""Run a single step of SGD.
Runs a SGD step over a slice of the preloaded batch with size given by
self._loaded_per_device_batch_size and offset given by the batch_index
argument.
Updates shared model weights based on the averaged per-device
gradients.
Args:
sess: TensorFlow session.
batch_index: Offset into the preloaded data. This value must be
between `0` and `tuples_per_device`. The amount of data to
process is at most `max_per_device_batch_size`.
Returns:
The outputs of extra_ops evaluated over the batch.
"""
feed_dict = {
self._batch_index: batch_index,
self._per_device_batch_size: self._loaded_per_device_batch_size,
self._max_seq_len: self._loaded_max_seq_len,
}
for tower in self._towers:
feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict())
fetches = {"train": self._train_op}
for tower in self._towers:
fetches.update(tower.loss_graph._get_grad_and_stats_fetches())
return sess.run(fetches, feed_dict=feed_dict)
def get_common_loss(self):
return self._shared_loss
def get_device_losses(self):
return [t.loss_graph for t in self._towers]
    def _setup_device(self, device, device_input_placeholders, num_data_in):
        """Build one tower: device-local input buffers, slice ops, and grads.

        Args:
            device: TF device string (e.g. "/gpu:0") to pin this tower to.
            device_input_placeholders: this device's split of the loss input
                placeholders; per-timestep data inputs come first, followed
                by per-sequence RNN state inputs.
            num_data_in: number of leading entries in
                `device_input_placeholders` that are per-timestep data inputs.

        Returns:
            A `Tower` namedtuple holding the op that (re)initializes the
            device-local input Variables, the tower's gradients, and the
            tower's loss graph copy.
        """
        assert num_data_in <= len(device_input_placeholders)
        with tf.device(device):
            with tf.name_scope(TOWER_SCOPE_NAME):
                device_input_batches = []
                device_input_slices = []
                for i, ph in enumerate(device_input_placeholders):
                    # Uninitialized device-local Variable holding this input;
                    # it is (re)filled via its initializer in load_data().
                    current_batch = tf.Variable(
                        ph,
                        trainable=False,
                        validate_shape=False,
                        collections=[])
                    device_input_batches.append(current_batch)
                    if i < num_data_in:
                        # Data inputs: offsets and sizes are rounded to whole
                        # sequences so a minibatch never splits a sequence.
                        scale = self._max_seq_len
                        granularity = self._max_seq_len
                    else:
                        # RNN state inputs: one row per sequence, so convert
                        # the tuple offset/size into sequence counts.
                        scale = self._max_seq_len
                        granularity = 1
                    current_slice = tf.slice(
                        current_batch,
                        ([self._batch_index // scale * granularity] +
                         [0] * len(ph.shape[1:])),
                        ([self._per_device_batch_size // scale * granularity] +
                         [-1] * len(ph.shape[1:])))
                    current_slice.set_shape(ph.shape)
                    device_input_slices.append(current_slice)
                graph_obj = self.build_graph(device_input_slices)
                device_grads = graph_obj.gradients(self.optimizer,
                                                   graph_obj._loss)
            return Tower(
                tf.group(
                    *[batch.initializer for batch in device_input_batches]),
                device_grads, graph_obj)
# Each tower is a copy of the loss graph pinned to a specific device.
Tower = namedtuple("Tower", "init_op grads loss_graph")
def make_divisible_by(a, n):
    """Truncate `a` down to a multiple of `n`.

    Arguments:
        a (int|sequence): an integer, or a sliceable sequence (e.g., a numpy
            array or list) to truncate along its first dimension.
        n (int): the divisor.

    Returns:
        For an int, the largest multiple of `n` that is <= `a`. Otherwise, a
        prefix of `a` whose length is divisible by `n`.
    """
    if isinstance(a, int):
        return a - a % n
    # len() works for any sized sequence (the previous `a.shape[0]` only
    # supported numpy arrays; for ndarrays len(a) == a.shape[0]).
    return a[:len(a) - len(a) % n]
def average_gradients(tower_grads):
    """Averages gradients across towers.

    Calculate the average gradient for each shared variable across all towers.
    Note that this function provides a synchronization point across all towers.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer
            list is over individual gradients. The inner list is over the
            gradient calculation for each tower.

    Returns:
        List of pairs of (gradient, variable) where the gradient has been
        averaged across all towers.

    TODO(ekl): We could use NCCL if this becomes a bottleneck.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            if g is not None:
                # Add 0 dimension to the gradients to represent the tower.
                expanded_g = tf.expand_dims(g, 0)
                # Append on a 'tower' dimension which we will average over
                # below.
                grads.append(expanded_g)
        if not grads:
            # No tower produced a gradient for this variable (it may not be
            # part of the loss), so skip it entirely.
            continue
        # Average over the 'tower' dimension.
        grad = tf.concat(axis=0, values=grads)
        grad = tf.reduce_mean(grad, 0)
        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/multi_gpu_optimizer.py
|
Python
|
import logging
import math
import numpy as np
from collections import defaultdict
import ray
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.optimizers.multi_gpu_impl import LocalSyncParallelOptimizer
from ray.rllib.optimizers.rollout import collect_samples
from ray.rllib.utils.annotations import override
from ray.rllib.utils.timer import TimerStat
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
class LocalMultiGPUOptimizer(PolicyOptimizer):
    """A synchronous optimizer that uses multiple local GPUs.

    Samples are pulled synchronously from multiple remote workers,
    concatenated, and then split across the memory of multiple local GPUs.
    A number of SGD passes are then taken over the in-memory data. For more
    details, see `multi_gpu_impl.LocalSyncParallelOptimizer`.

    This optimizer is Tensorflow-specific and requires the underlying
    Policy to be a TFPolicy instance that supports `.copy()`.

    Note that all replicas of the TFPolicy will merge their
    extra_compute_grad and apply_grad feed_dicts and fetches. This
    may result in unexpected behavior.
    """

    def __init__(self,
                 workers,
                 sgd_batch_size=128,
                 num_sgd_iter=10,
                 sample_batch_size=200,
                 num_envs_per_worker=1,
                 train_batch_size=1024,
                 num_gpus=0,
                 standardize_fields=None,
                 shuffle_sequences=True):
        """Initialize a synchronous multi-gpu optimizer.

        Arguments:
            workers (WorkerSet): all workers
            sgd_batch_size (int): SGD minibatch size within train batch size
            num_sgd_iter (int): number of passes to learn on per train batch
            sample_batch_size (int): size of batches to sample from workers
            num_envs_per_worker (int): num envs in each rollout worker
            train_batch_size (int): size of batches to learn on
            num_gpus (int): number of GPUs to use for data-parallel SGD
            standardize_fields (list): list of fields in the training batch
                to normalize (defaults to no normalization)
            shuffle_sequences (bool): whether to shuffle the train batch prior
                to SGD to break up correlations
        """
        PolicyOptimizer.__init__(self, workers)

        self.batch_size = sgd_batch_size
        self.num_sgd_iter = num_sgd_iter
        self.num_envs_per_worker = num_envs_per_worker
        self.sample_batch_size = sample_batch_size
        self.train_batch_size = train_batch_size
        self.shuffle_sequences = shuffle_sequences
        if not num_gpus:
            self.devices = ["/cpu:0"]
        else:
            self.devices = [
                "/gpu:{}".format(i) for i in range(int(math.ceil(num_gpus)))
            ]
        # Round the minibatch size down to a multiple of the device count so
        # it can be split evenly across devices.
        self.batch_size = int(sgd_batch_size / len(self.devices)) * len(
            self.devices)
        assert self.batch_size % len(self.devices) == 0
        assert self.batch_size >= len(self.devices), "batch size too small"
        self.per_device_batch_size = int(self.batch_size / len(self.devices))
        self.sample_timer = TimerStat()
        self.load_timer = TimerStat()
        self.grad_timer = TimerStat()
        self.update_weights_timer = TimerStat()
        # Fix: `standardize_fields=[]` was a mutable default argument; use
        # None plus a fallback so no list object is shared across instances.
        self.standardize_fields = standardize_fields or []
        # Fix: initialize here so stats() works before the first step() call.
        self.learner_stats = {}

        logger.info("LocalMultiGPUOptimizer devices {}".format(self.devices))

        self.policies = dict(self.workers.local_worker()
                             .foreach_trainable_policy(lambda p, i: (i, p)))
        logger.debug("Policies to train: {}".format(self.policies))
        for policy_id, policy in self.policies.items():
            if not isinstance(policy, TFPolicy):
                raise ValueError(
                    "Only TF graph policies are supported with multi-GPU. "
                    "Try setting `simple_optimizer=True` instead.")

        # per-GPU graph copies created below must share vars with the policy
        # reuse is set to AUTO_REUSE because Adam nodes are created after
        # all of the device copies are created.
        self.optimizers = {}
        with self.workers.local_worker().tf_sess.graph.as_default():
            with self.workers.local_worker().tf_sess.as_default():
                for policy_id, policy in self.policies.items():
                    with tf.variable_scope(policy_id, reuse=tf.AUTO_REUSE):
                        if policy._state_inputs:
                            rnn_inputs = policy._state_inputs + [
                                policy._seq_lens
                            ]
                        else:
                            rnn_inputs = []
                        self.optimizers[policy_id] = (
                            LocalSyncParallelOptimizer(
                                policy._optimizer, self.devices,
                                [v
                                 for _, v in policy._loss_inputs], rnn_inputs,
                                self.per_device_batch_size, policy.copy))

        self.sess = self.workers.local_worker().tf_sess
        self.sess.run(tf.global_variables_initializer())

    @override(PolicyOptimizer)
    def step(self):
        """Sample a train batch, load it onto the devices, and run SGD.

        Returns:
            dict: per-policy averaged learner stats from the last SGD epoch.
        """
        # Broadcast current weights to the remote workers before sampling.
        with self.update_weights_timer:
            if self.workers.remote_workers():
                weights = ray.put(self.workers.local_worker().get_weights())
                for e in self.workers.remote_workers():
                    e.set_weights.remote(weights)

        with self.sample_timer:
            if self.workers.remote_workers():
                samples = collect_samples(
                    self.workers.remote_workers(), self.sample_batch_size,
                    self.num_envs_per_worker, self.train_batch_size)
                if samples.count > self.train_batch_size * 2:
                    logger.info(
                        "Collected more training samples than expected "
                        "(actual={}, train_batch_size={}). ".format(
                            samples.count, self.train_batch_size) +
                        "This may be because you have many workers or "
                        "long episodes in 'complete_episodes' batch mode.")
            else:
                samples = []
                while sum(s.count for s in samples) < self.train_batch_size:
                    samples.append(self.workers.local_worker().sample())
                samples = SampleBatch.concat_samples(samples)

        # Handle everything as if multiagent
        if isinstance(samples, SampleBatch):
            samples = MultiAgentBatch({
                DEFAULT_POLICY_ID: samples
            }, samples.count)

        # Normalize the requested fields (e.g. advantages) per policy batch.
        for policy_id, policy in self.policies.items():
            if policy_id not in samples.policy_batches:
                continue
            batch = samples.policy_batches[policy_id]
            for field in self.standardize_fields:
                value = batch[field]
                standardized = (value - value.mean()) / max(1e-4, value.std())
                batch[field] = standardized

        num_loaded_tuples = {}
        with self.load_timer:
            for policy_id, batch in samples.policy_batches.items():
                if policy_id not in self.policies:
                    continue
                policy = self.policies[policy_id]
                policy._debug_vars()
                tuples = policy._get_loss_inputs_dict(
                    batch, shuffle=self.shuffle_sequences)
                data_keys = [ph for _, ph in policy._loss_inputs]
                if policy._state_inputs:
                    state_keys = policy._state_inputs + [policy._seq_lens]
                else:
                    state_keys = []
                num_loaded_tuples[policy_id] = (
                    self.optimizers[policy_id].load_data(
                        self.sess, [tuples[k] for k in data_keys],
                        [tuples[k] for k in state_keys]))

        fetches = {}
        with self.grad_timer:
            for policy_id, tuples_per_device in num_loaded_tuples.items():
                optimizer = self.optimizers[policy_id]
                num_batches = max(
                    1,
                    int(tuples_per_device) // int(self.per_device_batch_size))
                logger.debug("== sgd epochs for {} ==".format(policy_id))
                for i in range(self.num_sgd_iter):
                    iter_extra_fetches = defaultdict(list)
                    # Visit the minibatches in a fresh random order each epoch.
                    permutation = np.random.permutation(num_batches)
                    for batch_index in range(num_batches):
                        batch_fetches = optimizer.optimize(
                            self.sess, permutation[batch_index] *
                            self.per_device_batch_size)
                        for k, v in batch_fetches[LEARNER_STATS_KEY].items():
                            iter_extra_fetches[k].append(v)
                    logger.debug("{} {}".format(i,
                                                _averaged(iter_extra_fetches)))
                fetches[policy_id] = _averaged(iter_extra_fetches)

        self.num_steps_sampled += samples.count
        # NOTE(review): this only counts the *last* policy's tuples_per_device
        # in the multi-policy case — confirm whether summing over all policies
        # was intended.
        self.num_steps_trained += tuples_per_device * len(self.devices)
        self.learner_stats = fetches
        return fetches

    @override(PolicyOptimizer)
    def stats(self):
        """Returns timing and learner stats merged with the base counters."""
        return dict(
            PolicyOptimizer.stats(self), **{
                "sample_time_ms": round(1000 * self.sample_timer.mean, 3),
                "load_time_ms": round(1000 * self.load_timer.mean, 3),
                "grad_time_ms": round(1000 * self.grad_timer.mean, 3),
                "update_time_ms": round(1000 * self.update_weights_timer.mean,
                                        3),
                "learner": self.learner_stats,
            })
def _averaged(kv):
    """Return {key: mean(values)} for numeric stat lists.

    Entries whose first value is None or a dict (non-averageable stats) are
    dropped from the result.
    """
    return {
        key: np.mean(values)
        for key, values in kv.items()
        if values[0] is not None and not isinstance(values[0], dict)
    }
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/policy_optimizer.py
|
Python
|
import logging
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.evaluation.metrics import collect_episodes, summarize_episodes
logger = logging.getLogger(__name__)
@DeveloperAPI
class PolicyOptimizer:
    """Policy optimizers encapsulate distributed RL optimization strategies.

    Policy optimizers serve as the "control plane" of algorithms. For
    example, AsyncOptimizer is used for A3C, and LocalMultiGPUOptimizer is
    used for PPO. These optimizers are all pluggable, and it is possible
    to mix and match as needed.

    Attributes:
        config (dict): The JSON configuration passed to this optimizer.
        workers (WorkerSet): The set of rollout workers to use.
        num_steps_trained (int): Number of timesteps trained on so far.
        num_steps_sampled (int): Number of timesteps sampled so far.
    """

    @DeveloperAPI
    def __init__(self, workers):
        """Create an optimizer instance.

        Args:
            workers (WorkerSet): The set of rollout workers to use.
        """
        self.workers = workers
        self.episode_history = []
        self.to_be_collected = []
        # Progress counters; subclasses are responsible for updating these.
        self.num_steps_trained = 0
        self.num_steps_sampled = 0

    @DeveloperAPI
    def step(self):
        """Takes a logical optimization step.

        This should run for long enough to minimize call overheads (i.e., at
        least a couple seconds), but short enough to return control
        periodically to callers (i.e., at most a few tens of seconds).

        Returns:
            fetches (dict|None): Optional fetches from compute grads calls.
        """
        raise NotImplementedError

    @DeveloperAPI
    def stats(self):
        """Returns a dictionary of internal performance statistics."""
        return {
            "num_steps_trained": self.num_steps_trained,
            "num_steps_sampled": self.num_steps_sampled,
        }

    @DeveloperAPI
    def save(self):
        """Returns a serializable object representing the optimizer state."""
        return [self.num_steps_trained, self.num_steps_sampled]

    @DeveloperAPI
    def restore(self, data):
        """Restores optimizer state from the given data object."""
        self.num_steps_trained, self.num_steps_sampled = data[0], data[1]

    @DeveloperAPI
    def stop(self):
        """Release any resources used by this optimizer."""
        pass

    @DeveloperAPI
    def collect_metrics(self,
                        timeout_seconds,
                        min_history=100,
                        selected_workers=None):
        """Returns worker and optimizer stats.

        Arguments:
            timeout_seconds (int): Max wait time for a worker before
                dropping its results. This usually indicates a hung worker.
            min_history (int): Min history length to smooth results over.
            selected_workers (list): Override the list of remote workers
                to collect metrics from.

        Returns:
            res (dict): A training result dict from worker metrics with
                `info` replaced with stats from self.
        """
        episodes, self.to_be_collected = collect_episodes(
            self.workers.local_worker(),
            selected_workers or self.workers.remote_workers(),
            self.to_be_collected,
            timeout_seconds=timeout_seconds)
        fresh_episodes = list(episodes)
        # Pad with recent history so smoothed metrics cover up to
        # `min_history` episodes when enough history is available.
        shortfall = min_history - len(episodes)
        if shortfall > 0:
            episodes.extend(self.episode_history[-shortfall:])
            assert len(episodes) <= min_history
        self.episode_history.extend(fresh_episodes)
        self.episode_history = self.episode_history[-min_history:]
        result = summarize_episodes(episodes, fresh_episodes)
        result.update(info=self.stats())
        return result

    @DeveloperAPI
    def reset(self, remote_workers):
        """Called to change the set of remote workers being used."""
        self.workers.reset(remote_workers)

    @DeveloperAPI
    def foreach_worker(self, func):
        """Apply the given function to each worker instance."""
        return self.workers.foreach_worker(func)

    @DeveloperAPI
    def foreach_worker_with_index(self, func):
        """Apply the given function to each worker instance.

        The index will be passed as the second arg to the given function.
        """
        return self.workers.foreach_worker_with_index(func)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/replay_buffer.py
|
Python
|
import numpy as np
import random
import sys
from ray.rllib.optimizers.segment_tree import SumSegmentTree, MinSegmentTree
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.compression import unpack_if_needed
from ray.rllib.utils.window_stat import WindowStat
@DeveloperAPI
class ReplayBuffer:
    """A ring buffer of (obs, action, reward, next_obs, done) transitions.

    Once `size` transitions are stored, the oldest entries are overwritten.
    """

    @DeveloperAPI
    def __init__(self, size):
        """Create a replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        """
        self._storage = []  # the transition tuples (ring buffer)
        self._maxsize = size
        self._next_idx = 0  # next slot to write; wraps around at _maxsize
        self._hit_count = np.zeros(size)  # times each slot has been sampled
        self._eviction_started = False  # True once the buffer has wrapped
        self._num_added = 0
        self._num_sampled = 0
        # Tracks how often entries were sampled before being evicted.
        self._evicted_hit_stats = WindowStat("evicted_hit", 1000)
        self._est_size_bytes = 0  # rough (shallow) memory estimate

    def __len__(self):
        return len(self._storage)

    @DeveloperAPI
    def add(self, obs_t, action, reward, obs_tp1, done, weight):
        # `weight` is unused here; it exists so the signature matches
        # PrioritizedReplayBuffer.add.
        data = (obs_t, action, reward, obs_tp1, done)
        self._num_added += 1
        if self._next_idx >= len(self._storage):
            # Buffer still growing: append a new slot.
            self._storage.append(data)
            self._est_size_bytes += sum(sys.getsizeof(d) for d in data)
        else:
            # Buffer full: overwrite the oldest slot.
            self._storage[self._next_idx] = data
        if self._next_idx + 1 >= self._maxsize:
            self._eviction_started = True
        self._next_idx = (self._next_idx + 1) % self._maxsize
        if self._eviction_started:
            # The slot now pointed at by _next_idx is the next eviction
            # victim; record its hit count and reset it.
            self._evicted_hit_stats.push(self._hit_count[self._next_idx])
            self._hit_count[self._next_idx] = 0

    def _encode_sample(self, idxes):
        # Gather the transitions at `idxes` into batched numpy arrays,
        # decompressing observations if they were stored packed.
        obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
        for i in idxes:
            data = self._storage[i]
            obs_t, action, reward, obs_tp1, done = data
            obses_t.append(np.array(unpack_if_needed(obs_t), copy=False))
            actions.append(np.array(action, copy=False))
            rewards.append(reward)
            obses_tp1.append(np.array(unpack_if_needed(obs_tp1), copy=False))
            dones.append(done)
            self._hit_count[i] += 1
        return (np.array(obses_t), np.array(actions), np.array(rewards),
                np.array(obses_tp1), np.array(dones))

    @DeveloperAPI
    def sample_idxes(self, batch_size):
        """Return `batch_size` uniformly random indexes into the buffer."""
        return np.random.randint(0, len(self._storage), batch_size)

    @DeveloperAPI
    def sample_with_idxes(self, idxes):
        """Sample the transitions at the given indexes."""
        self._num_sampled += len(idxes)
        return self._encode_sample(idxes)

    @DeveloperAPI
    def sample(self, batch_size):
        """Sample a batch of experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        """
        idxes = [
            random.randint(0,
                           len(self._storage) - 1) for _ in range(batch_size)
        ]
        self._num_sampled += batch_size
        return self._encode_sample(idxes)

    @DeveloperAPI
    def stats(self, debug=False):
        """Return usage counters; with debug=True, include eviction stats."""
        data = {
            "added_count": self._num_added,
            "sampled_count": self._num_sampled,
            "est_size_bytes": self._est_size_bytes,
            "num_entries": len(self._storage),
        }
        if debug:
            data.update(self._evicted_hit_stats.stats())
        return data
@DeveloperAPI
class PrioritizedReplayBuffer(ReplayBuffer):
    @DeveloperAPI
    def __init__(self, size, alpha):
        """Create Prioritized Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        alpha: float
            how much prioritization is used
            (0 - no prioritization, 1 - full prioritization)

        See Also
        --------
        ReplayBuffer.__init__
        """
        super(PrioritizedReplayBuffer, self).__init__(size)
        assert alpha > 0
        self._alpha = alpha

        # Segment trees require a power-of-two capacity.
        it_capacity = 1
        while it_capacity < size:
            it_capacity *= 2

        # Sum tree for proportional sampling; min tree for computing the
        # normalizing max importance-sampling weight.
        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        self._max_priority = 1.0
        self._prio_change_stats = WindowStat("reprio", 1000)

    @DeveloperAPI
    def add(self, obs_t, action, reward, obs_tp1, done, weight):
        """See ReplayBuffer.store_effect"""
        idx = self._next_idx
        super(PrioritizedReplayBuffer, self).add(obs_t, action, reward,
                                                 obs_tp1, done, weight)
        if weight is None:
            # New transitions default to the max priority seen so far, so
            # they are likely to be sampled at least once.
            weight = self._max_priority
        self._it_sum[idx] = weight**self._alpha
        self._it_min[idx] = weight**self._alpha

    def _sample_proportional(self, batch_size):
        # Sample indexes with probability proportional to priority by
        # inverting a uniformly random prefix-sum mass through the sum tree.
        res = []
        for _ in range(batch_size):
            # TODO(szymon): should we ensure no repeats?
            mass = random.random() * self._it_sum.sum(0, len(self._storage))
            idx = self._it_sum.find_prefixsum_idx(mass)
            res.append(idx)
        return res

    @DeveloperAPI
    def sample_idxes(self, batch_size):
        """Return `batch_size` indexes sampled proportionally to priority."""
        return self._sample_proportional(batch_size)

    @DeveloperAPI
    def sample_with_idxes(self, idxes, beta):
        """Sample the given indexes, with importance weights at exponent
        `beta`."""
        assert beta > 0
        self._num_sampled += len(idxes)
        weights = []
        # Normalize by the max possible weight so all weights are in (0, 1].
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * len(self._storage))**(-beta)
        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * len(self._storage))**(-beta)
            weights.append(weight / max_weight)
        weights = np.array(weights)
        encoded_sample = self._encode_sample(idxes)
        return tuple(list(encoded_sample) + [weights, idxes])

    @DeveloperAPI
    def sample(self, batch_size, beta):
        """Sample a batch of experiences.

        compared to ReplayBuffer.sample
        it also returns importance weights and idxes
        of sampled experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.
        beta: float
            To what degree to use importance weights
            (0 - no corrections, 1 - full correction)

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        weights: np.array
            Array of shape (batch_size,) and dtype np.float32
            denoting importance weight of each sampled transition
        idxes: np.array
            Array of shape (batch_size,) and dtype np.int32
            indexes in buffer of sampled experiences
        """
        assert beta > 0
        self._num_sampled += batch_size
        idxes = self._sample_proportional(batch_size)
        weights = []
        # Normalize by the max possible weight so all weights are in (0, 1].
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * len(self._storage))**(-beta)
        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * len(self._storage))**(-beta)
            weights.append(weight / max_weight)
        weights = np.array(weights)
        encoded_sample = self._encode_sample(idxes)
        return tuple(list(encoded_sample) + [weights, idxes])

    @DeveloperAPI
    def update_priorities(self, idxes, priorities):
        """Update priorities of sampled transitions.

        sets priority of transition at index idxes[i] in buffer
        to priorities[i].

        Parameters
        ----------
        idxes: [int]
            List of idxes of sampled transitions
        priorities: [float]
            List of updated priorities corresponding to
            transitions at the sampled idxes denoted by
            variable `idxes`.
        """
        assert len(idxes) == len(priorities)
        for idx, priority in zip(idxes, priorities):
            assert priority > 0
            assert 0 <= idx < len(self._storage)
            # Track how much each priority changed, for debug stats.
            delta = priority**self._alpha - self._it_sum[idx]
            self._prio_change_stats.push(delta)
            self._it_sum[idx] = priority**self._alpha
            self._it_min[idx] = priority**self._alpha
            self._max_priority = max(self._max_priority, priority)

    @DeveloperAPI
    def stats(self, debug=False):
        """Return buffer stats; debug=True adds re-prioritization stats."""
        parent = ReplayBuffer.stats(self, debug)
        if debug:
            parent.update(self._prio_change_stats.stats())
        return parent
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/rollout.py
|
Python
|
import logging
import ray
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.memory import ray_get_and_free
logger = logging.getLogger(__name__)
def collect_samples(agents, sample_batch_size, num_envs_per_worker,
                    train_batch_size):
    """Collects at least train_batch_size samples, never discarding any."""
    collected = 0
    batches = []
    # Map of in-flight sample task -> the worker that owns it.
    in_flight = {}
    for worker in agents:
        in_flight[worker.sample.remote()] = worker
    while in_flight:
        [ready_id], _ = ray.wait(list(in_flight))
        worker = in_flight.pop(ready_id)
        batch = ray_get_and_free(ready_id)
        assert batch.count >= sample_batch_size * num_envs_per_worker
        collected += batch.count
        batches.append(batch)
        # Only launch more tasks if we don't already have enough pending
        expected_pending = (
            len(in_flight) * sample_batch_size * num_envs_per_worker)
        if collected + expected_pending < train_batch_size:
            in_flight[worker.sample.remote()] = worker
    return SampleBatch.concat_samples(batches)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/segment_tree.py
|
Python
|
import operator
class SegmentTree:
    def __init__(self, capacity, operation, neutral_element):
        """Build a Segment Tree data structure.

        https://en.wikipedia.org/wiki/Segment_tree

        Can be used as a regular array, but with two important differences:

            a) setting an item's value is slightly slower:
               O(lg capacity) instead of O(1).
            b) the user has access to an efficient `reduce` operation which
               reduces `operation` over a contiguous subsequence of items in
               the array.

        Parameters
        ----------
        capacity: int
            Total size of the array - must be a power of two.
        operation: lambda obj, obj -> obj
            an operation for combining elements (eg. sum, max); it must
            form a mathematical group together with the set of
            possible values for array elements.
        neutral_element: obj
            neutral element for the operation above. eg. float('-inf')
            for max and 0 for sum.
        """
        assert capacity > 0 and capacity & (capacity - 1) == 0, \
            "capacity must be positive and a power of 2."
        self._capacity = capacity
        # Flat binary-heap layout: node 1 is the root, node i's children are
        # 2*i and 2*i + 1, and the leaves occupy [capacity, 2*capacity).
        self._value = [neutral_element for _ in range(2 * capacity)]
        self._operation = operation

    def _reduce_helper(self, start, end, node, node_start, node_end):
        # Recursively reduce over the inclusive range [start, end] within
        # the subtree rooted at `node`, covering [node_start, node_end].
        if start == node_start and end == node_end:
            return self._value[node]
        mid = (node_start + node_end) // 2
        if end <= mid:
            return self._reduce_helper(start, end, 2 * node, node_start, mid)
        else:
            if mid + 1 <= start:
                return self._reduce_helper(start, end, 2 * node + 1, mid + 1,
                                           node_end)
            else:
                # Query straddles both children; combine the two halves.
                return self._operation(
                    self._reduce_helper(start, mid, 2 * node, node_start, mid),
                    self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1,
                                        node_end))

    def reduce(self, start=0, end=None):
        """Returns result of applying `self.operation`
        to a contiguous subsequence of the array.

            self.operation(
                arr[start], operation(arr[start+1], operation(... arr[end])))

        Parameters
        ----------
        start: int
            beginning of the subsequence
        end: int
            end of the subsequence (inclusive; defaults to the last index,
            negative values count from the end like list indexing)

        Returns
        -------
        reduced: obj
            result of reducing self.operation over the specified range of
            array elements.
        """
        if end is None:
            end = self._capacity - 1
        if end < 0:
            end += self._capacity
        return self._reduce_helper(start, end, 1, 0, self._capacity - 1)

    def __setitem__(self, idx, val):
        # Added bounds check for symmetry with __getitem__; catches silent
        # corruption of internal tree nodes from out-of-range writes.
        assert 0 <= idx < self._capacity
        # index of the leaf
        idx += self._capacity
        self._value[idx] = val
        idx //= 2
        # Recompute the reduction along the path from the leaf to the root.
        while idx >= 1:
            self._value[idx] = self._operation(self._value[2 * idx],
                                               self._value[2 * idx + 1])
            idx //= 2

    def __getitem__(self, idx):
        assert 0 <= idx < self._capacity
        return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
    """Segment tree specialized for range sums and prefix-sum search."""

    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(
            capacity=capacity, operation=operator.add, neutral_element=0.0)

    def sum(self, start=0, end=None):
        """Returns arr[start] + ... + arr[end]"""
        return super(SumSegmentTree, self).reduce(start, end)

    def find_prefixsum_idx(self, prefixsum):
        """Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum

        if array values are probabilities, this function
        allows to sample indexes according to the discrete
        probability efficiently.

        Parameters
        ----------
        prefixsum: float
            upperbound on the sum of array prefix

        Returns
        -------
        idx: int
            highest index satisfying the prefixsum constraint
        """
        # Small tolerance absorbs floating point accumulation error.
        assert 0 <= prefixsum <= self.sum() + 1e-5
        idx = 1
        # Descend from the root: go left while the left subtree holds more
        # mass than the remaining prefixsum; otherwise subtract the left
        # subtree's mass and go right.
        while idx < self._capacity:  # while non-leaf
            if self._value[2 * idx] > prefixsum:
                idx = 2 * idx
            else:
                prefixsum -= self._value[2 * idx]
                idx = 2 * idx + 1
        # Convert the leaf's heap position back into an array index.
        return idx - self._capacity
class MinSegmentTree(SegmentTree):
    """Segment tree specialized for range-minimum queries."""

    def __init__(self, capacity):
        super(MinSegmentTree, self).__init__(
            capacity=capacity,
            operation=min,
            neutral_element=float("inf"))

    def min(self, start=0, end=None):
        """Returns min(arr[start], ..., arr[end])."""
        return super(MinSegmentTree, self).reduce(start, end)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/sync_batch_replay_optimizer.py
|
Python
|
import random
import ray
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.memory import ray_get_and_free
class SyncBatchReplayOptimizer(PolicyOptimizer):
    """Synchronous replay optimizer that stores and replays whole batches.

    Replaying entire sample batches (rather than individual timesteps)
    enables RNN support. Does not currently support prioritization."""

    def __init__(self,
                 workers,
                 learning_starts=1000,
                 buffer_size=10000,
                 train_batch_size=32):
        """Initialize a batch replay optimizer.

        Arguments:
            workers (WorkerSet): set of all workers
            learning_starts (int): start learning after this number of
                timesteps have been collected
            buffer_size (int): max timesteps to keep in the replay buffer
            train_batch_size (int): number of timesteps to train on at once
        """
        PolicyOptimizer.__init__(self, workers)
        self.replay_starts = learning_starts
        self.max_buffer_size = buffer_size
        self.train_batch_size = train_batch_size
        assert self.max_buffer_size >= self.replay_starts
        # Buffer holds whole MultiAgentBatch objects; buffer_size tracks the
        # total timestep count across all of them.
        self.replay_buffer = []
        self.buffer_size = 0
        # Timing / reporting state.
        self.update_weights_timer = TimerStat()
        self.sample_timer = TimerStat()
        self.grad_timer = TimerStat()
        self.learner_stats = {}

    @override(PolicyOptimizer)
    def step(self):
        # Push the freshest local weights out to every remote worker.
        with self.update_weights_timer:
            if self.workers.remote_workers():
                weights = ray.put(self.workers.local_worker().get_weights())
                for worker in self.workers.remote_workers():
                    worker.set_weights.remote(weights)
        with self.sample_timer:
            if self.workers.remote_workers():
                batches = ray_get_and_free([
                    worker.sample.remote()
                    for worker in self.workers.remote_workers()
                ])
            else:
                batches = [self.workers.local_worker().sample()]
            # Normalize: wrap single-agent batches as multi-agent ones.
            batches = [
                MultiAgentBatch({
                    DEFAULT_POLICY_ID: b
                }, b.count) if isinstance(b, SampleBatch) else b
                for b in batches
            ]
            for batch in batches:
                if batch.count > self.max_buffer_size:
                    raise ValueError(
                        "The size of a single sample batch exceeds the replay "
                        "buffer size ({} > {})".format(batch.count,
                                                       self.max_buffer_size))
                self.replay_buffer.append(batch)
                self.num_steps_sampled += batch.count
                self.buffer_size += batch.count
                # Evict oldest batches (FIFO) until we fit again.
                while self.buffer_size > self.max_buffer_size:
                    dropped = self.replay_buffer.pop(0)
                    self.buffer_size -= dropped.count
        if self.num_steps_sampled < self.replay_starts:
            return {}
        return self._optimize()

    @override(PolicyOptimizer)
    def stats(self):
        extra_stats = {
            "sample_time_ms": round(1000 * self.sample_timer.mean, 3),
            "grad_time_ms": round(1000 * self.grad_timer.mean, 3),
            "update_time_ms": round(1000 * self.update_weights_timer.mean, 3),
            "opt_peak_throughput": round(self.grad_timer.mean_throughput, 3),
            "opt_samples": round(self.grad_timer.mean_units_processed, 3),
            "learner": self.learner_stats,
        }
        return dict(PolicyOptimizer.stats(self), **extra_stats)

    def _optimize(self):
        # Draw random whole batches (with replacement) until at least
        # `train_batch_size` timesteps have been gathered.
        chosen = [random.choice(self.replay_buffer)]
        while sum(b.count for b in chosen) < self.train_batch_size:
            chosen.append(random.choice(self.replay_buffer))
        samples = SampleBatch.concat_samples(chosen)
        with self.grad_timer:
            info_dict = self.workers.local_worker().learn_on_batch(samples)
            for policy_id, info in info_dict.items():
                self.learner_stats[policy_id] = get_learner_stats(info)
            self.grad_timer.push_units_processed(samples.count)
        self.num_steps_trained += samples.count
        return info_dict
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/sync_replay_optimizer.py
|
Python
|
import logging
import collections
import numpy as np
import ray
from ray.rllib.optimizers.replay_buffer import ReplayBuffer, \
PrioritizedReplayBuffer
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.compression import pack_if_needed
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.schedules import LinearSchedule
from ray.rllib.utils.memory import ray_get_and_free
logger = logging.getLogger(__name__)
class SyncReplayOptimizer(PolicyOptimizer):
    """Variant of the local sync optimizer that supports replay (for DQN).

    This optimizer requires that rollout workers return an additional
    "td_error" array in the info return of compute_gradients(). This error
    term will be used for sample prioritization."""

    def __init__(self,
                 workers,
                 learning_starts=1000,
                 buffer_size=10000,
                 prioritized_replay=True,
                 prioritized_replay_alpha=0.6,
                 prioritized_replay_beta=0.4,
                 prioritized_replay_eps=1e-6,
                 schedule_max_timesteps=100000,
                 beta_annealing_fraction=0.2,
                 final_prioritized_replay_beta=0.4,
                 train_batch_size=32,
                 sample_batch_size=4,
                 before_learn_on_batch=None,
                 synchronize_sampling=False):
        """Initialize a sync replay optimizer.

        Arguments:
            workers (WorkerSet): all workers
            learning_starts (int): wait until this many steps have been sampled
                before starting optimization.
            buffer_size (int): max size of the replay buffer
            prioritized_replay (bool): whether to enable prioritized replay
            prioritized_replay_alpha (float): replay alpha hyperparameter
            prioritized_replay_beta (float): replay beta hyperparameter
            prioritized_replay_eps (float): replay eps hyperparameter
            schedule_max_timesteps (int): number of timesteps in the schedule
            beta_annealing_fraction (float): fraction of schedule to anneal
                beta over
            final_prioritized_replay_beta (float): final value of beta
            train_batch_size (int): size of batches to learn on
            sample_batch_size (int): size of batches to sample from workers
            before_learn_on_batch (function): callback to run before passing
                the sampled batch to learn on
            synchronize_sampling (bool): whether to sample the experiences for
                all policies with the same indices (used in MADDPG).
        """
        # NOTE(review): `sample_batch_size` is accepted but not referenced
        # anywhere in this class body — presumably consumed by callers or
        # kept for config compatibility; confirm before removing.
        PolicyOptimizer.__init__(self, workers)
        self.replay_starts = learning_starts
        # linearly annealing beta used in Rainbow paper
        self.prioritized_replay_beta = LinearSchedule(
            schedule_timesteps=int(
                schedule_max_timesteps * beta_annealing_fraction),
            initial_p=prioritized_replay_beta,
            final_p=final_prioritized_replay_beta)
        self.prioritized_replay_eps = prioritized_replay_eps
        self.train_batch_size = train_batch_size
        self.before_learn_on_batch = before_learn_on_batch
        self.synchronize_sampling = synchronize_sampling
        # Stats
        self.update_weights_timer = TimerStat()
        self.sample_timer = TimerStat()
        self.replay_timer = TimerStat()
        self.grad_timer = TimerStat()
        self.learner_stats = {}
        # Set up replay buffer: one buffer per policy id, created lazily
        # on first access via the defaultdict factory.
        if prioritized_replay:

            def new_buffer():
                return PrioritizedReplayBuffer(
                    buffer_size, alpha=prioritized_replay_alpha)
        else:

            def new_buffer():
                return ReplayBuffer(buffer_size)

        self.replay_buffers = collections.defaultdict(new_buffer)
        if buffer_size < self.replay_starts:
            logger.warning("buffer_size={} < replay_starts={}".format(
                buffer_size, self.replay_starts))

    @override(PolicyOptimizer)
    def step(self):
        # Broadcast the latest local weights to all remote workers.
        with self.update_weights_timer:
            if self.workers.remote_workers():
                weights = ray.put(self.workers.local_worker().get_weights())
                for e in self.workers.remote_workers():
                    e.set_weights.remote(weights)
        with self.sample_timer:
            if self.workers.remote_workers():
                batch = SampleBatch.concat_samples(
                    ray_get_and_free([
                        e.sample.remote()
                        for e in self.workers.remote_workers()
                    ]))
            else:
                batch = self.workers.local_worker().sample()
            # Handle everything as if multiagent
            if isinstance(batch, SampleBatch):
                batch = MultiAgentBatch({
                    DEFAULT_POLICY_ID: batch
                }, batch.count)
            # Insert each timestep individually into its policy's buffer;
            # obs/new_obs go through pack_if_needed (compression helper).
            for policy_id, s in batch.policy_batches.items():
                for row in s.rows():
                    self.replay_buffers[policy_id].add(
                        pack_if_needed(row["obs"]),
                        row["actions"],
                        row["rewards"],
                        pack_if_needed(row["new_obs"]),
                        row["dones"],
                        # weight=None defers to the buffer's own default
                        # priority assignment — confirm against the
                        # ReplayBuffer.add implementation.
                        weight=None)
        # The replay-start check uses the step count *before* adding this
        # batch; num_steps_sampled is incremented afterwards.
        if self.num_steps_sampled >= self.replay_starts:
            self._optimize()
        self.num_steps_sampled += batch.count

    @override(PolicyOptimizer)
    def stats(self):
        return dict(
            PolicyOptimizer.stats(self), **{
                "sample_time_ms": round(1000 * self.sample_timer.mean, 3),
                "replay_time_ms": round(1000 * self.replay_timer.mean, 3),
                "grad_time_ms": round(1000 * self.grad_timer.mean, 3),
                "update_time_ms": round(1000 * self.update_weights_timer.mean,
                                        3),
                "opt_peak_throughput": round(self.grad_timer.mean_throughput,
                                             3),
                "opt_samples": round(self.grad_timer.mean_units_processed, 3),
                "learner": self.learner_stats,
            })

    def _optimize(self):
        """Sample a train batch from the buffers and run one learner update."""
        samples = self._replay()
        with self.grad_timer:
            if self.before_learn_on_batch:
                samples = self.before_learn_on_batch(
                    samples,
                    self.workers.local_worker().policy_map,
                    self.train_batch_size)
            info_dict = self.workers.local_worker().learn_on_batch(samples)
            for policy_id, info in info_dict.items():
                self.learner_stats[policy_id] = get_learner_stats(info)
                replay_buffer = self.replay_buffers[policy_id]
                # For prioritized buffers, refresh priorities from the new
                # TD errors (eps keeps every priority strictly positive).
                if isinstance(replay_buffer, PrioritizedReplayBuffer):
                    td_error = info["td_error"]
                    new_priorities = (
                        np.abs(td_error) + self.prioritized_replay_eps)
                    replay_buffer.update_priorities(
                        samples.policy_batches[policy_id]["batch_indexes"],
                        new_priorities)
            self.grad_timer.push_units_processed(samples.count)
        self.num_steps_trained += samples.count

    def _replay(self):
        """Draw one train batch per policy from the replay buffers."""
        samples = {}
        idxes = None
        with self.replay_timer:
            for policy_id, replay_buffer in self.replay_buffers.items():
                # With synchronize_sampling, the first buffer's indices are
                # reused for all policies (MADDPG-style aligned sampling).
                if self.synchronize_sampling:
                    if idxes is None:
                        idxes = replay_buffer.sample_idxes(
                            self.train_batch_size)
                else:
                    idxes = replay_buffer.sample_idxes(self.train_batch_size)
                if isinstance(replay_buffer, PrioritizedReplayBuffer):
                    # Beta (importance-sampling correction) is annealed by
                    # *trained* step count, not sampled step count.
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_indexes) = replay_buffer.sample_with_idxes(
                         idxes,
                         beta=self.prioritized_replay_beta.value(
                             self.num_steps_trained))
                else:
                    # Uniform buffers have no IS weights; -1 marks indexes
                    # as not applicable.
                    (obses_t, actions, rewards, obses_tp1,
                     dones) = replay_buffer.sample_with_idxes(idxes)
                    weights = np.ones_like(rewards)
                    batch_indexes = -np.ones_like(rewards)
                samples[policy_id] = SampleBatch({
                    "obs": obses_t,
                    "actions": actions,
                    "rewards": rewards,
                    "new_obs": obses_tp1,
                    "dones": dones,
                    "weights": weights,
                    "batch_indexes": batch_indexes
                })
        return MultiAgentBatch(samples, self.train_batch_size)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/sync_samples_optimizer.py
|
Python
|
import logging
import random
from collections import defaultdict
import ray
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.optimizers.multi_gpu_optimizer import _averaged
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.filter import RunningStat
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.memory import ray_get_and_free
logger = logging.getLogger(__name__)
class SyncSamplesOptimizer(PolicyOptimizer):
    """A simple synchronous RL optimizer.

    In each step, this optimizer pulls samples from a number of remote
    workers, concatenates them, and then updates a local model. The updated
    model weights are then broadcast to all remote workers.
    """

    def __init__(self,
                 workers,
                 num_sgd_iter=1,
                 train_batch_size=1,
                 sgd_minibatch_size=0,
                 standardize_fields=frozenset([])):
        # num_sgd_iter: SGD passes over each collected train batch.
        # sgd_minibatch_size: 0 disables minibatching (whole batch at once).
        # standardize_fields: batch columns normalized to zero-mean/unit-std
        # before learning.
        PolicyOptimizer.__init__(self, workers)
        self.update_weights_timer = TimerStat()
        self.standardize_fields = standardize_fields
        self.sample_timer = TimerStat()
        self.grad_timer = TimerStat()
        self.throughput = RunningStat()
        self.num_sgd_iter = num_sgd_iter
        self.sgd_minibatch_size = sgd_minibatch_size
        self.train_batch_size = train_batch_size
        self.learner_stats = {}
        # Map of policy_id -> policy for all trainable policies on the
        # local worker.
        self.policies = dict(self.workers.local_worker()
                             .foreach_trainable_policy(lambda p, i: (i, p)))
        logger.debug("Policies to train: {}".format(self.policies))

    @override(PolicyOptimizer)
    def step(self):
        # Broadcast the latest local weights to all remote workers.
        with self.update_weights_timer:
            if self.workers.remote_workers():
                weights = ray.put(self.workers.local_worker().get_weights())
                for e in self.workers.remote_workers():
                    e.set_weights.remote(weights)
        # Keep sampling rounds until at least train_batch_size timesteps
        # have been collected.
        with self.sample_timer:
            samples = []
            while sum(s.count for s in samples) < self.train_batch_size:
                if self.workers.remote_workers():
                    samples.extend(
                        ray_get_and_free([
                            e.sample.remote()
                            for e in self.workers.remote_workers()
                        ]))
                else:
                    samples.append(self.workers.local_worker().sample())
            samples = SampleBatch.concat_samples(samples)
            self.sample_timer.push_units_processed(samples.count)
        # Handle everything as if multiagent
        if isinstance(samples, SampleBatch):
            samples = MultiAgentBatch({
                DEFAULT_POLICY_ID: samples
            }, samples.count)
        fetches = {}
        with self.grad_timer:
            for policy_id, policy in self.policies.items():
                if policy_id not in samples.policy_batches:
                    continue
                batch = samples.policy_batches[policy_id]
                # Normalize requested columns: (x - mean) / std, with std
                # floored at 1e-4 to avoid division by ~zero.
                for field in self.standardize_fields:
                    value = batch[field]
                    standardized = (value - value.mean()) / max(
                        1e-4, value.std())
                    batch[field] = standardized
                # num_sgd_iter epochs of (mini)batched learning per policy.
                for i in range(self.num_sgd_iter):
                    iter_extra_fetches = defaultdict(list)
                    for minibatch in self._minibatches(batch):
                        batch_fetches = (
                            self.workers.local_worker().learn_on_batch(
                                MultiAgentBatch({
                                    policy_id: minibatch
                                }, minibatch.count)))[policy_id]
                        for k, v in batch_fetches[LEARNER_STATS_KEY].items():
                            iter_extra_fetches[k].append(v)
                    logger.debug("{} {}".format(i,
                                                _averaged(iter_extra_fetches)))
                fetches[policy_id] = _averaged(iter_extra_fetches)
            self.grad_timer.push_units_processed(samples.count)
        # Single-policy runs report a flat stats dict for convenience.
        if len(fetches) == 1 and DEFAULT_POLICY_ID in fetches:
            self.learner_stats = fetches[DEFAULT_POLICY_ID]
        else:
            self.learner_stats = fetches
        self.num_steps_sampled += samples.count
        self.num_steps_trained += samples.count
        return self.learner_stats

    @override(PolicyOptimizer)
    def stats(self):
        return dict(
            PolicyOptimizer.stats(self), **{
                "sample_time_ms": round(1000 * self.sample_timer.mean, 3),
                "grad_time_ms": round(1000 * self.grad_timer.mean, 3),
                "update_time_ms": round(1000 * self.update_weights_timer.mean,
                                        3),
                "opt_peak_throughput": round(self.grad_timer.mean_throughput,
                                             3),
                "sample_peak_throughput": round(
                    self.sample_timer.mean_throughput, 3),
                "opt_samples": round(self.grad_timer.mean_units_processed, 3),
                "learner": self.learner_stats,
            })

    def _minibatches(self, samples):
        """Yield shuffled minibatch slices of `samples` (or the whole batch)."""
        if not self.sgd_minibatch_size:
            yield samples
            return
        if isinstance(samples, MultiAgentBatch):
            raise NotImplementedError(
                "Minibatching not implemented for multi-agent in simple mode")
        # Shuffling would break RNN sequence ordering, so skip it then.
        if "state_in_0" in samples.data:
            logger.warning("Not shuffling RNN data for SGD in simple mode")
        else:
            samples.shuffle()
        i = 0
        slices = []
        while i < samples.count:
            slices.append((i, i + self.sgd_minibatch_size))
            i += self.sgd_minibatch_size
        random.shuffle(slices)
        # NOTE(review): the last slice end may exceed samples.count; this
        # assumes SampleBatch.slice clamps the range — confirm.
        for i, j in slices:
            yield samples.slice(i, j)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/optimizers/tests/test_segment_tree.py
|
Python
|
import numpy as np
from ray.rllib.optimizers.segment_tree import SumSegmentTree, MinSegmentTree
def test_tree_set():
    """Range sums over a tree with two non-zero leaves match direct sums."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[3] = 3.0
    cases = [
        ((), 4.0),
        ((0, 2), 0.0),
        ((0, 3), 1.0),
        ((2, 3), 1.0),
        ((2, -1), 1.0),
        ((2, 4), 4.0),
    ]
    for query_args, expected in cases:
        assert np.isclose(tree.sum(*query_args), expected)
def test_tree_set_overlap():
    """Writing the same leaf twice keeps only the last value."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[2] = 3.0
    cases = [
        ((), 3.0),
        ((2, 3), 3.0),
        ((2, -1), 3.0),
        ((2, 4), 3.0),
        ((1, 2), 0.0),
    ]
    for query_args, expected in cases:
        assert np.isclose(tree.sum(*query_args), expected)
def test_prefixsum_idx():
    """find_prefixsum_idx returns the leaf whose prefix mass covers the query."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[3] = 3.0
    for query, expected_idx in [(0.0, 2), (0.5, 2), (0.99, 2), (1.01, 3),
                                (3.00, 3), (4.00, 3)]:
        assert tree.find_prefixsum_idx(query) == expected_idx
def test_prefixsum_idx2():
    """Prefix-sum inversion with every leaf populated."""
    tree = SumSegmentTree(4)
    tree[0] = 0.5
    tree[1] = 1.0
    tree[2] = 1.0
    tree[3] = 3.0
    for query, expected_idx in [(0.00, 0), (0.55, 1), (0.99, 1), (1.51, 2),
                                (3.00, 3), (5.50, 3)]:
        assert tree.find_prefixsum_idx(query) == expected_idx
def test_max_interval_tree():
    """Range-minimum queries track successive updates to individual leaves."""
    tree = MinSegmentTree(4)
    tree[0] = 1.0
    tree[2] = 0.5
    tree[3] = 3.0
    for query_args, expected in [((), 0.5), ((0, 2), 1.0), ((0, 3), 0.5),
                                 ((0, -1), 0.5), ((2, 4), 0.5), ((3, 4), 3.0)]:
        assert np.isclose(tree.min(*query_args), expected)
    # Raise leaf 2 a little: it still dominates the global minimum.
    tree[2] = 0.7
    for query_args, expected in [((), 0.7), ((0, 2), 1.0), ((0, 3), 0.7),
                                 ((0, -1), 0.7), ((2, 4), 0.7), ((3, 4), 3.0)]:
        assert np.isclose(tree.min(*query_args), expected)
    # Raise leaf 2 above everything else: leaf 0 takes over the minimum.
    tree[2] = 4.0
    for query_args, expected in [((), 1.0), ((0, 2), 1.0), ((0, 3), 1.0),
                                 ((0, -1), 1.0), ((2, 4), 3.0), ((2, 3), 4.0),
                                 ((2, -1), 4.0), ((3, 4), 3.0)]:
        assert np.isclose(tree.min(*query_args), expected)
if __name__ == "__main__":
    # Run every segment-tree test directly (no test runner required).
    for test_fn in (test_tree_set, test_tree_set_overlap, test_prefixsum_idx,
                    test_prefixsum_idx2, test_max_interval_tree):
        test_fn()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/__init__.py
|
Python
|
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.torch_policy import TorchPolicy
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.policy.tf_policy_template import build_tf_policy
# Public interface of `ray.rllib.policy`: the policy base classes plus the
# functional TF/Torch policy builders re-exported above.
__all__ = [
    "Policy",
    "TFPolicy",
    "TorchPolicy",
    "build_tf_policy",
    "build_torch_policy",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/dynamic_tf_policy.py
|
Python
|
"""Graph mode TF policy built using build_tf_policy()."""
from collections import OrderedDict
import logging
import numpy as np
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.debug import log_once, summarize
from ray.rllib.utils.tracking_dict import UsageTrackingDict
tf = try_import_tf()
logger = logging.getLogger(__name__)
class DynamicTFPolicy(TFPolicy):
    """A TFPolicy that auto-defines placeholders dynamically at runtime.

    Initialization of this class occurs in two phases.
    * Phase 1: the model is created and model variables are initialized.
    * Phase 2: a fake batch of data is created, sent to the trajectory
      postprocessor, and then used to create placeholders for the loss
      function. The loss and stats functions are initialized with these
      placeholders.

    Initialization defines the static graph.

    Attributes:
        observation_space (gym.Space): observation space of the policy.
        action_space (gym.Space): action space of the policy.
        config (dict): config of the policy
        model (ModelV2): TF model instance
        dist_class (type): TF action distribution class
    """

    def __init__(self,
                 obs_space,
                 action_space,
                 config,
                 loss_fn,
                 stats_fn=None,
                 grad_stats_fn=None,
                 before_loss_init=None,
                 make_model=None,
                 action_sampler_fn=None,
                 existing_inputs=None,
                 existing_model=None,
                 get_batch_divisibility_req=None,
                 obs_include_prev_action_reward=True):
        """Initialize a dynamic TF policy.

        Arguments:
            obs_space (gym.Space): Observation space of the policy.
            action_space (gym.Space): Action space of the policy.
            config (dict): Policy-specific configuration data.
            loss_fn (func): function that returns a loss tensor the policy
                graph, and dict of experience tensor placeholders
            stats_fn (func): optional function that returns a dict of
                TF fetches given the policy and batch input tensors
            grad_stats_fn (func): optional function that returns a dict of
                TF fetches given the policy and loss gradient tensors
            before_loss_init (Optional[callable]): Optional function to run
                prior to loss init that takes the same arguments as __init__.
            make_model (func): optional function that returns a ModelV2 object
                given (policy, obs_space, action_space, config).
                All policy variables should be created in this function. If not
                specified, a default model will be created.
            action_sampler_fn (func): optional function that returns a
                tuple of action and action logp tensors given
                (policy, model, input_dict, obs_space, action_space, config).
                If not specified, a default action distribution will be used.
            existing_inputs (OrderedDict): When copying a policy, this
                specifies an existing dict of placeholders to use instead of
                defining new ones
            existing_model (ModelV2): when copying a policy, this specifies
                an existing model to clone and share weights with
            get_batch_divisibility_req (func): optional function that returns
                the divisibility requirement for sample batches
            obs_include_prev_action_reward (bool): whether to include the
                previous action and reward in the model input
        """
        self.config = config
        self._loss_fn = loss_fn
        self._stats_fn = stats_fn
        self._grad_stats_fn = grad_stats_fn
        self._obs_include_prev_action_reward = obs_include_prev_action_reward
        # Setup standard placeholders. When copying, reuse the existing
        # placeholders so that the copy shares the same graph inputs.
        prev_actions = None
        prev_rewards = None
        if existing_inputs is not None:
            obs = existing_inputs[SampleBatch.CUR_OBS]
            if self._obs_include_prev_action_reward:
                prev_actions = existing_inputs[SampleBatch.PREV_ACTIONS]
                prev_rewards = existing_inputs[SampleBatch.PREV_REWARDS]
        else:
            obs = tf.placeholder(
                tf.float32,
                shape=[None] + list(obs_space.shape),
                name="observation")
            if self._obs_include_prev_action_reward:
                prev_actions = ModelCatalog.get_action_placeholder(
                    action_space)
                prev_rewards = tf.placeholder(
                    tf.float32, [None], name="prev_reward")
        self._input_dict = {
            SampleBatch.CUR_OBS: obs,
            SampleBatch.PREV_ACTIONS: prev_actions,
            SampleBatch.PREV_REWARDS: prev_rewards,
            "is_training": self._get_is_training_placeholder(),
        }
        self._seq_lens = tf.placeholder(
            dtype=tf.int32, shape=[None], name="seq_lens")
        # Setup model. With a custom action sampler the distribution class
        # is unused; otherwise derive it (and the logit size) from the
        # action space.
        if action_sampler_fn:
            if not make_model:
                raise ValueError(
                    "make_model is required if action_sampler_fn is given")
            self.dist_class = None
        else:
            self.dist_class, logit_dim = ModelCatalog.get_action_dist(
                action_space, self.config["model"])
        if existing_model:
            self.model = existing_model
        elif make_model:
            self.model = make_model(self, obs_space, action_space, config)
        else:
            self.model = ModelCatalog.get_model_v2(
                obs_space,
                action_space,
                logit_dim,
                self.config["model"],
                framework="tf")
        # RNN state: reuse existing state placeholders when copying,
        # otherwise create one placeholder per initial-state tensor.
        if existing_inputs:
            self._state_in = [
                v for k, v in existing_inputs.items()
                if k.startswith("state_in_")
            ]
            if self._state_in:
                self._seq_lens = existing_inputs["seq_lens"]
        else:
            self._state_in = [
                tf.placeholder(shape=(None, ) + s.shape, dtype=s.dtype)
                for s in self.model.get_initial_state()
            ]
        model_out, self._state_out = self.model(self._input_dict,
                                                self._state_in, self._seq_lens)
        # Setup action sampler
        if action_sampler_fn:
            action_sampler, action_logp = action_sampler_fn(
                self, self.model, self._input_dict, obs_space, action_space,
                config)
        else:
            action_dist = self.dist_class(model_out, self.model)
            action_sampler = action_dist.sample()
            action_logp = action_dist.sampled_action_logp()
        # Phase 1 init
        sess = tf.get_default_session() or tf.Session()
        if get_batch_divisibility_req:
            batch_divisibility_req = get_batch_divisibility_req(self)
        else:
            batch_divisibility_req = 1
        TFPolicy.__init__(
            self,
            obs_space,
            action_space,
            config,
            sess,
            obs_input=obs,
            action_sampler=action_sampler,
            action_logp=action_logp,
            loss=None,  # dynamically initialized on run
            loss_inputs=[],
            model=self.model,
            state_inputs=self._state_in,
            state_outputs=self._state_out,
            prev_action_input=prev_actions,
            prev_reward_input=prev_rewards,
            seq_lens=self._seq_lens,
            max_seq_len=config["model"]["max_seq_len"],
            batch_divisibility_req=batch_divisibility_req)
        # Phase 2 init. Skipped when copying: the copy() path runs its own
        # loss init against the provided existing inputs.
        if before_loss_init is not None:
            before_loss_init(self, obs_space, action_space, config)
        if not existing_inputs:
            self._initialize_loss()

    @override(TFPolicy)
    def copy(self, existing_inputs):
        """Creates a copy of self using existing input placeholders."""
        # Note that there might be RNN state inputs at the end of the list;
        # the +1 accounts for the trailing seq_lens tensor.
        if self._state_inputs:
            num_state_inputs = len(self._state_inputs) + 1
        else:
            num_state_inputs = 0
        if len(self._loss_inputs) + num_state_inputs != len(existing_inputs):
            raise ValueError("Tensor list mismatch", self._loss_inputs,
                             self._state_inputs, existing_inputs)
        for i, (k, v) in enumerate(self._loss_inputs):
            if v.shape.as_list() != existing_inputs[i].shape.as_list():
                raise ValueError("Tensor shape mismatch", i, k, v.shape,
                                 existing_inputs[i].shape)
        # By convention, the loss inputs are followed by state inputs and then
        # the seq len tensor
        rnn_inputs = []
        for i in range(len(self._state_inputs)):
            rnn_inputs.append(("state_in_{}".format(i),
                               existing_inputs[len(self._loss_inputs) + i]))
        if rnn_inputs:
            rnn_inputs.append(("seq_lens", existing_inputs[-1]))
        input_dict = OrderedDict(
            [(k, existing_inputs[i])
             for i, (k, _) in enumerate(self._loss_inputs)] + rnn_inputs)
        instance = self.__class__(
            self.observation_space,
            self.action_space,
            self.config,
            existing_inputs=input_dict,
            existing_model=self.model)
        instance._loss_input_dict = input_dict
        loss = instance._do_loss_init(input_dict)
        loss_inputs = [(k, existing_inputs[i])
                       for i, (k, _) in enumerate(self._loss_inputs)]
        TFPolicy._initialize_loss(instance, loss, loss_inputs)
        if instance._grad_stats_fn:
            instance._stats_fetches.update(
                instance._grad_stats_fn(instance, input_dict, instance._grads))
        return instance

    @override(Policy)
    def get_initial_state(self):
        """Return the model's initial RNN state (empty list if no model)."""
        if self.model:
            return self.model.get_initial_state()
        else:
            return []

    def _initialize_loss(self):
        """Phase 2: build loss placeholders from a fake postprocessed batch."""

        def fake_array(tensor):
            # Zero-filled array matching the tensor's shape, with unknown
            # dims collapsed to 1 (a single fake timestep).
            shape = tensor.shape.as_list()
            shape = [s if s is not None else 1 for s in shape]
            return np.zeros(shape, dtype=tensor.dtype.as_numpy_dtype)

        dummy_batch = {
            SampleBatch.CUR_OBS: fake_array(self._obs_input),
            SampleBatch.NEXT_OBS: fake_array(self._obs_input),
            # np.bool_ (not the removed `np.bool` alias) — same dtype('bool').
            SampleBatch.DONES: np.array([False], dtype=np.bool_),
            SampleBatch.ACTIONS: fake_array(
                ModelCatalog.get_action_placeholder(self.action_space)),
            SampleBatch.REWARDS: np.array([0], dtype=np.float32),
        }
        if self._obs_include_prev_action_reward:
            dummy_batch.update({
                SampleBatch.PREV_ACTIONS: fake_array(self._prev_action_input),
                SampleBatch.PREV_REWARDS: fake_array(self._prev_reward_input),
            })
        state_init = self.get_initial_state()
        state_batches = []
        for i, h in enumerate(state_init):
            dummy_batch["state_in_{}".format(i)] = np.expand_dims(h, 0)
            dummy_batch["state_out_{}".format(i)] = np.expand_dims(h, 0)
            state_batches.append(np.expand_dims(h, 0))
        if state_init:
            dummy_batch["seq_lens"] = np.array([1], dtype=np.int32)
        for k, v in self.extra_compute_action_fetches().items():
            dummy_batch[k] = fake_array(v)
        # postprocessing might depend on variable init, so run it first here
        self._sess.run(tf.global_variables_initializer())
        postprocessed_batch = self.postprocess_trajectory(
            SampleBatch(dummy_batch))
        # model forward pass for the loss (needed after postprocess to
        # overwrite any tensor state from that call)
        self.model(self._input_dict, self._state_in, self._seq_lens)
        if self._obs_include_prev_action_reward:
            train_batch = UsageTrackingDict({
                SampleBatch.PREV_ACTIONS: self._prev_action_input,
                SampleBatch.PREV_REWARDS: self._prev_reward_input,
                SampleBatch.CUR_OBS: self._obs_input,
            })
            loss_inputs = [
                (SampleBatch.PREV_ACTIONS, self._prev_action_input),
                (SampleBatch.PREV_REWARDS, self._prev_reward_input),
                (SampleBatch.CUR_OBS, self._obs_input),
            ]
        else:
            train_batch = UsageTrackingDict({
                SampleBatch.CUR_OBS: self._obs_input,
            })
            loss_inputs = [
                (SampleBatch.CUR_OBS, self._obs_input),
            ]
        # Create one placeholder per remaining batch column (skipping
        # object columns and RNN bookkeeping keys, which are added below).
        for k, v in postprocessed_batch.items():
            if k in train_batch:
                continue
            elif v.dtype == np.object_:
                continue  # can't handle arbitrary objects in TF
            elif k == "seq_lens" or k.startswith("state_in_"):
                continue
            shape = (None, ) + v.shape[1:]
            # float64 is downcast: TF models here consume float32 inputs.
            dtype = np.float32 if v.dtype == np.float64 else v.dtype
            placeholder = tf.placeholder(dtype, shape=shape, name=k)
            train_batch[k] = placeholder
        for i, si in enumerate(self._state_in):
            train_batch["state_in_{}".format(i)] = si
        train_batch["seq_lens"] = self._seq_lens
        if log_once("loss_init"):
            logger.debug(
                "Initializing loss function with dummy input:\n\n{}\n".format(
                    summarize(train_batch)))
        self._loss_input_dict = train_batch
        loss = self._do_loss_init(train_batch)
        # Only columns the loss actually accessed become loss inputs.
        for k in sorted(train_batch.accessed_keys):
            if k != "seq_lens" and not k.startswith("state_in_"):
                loss_inputs.append((k, train_batch[k]))
        TFPolicy._initialize_loss(self, loss, loss_inputs)
        if self._grad_stats_fn:
            self._stats_fetches.update(
                self._grad_stats_fn(self, train_batch, self._grads))
        self._sess.run(tf.global_variables_initializer())

    def _do_loss_init(self, train_batch):
        """Evaluate the user loss_fn (and stats_fn) on `train_batch`."""
        loss = self._loss_fn(self, self.model, self.dist_class, train_batch)
        if self._stats_fn:
            self._stats_fetches.update(self._stats_fn(self, train_batch))
        # override the update ops to be those of the model
        self._update_ops = self.model.update_ops()
        return loss
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/eager_tf_policy.py
|
Python
|
"""Eager mode TF policy built using build_tf_policy().
It supports both traced and non-traced eager execution modes."""
import logging
import functools
import numpy as np
from ray.rllib.evaluation.episode import _flatten_action
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.policy import ACTION_PROB, ACTION_LOGP
from ray.rllib.utils import add_mixins
from ray.rllib.utils.annotations import override
from ray.rllib.utils.debug import log_once
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
def _convert_to_tf(x):
    """Recursively convert `x` to TF tensors.

    SampleBatch inputs are first stripped of the INFOS column, then
    converted column by column. Policy objects and None pass through
    unchanged.
    """
    if isinstance(x, SampleBatch):
        columns = {
            key: value
            for key, value in x.items() if key != SampleBatch.INFOS
        }
        return tf.nest.map_structure(_convert_to_tf, columns)
    if isinstance(x, Policy):
        # Policies are passed through untouched.
        return x
    if x is None:
        return None

    def _leaf_to_tensor(leaf):
        return tf.convert_to_tensor(leaf) if leaf is not None else None

    return tf.nest.map_structure(_leaf_to_tensor, x)
def _convert_to_numpy(x):
if x is None:
return None
try:
return x.numpy()
except AttributeError:
raise TypeError(
("Object of type {} has no method to convert to numpy.").format(
type(x)))
def convert_eager_inputs(func):
    """Decorator: in eager mode, convert call arguments to TF tensors.

    Keyword args named `info_batch` or `episodes` are dropped entirely
    (see TODO below).
    """

    @functools.wraps(func)
    def _func(*args, **kwargs):
        if not tf.executing_eagerly():
            return func(*args, **kwargs)
        converted_args = [_convert_to_tf(a) for a in args]
        # TODO(gehring): find a way to remove specific hacks
        converted_kwargs = {
            name: _convert_to_tf(value)
            for name, value in kwargs.items()
            if name not in {"info_batch", "episodes"}
        }
        return func(*converted_args, **converted_kwargs)

    return _func
def convert_eager_outputs(func):
    """Decorator: in eager mode, convert the wrapped function's return
    value from TF tensors back to numpy."""

    @functools.wraps(func)
    def _func(*args, **kwargs):
        result = func(*args, **kwargs)
        if tf.executing_eagerly():
            return tf.nest.map_structure(_convert_to_numpy, result)
        return result

    return _func
def _disallow_var_creation(next_creator, **kw):
v = next_creator(**kw)
raise ValueError("Detected a variable being created during an eager "
"forward pass. Variables should only be created during "
"model initialization: {}".format(v.name))
def traced_eager_policy(eager_policy_cls):
    """Wrapper that enables tracing for all eager policy methods.

    This is enabled by the --trace / "eager_tracing" config."""

    class TracedEagerPolicy(eager_policy_cls):
        # Each hot-path method is wrapped in a tf.function that is built
        # lazily on first call, then reused for subsequent calls.

        def __init__(self, *args, **kwargs):
            self._traced_learn_on_batch = None
            self._traced_compute_actions = None
            self._traced_compute_gradients = None
            self._traced_apply_gradients = None
            super(TracedEagerPolicy, self).__init__(*args, **kwargs)

        @override(Policy)
        @convert_eager_inputs
        @convert_eager_outputs
        def learn_on_batch(self, samples):
            if self._traced_learn_on_batch is None:
                self._traced_learn_on_batch = tf.function(
                    super(TracedEagerPolicy, self).learn_on_batch,
                    autograph=False)
            return self._traced_learn_on_batch(samples)

        @override(Policy)
        @convert_eager_inputs
        @convert_eager_outputs
        def compute_actions(self,
                            obs_batch,
                            state_batches,
                            prev_action_batch=None,
                            prev_reward_batch=None,
                            info_batch=None,
                            episodes=None,
                            **kwargs):
            # Explicitly tensorize the batch args before tracing so the
            # traced function sees consistent input types.
            obs_batch = tf.convert_to_tensor(obs_batch)
            state_batches = _convert_to_tf(state_batches)
            prev_action_batch = _convert_to_tf(prev_action_batch)
            prev_reward_batch = _convert_to_tf(prev_reward_batch)
            if self._traced_compute_actions is None:
                self._traced_compute_actions = tf.function(
                    super(TracedEagerPolicy, self).compute_actions,
                    autograph=False)
            return self._traced_compute_actions(
                obs_batch, state_batches, prev_action_batch, prev_reward_batch,
                info_batch, episodes, **kwargs)

        @override(Policy)
        @convert_eager_inputs
        @convert_eager_outputs
        def compute_gradients(self, samples):
            if self._traced_compute_gradients is None:
                self._traced_compute_gradients = tf.function(
                    super(TracedEagerPolicy, self).compute_gradients,
                    autograph=False)
            return self._traced_compute_gradients(samples)

        @override(Policy)
        @convert_eager_inputs
        @convert_eager_outputs
        def apply_gradients(self, grads):
            if self._traced_apply_gradients is None:
                self._traced_apply_gradients = tf.function(
                    super(TracedEagerPolicy, self).apply_gradients,
                    autograph=False)
            return self._traced_apply_gradients(grads)

    # Preserve the wrapped class's identity for logging/debugging.
    TracedEagerPolicy.__name__ = eager_policy_cls.__name__
    TracedEagerPolicy.__qualname__ = eager_policy_cls.__qualname__
    return TracedEagerPolicy
def build_eager_tf_policy(name,
                          loss_fn,
                          get_default_config=None,
                          postprocess_fn=None,
                          stats_fn=None,
                          optimizer_fn=None,
                          gradients_fn=None,
                          apply_gradients_fn=None,
                          grad_stats_fn=None,
                          extra_learn_fetches_fn=None,
                          extra_action_fetches_fn=None,
                          before_init=None,
                          before_loss_init=None,
                          after_init=None,
                          make_model=None,
                          action_sampler_fn=None,
                          mixins=None,
                          obs_include_prev_action_reward=True,
                          get_batch_divisibility_req=None):
    """Build an eager TF policy.
    An eager policy runs all operations in eager mode, which makes debugging
    much simpler, but is lower performance.
    You shouldn't need to call this directly. Rather, prefer to build a TF
    graph policy and use set {"eager": true} in the trainer config to have
    it automatically be converted to an eager policy.
    This has the same signature as build_tf_policy()."""
    base = add_mixins(Policy, mixins)
    class eager_policy_cls(base):
        def __init__(self, observation_space, action_space, config):
            # Eager policies may only be constructed once TF is already in
            # eager mode.
            assert tf.executing_eagerly()
            Policy.__init__(self, observation_space, action_space, config)
            self._is_training = False
            self._loss_initialized = False
            self._sess = None
            # Merge user config over the algorithm's default config.
            if get_default_config:
                config = dict(get_default_config(), **config)
            if before_init:
                before_init(self, observation_space, action_space, config)
            self.config = config
            if action_sampler_fn:
                # Custom sampling path: the user must also supply the model.
                if not make_model:
                    raise ValueError(
                        "make_model is required if action_sampler_fn is given")
                self.dist_class = None
            else:
                self.dist_class, logit_dim = ModelCatalog.get_action_dist(
                    action_space, self.config["model"])
            if make_model:
                self.model = make_model(self, observation_space, action_space,
                                        config)
            else:
                self.model = ModelCatalog.get_model_v2(
                    observation_space,
                    action_space,
                    logit_dim,
                    config["model"],
                    framework="tf",
                )
            # Dummy forward pass with batch size 1 to force eager creation
            # of the model's variables before any weights are read/set.
            self.model({
                SampleBatch.CUR_OBS: tf.convert_to_tensor(
                    np.array([observation_space.sample()])),
                SampleBatch.PREV_ACTIONS: tf.convert_to_tensor(
                    [_flatten_action(action_space.sample())]),
                SampleBatch.PREV_REWARDS: tf.convert_to_tensor([0.]),
            }, [
                tf.convert_to_tensor(np.array([s]))
                for s in self.model.get_initial_state()
            ], tf.convert_to_tensor([1]))
            if before_loss_init:
                before_loss_init(self, observation_space, action_space, config)
            self._initialize_loss_with_dummy_batch()
            self._loss_initialized = True
            if optimizer_fn:
                self._optimizer = optimizer_fn(self, config)
            else:
                self._optimizer = tf.train.AdamOptimizer(config["lr"])
            if after_init:
                after_init(self, observation_space, action_space, config)
        @override(Policy)
        def postprocess_trajectory(self,
                                   samples,
                                   other_agent_batches=None,
                                   episode=None):
            """Run the user-supplied postprocess_fn, if any; else pass through."""
            assert tf.executing_eagerly()
            if postprocess_fn:
                return postprocess_fn(self, samples, other_agent_batches,
                                      episode)
            else:
                return samples
        @override(Policy)
        @convert_eager_inputs
        @convert_eager_outputs
        def learn_on_batch(self, samples):
            """Fused gradient computation + application on one batch."""
            with tf.variable_creator_scope(_disallow_var_creation):
                grads_and_vars, stats = self._compute_gradients(samples)
            self._apply_gradients(grads_and_vars)
            return stats
        @override(Policy)
        @convert_eager_inputs
        @convert_eager_outputs
        def compute_gradients(self, samples):
            """Compute (but don't apply) gradients; returns (grads, stats)."""
            with tf.variable_creator_scope(_disallow_var_creation):
                grads_and_vars, stats = self._compute_gradients(samples)
            grads = [g for g, v in grads_and_vars]
            return grads, stats
        @override(Policy)
        @convert_eager_inputs
        @convert_eager_outputs
        def compute_actions(self,
                            obs_batch,
                            state_batches,
                            prev_action_batch=None,
                            prev_reward_batch=None,
                            info_batch=None,
                            episodes=None,
                            **kwargs):
            # TODO: remove python side effect to cull sources of bugs.
            self._is_training = False
            self._state_in = state_batches
            # Batch size: eager tensors/lists support len(); graph tensors
            # expose it through .shape.
            if tf.executing_eagerly():
                n = len(obs_batch)
            else:
                n = obs_batch.shape[0]
            seq_lens = tf.ones(n)
            input_dict = {
                SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_batch),
                "is_training": tf.constant(False),
            }
            if obs_include_prev_action_reward:
                input_dict.update({
                    SampleBatch.PREV_ACTIONS: tf.convert_to_tensor(
                        prev_action_batch),
                    SampleBatch.PREV_REWARDS: tf.convert_to_tensor(
                        prev_reward_batch),
                })
            # Forbid accidental variable creation during the forward pass.
            with tf.variable_creator_scope(_disallow_var_creation):
                model_out, state_out = self.model(input_dict, state_batches,
                                                  seq_lens)
            if self.dist_class:
                # Standard path: sample from the action distribution.
                action_dist = self.dist_class(model_out, self.model)
                action = action_dist.sample()
                logp = action_dist.sampled_action_logp()
            else:
                # Custom sampling path (action_sampler_fn given).
                action, logp = action_sampler_fn(
                    self, self.model, input_dict, self.observation_space,
                    self.action_space, self.config)
            fetches = {}
            if logp is not None:
                fetches.update({
                    ACTION_PROB: tf.exp(logp),
                    ACTION_LOGP: logp,
                })
            if extra_action_fetches_fn:
                fetches.update(extra_action_fetches_fn(self))
            return action, state_out, fetches
        @override(Policy)
        def apply_gradients(self, gradients):
            """Apply externally-computed gradients (numpy or tensors)."""
            self._apply_gradients(
                zip([(tf.convert_to_tensor(g) if g is not None else None)
                     for g in gradients], self.model.trainable_variables()))
        @override(Policy)
        def get_weights(self):
            # Weights are returned as a flat list of numpy arrays, ordered
            # like self.variables().
            variables = self.variables()
            return [v.numpy() for v in variables]
        @override(Policy)
        def set_weights(self, weights):
            variables = self.variables()
            assert len(weights) == len(variables), (len(weights),
                                                    len(variables))
            for v, w in zip(variables, weights):
                v.assign(w)
        def variables(self):
            """Return the list of all savable variables for this policy."""
            return self.model.variables()
        def is_recurrent(self):
            # Recurrence is inferred from whether the last forward pass
            # carried any RNN state.
            return len(self._state_in) > 0
        @override(Policy)
        def num_state_tensors(self):
            return len(self._state_in)
        def get_session(self):
            return None  # None implies eager
        def get_placeholder(self, ph):
            raise ValueError(
                "get_placeholder() is not allowed in eager mode. Try using "
                "rllib.utils.tf_ops.make_tf_callable() to write "
                "functions that work in both graph and eager mode.")
        def loss_initialized(self):
            return self._loss_initialized
        @override(Policy)
        def export_model(self, export_dir):
            # Model export is not implemented for eager policies.
            pass
        @override(Policy)
        def export_checkpoint(self, export_dir):
            # Checkpoint export is not implemented for eager policies.
            pass
        def _get_is_training_placeholder(self):
            # In eager mode there is no placeholder; return a tensor holding
            # the current training flag instead.
            return tf.convert_to_tensor(self._is_training)
        def _apply_gradients(self, grads_and_vars):
            if apply_gradients_fn:
                apply_gradients_fn(self, self._optimizer, grads_and_vars)
            else:
                self._optimizer.apply_gradients(grads_and_vars)
        def _compute_gradients(self, samples):
            """Computes and returns grads as eager tensors."""
            self._is_training = True
            # persistent=True is needed when a custom gradients_fn may call
            # tape.gradient() more than once.
            with tf.GradientTape(persistent=gradients_fn is not None) as tape:
                # TODO: set seq len and state in properly
                self._seq_lens = tf.ones(samples[SampleBatch.CUR_OBS].shape[0])
                self._state_in = []
                model_out, _ = self.model(samples, self._state_in,
                                          self._seq_lens)
                loss = loss_fn(self, self.model, self.dist_class, samples)
            variables = self.model.trainable_variables()
            if gradients_fn:
                # Adapt the tape to the optimizer-like interface that a
                # graph-mode gradients_fn expects.
                class OptimizerWrapper:
                    def __init__(self, tape):
                        self.tape = tape
                    def compute_gradients(self, loss, var_list):
                        return list(
                            zip(self.tape.gradient(loss, var_list), var_list))
                grads_and_vars = gradients_fn(self, OptimizerWrapper(tape),
                                              loss)
            else:
                grads_and_vars = list(
                    zip(tape.gradient(loss, variables), variables))
            if log_once("grad_vars"):
                for _, v in grads_and_vars:
                    logger.info("Optimizing variable {}".format(v.name))
            grads = [g for g, v in grads_and_vars]
            stats = self._stats(self, samples, grads)
            return grads_and_vars, stats
        def _stats(self, outputs, samples, grads):
            """Assemble the learner stats dict from the user callbacks."""
            fetches = {}
            if stats_fn:
                fetches[LEARNER_STATS_KEY] = {
                    k: v
                    for k, v in stats_fn(outputs, samples).items()
                }
            else:
                fetches[LEARNER_STATS_KEY] = {}
            if extra_learn_fetches_fn:
                fetches.update(
                    {k: v
                     for k, v in extra_learn_fetches_fn(self).items()})
            if grad_stats_fn:
                fetches.update({
                    k: v
                    for k, v in grad_stats_fn(self, samples, grads).items()
                })
            return fetches
        def _initialize_loss_with_dummy_batch(self):
            # Dummy forward pass to initialize any policy attributes, etc.
            action_dtype, action_shape = ModelCatalog.get_action_shape(
                self.action_space)
            dummy_batch = {
                SampleBatch.CUR_OBS: tf.convert_to_tensor(
                    np.array([self.observation_space.sample()])),
                SampleBatch.NEXT_OBS: tf.convert_to_tensor(
                    np.array([self.observation_space.sample()])),
                SampleBatch.DONES: tf.convert_to_tensor(
                    np.array([False], dtype=np.bool)),
                SampleBatch.ACTIONS: tf.convert_to_tensor(
                    np.zeros(
                        (1, ) + action_shape[1:],
                        dtype=action_dtype.as_numpy_dtype())),
                SampleBatch.REWARDS: tf.convert_to_tensor(
                    np.array([0], dtype=np.float32)),
            }
            if obs_include_prev_action_reward:
                dummy_batch.update({
                    SampleBatch.PREV_ACTIONS: dummy_batch[SampleBatch.ACTIONS],
                    SampleBatch.PREV_REWARDS: dummy_batch[SampleBatch.REWARDS],
                })
            state_init = self.get_initial_state()
            state_batches = []
            for i, h in enumerate(state_init):
                dummy_batch["state_in_{}".format(i)] = tf.convert_to_tensor(
                    np.expand_dims(h, 0))
                dummy_batch["state_out_{}".format(i)] = tf.convert_to_tensor(
                    np.expand_dims(h, 0))
                state_batches.append(
                    tf.convert_to_tensor(np.expand_dims(h, 0)))
            if state_init:
                dummy_batch["seq_lens"] = tf.convert_to_tensor(
                    np.array([1], dtype=np.int32))
            # for IMPALA which expects a certain sample batch size
            def tile_to(tensor, n):
                return tf.tile(tensor,
                               [n] + [1 for _ in tensor.shape.as_list()[1:]])
            if get_batch_divisibility_req:
                dummy_batch = {
                    k: tile_to(v, get_batch_divisibility_req(self))
                    for k, v in dummy_batch.items()
                }
            # Execute a forward pass to get self.action_dist etc initialized,
            # and also obtain the extra action fetches
            _, _, fetches = self.compute_actions(
                dummy_batch[SampleBatch.CUR_OBS], state_batches,
                dummy_batch.get(SampleBatch.PREV_ACTIONS),
                dummy_batch.get(SampleBatch.PREV_REWARDS))
            dummy_batch.update(fetches)
            postprocessed_batch = self.postprocess_trajectory(
                SampleBatch(dummy_batch))
            # model forward pass for the loss (needed after postprocess to
            # overwrite any tensor state from that call)
            # NOTE(review): this passes the pre-postprocessing `dummy_batch`
            # rather than `postprocessed_batch` — presumably intentional to
            # restore the model's tensor state; confirm against upstream.
            self.model.from_batch(dummy_batch)
            postprocessed_batch = {
                k: tf.convert_to_tensor(v)
                for k, v in postprocessed_batch.items()
            }
            # Run loss (and stats) once so that any lazily-created loss-side
            # attributes/variables exist before training starts.
            loss_fn(self, self.model, self.dist_class, postprocessed_batch)
            if stats_fn:
                stats_fn(self, postprocessed_batch)
        @classmethod
        def with_tracing(cls):
            """Return a tf.function-traced variant of this policy class."""
            return traced_eager_policy(cls)
    eager_policy_cls.__name__ = name + "_eager"
    eager_policy_cls.__qualname__ = name + "_eager"
    return eager_policy_cls
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/policy.py
|
Python
|
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import gym
import numpy as np
from ray.rllib.utils.annotations import DeveloperAPI
# By convention, metrics from optimizing the loss can be reported in the
# `grad_info` dict returned by learn_on_batch() / compute_grads() via this key.
LEARNER_STATS_KEY = "learner_stats"
# Keys under which the sampled action's probability / log-probability are
# reported in the extra action fetches of compute_actions().
ACTION_PROB = "action_prob"
ACTION_LOGP = "action_logp"
class TupleActions(namedtuple("TupleActions", ["batches"])):
    """Used to return tuple actions as a list of batches per tuple element.

    Note: the redundant ``__new__`` override that merely delegated to the
    namedtuple constructor was removed; namedtuple already provides it.
    """

    def numpy(self):
        """Return a copy with each per-element batch converted via .numpy()."""
        return TupleActions([b.numpy() for b in self.batches])
@DeveloperAPI
class Policy(metaclass=ABCMeta):
    """An agent policy and loss, i.e., a TFPolicy or other subclass.
    This object defines how to act in the environment, and also losses used to
    improve the policy based on its experiences. Note that both policy and
    loss are defined together for convenience, though the policy itself is
    logically separate.
    All policies can directly extend Policy, however TensorFlow users may
    find TFPolicy simpler to implement. TFPolicy also enables RLlib
    to apply TensorFlow-specific optimizations such as fusing multiple policy
    graphs and multi-GPU support.
    Attributes:
        observation_space (gym.Space): Observation space of the policy.
        action_space (gym.Space): Action space of the policy.
    """

    @DeveloperAPI
    def __init__(self, observation_space, action_space, config):
        """Initialize the graph.
        This is the standard constructor for policies. The policy
        class you pass into RolloutWorker will be constructed with
        these arguments.
        Args:
            observation_space (gym.Space): Observation space of the policy.
            action_space (gym.Space): Action space of the policy.
            config (dict): Policy-specific configuration data.
        """
        self.observation_space = observation_space
        self.action_space = action_space
        self.config = config

    @abstractmethod
    @DeveloperAPI
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        """Computes actions for the current policy.
        Args:
            obs_batch (Union[List,np.ndarray]): Batch of observations.
            state_batches (Optional[list]): List of RNN state input batches,
                if any.
            prev_action_batch (Optional[List,np.ndarray]): Batch of previous
                action values.
            prev_reward_batch (Optional[List,np.ndarray]): Batch of previous
                rewards.
            info_batch (info): Batch of info objects.
            episodes (list): MultiAgentEpisode for each obs in obs_batch.
                This provides access to all of the internal episode state,
                which may be useful for model-based or multiagent algorithms.
            kwargs: forward compatibility placeholder
        Returns:
            actions (np.ndarray): batch of output actions, with shape like
                [BATCH_SIZE, ACTION_SHAPE].
            state_outs (list): list of RNN state output batches, if any, with
                shape like [STATE_SIZE, BATCH_SIZE].
            info (dict): dictionary of extra feature batches, if any, with
                shape like {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
        """
        raise NotImplementedError

    @DeveloperAPI
    def compute_single_action(self,
                              obs,
                              state=None,
                              prev_action=None,
                              prev_reward=None,
                              info=None,
                              episode=None,
                              clip_actions=False,
                              **kwargs):
        """Unbatched version of compute_actions.
        Arguments:
            obs (obj): Single observation.
            state (list): List of RNN state inputs, if any.
            prev_action (obj): Previous action value, if any.
            prev_reward (float): Previous reward, if any.
            info (dict): info object, if any
            episode (MultiAgentEpisode): this provides access to all of the
                internal episode state, which may be useful for model-based or
                multi-agent algorithms.
            clip_actions (bool): should the action be clipped
            kwargs: forward compatibility placeholder
        Returns:
            actions (obj): single action
            state_outs (list): list of RNN state outputs, if any
            info (dict): dictionary of extra features, if any
        """
        # Fix: `state` may legitimately be omitted for non-recurrent
        # policies; normalize it to an empty list so the batching below
        # doesn't raise TypeError iterating over None.
        if state is None:
            state = []
        prev_action_batch = None
        prev_reward_batch = None
        info_batch = None
        episodes = None
        if prev_action is not None:
            prev_action_batch = [prev_action]
        if prev_reward is not None:
            prev_reward_batch = [prev_reward]
        if info is not None:
            info_batch = [info]
        if episode is not None:
            episodes = [episode]
        [action], state_out, info = self.compute_actions(
            [obs], [[s] for s in state],
            prev_action_batch=prev_action_batch,
            prev_reward_batch=prev_reward_batch,
            info_batch=info_batch,
            episodes=episodes)
        if clip_actions:
            action = clip_action(action, self.action_space)
        # Return action, internal state(s), infos.
        return action, [s[0] for s in state_out], \
            {k: v[0] for k, v in info.items()}

    @DeveloperAPI
    def postprocess_trajectory(self,
                               sample_batch,
                               other_agent_batches=None,
                               episode=None):
        """Implements algorithm-specific trajectory postprocessing.
        This will be called on each trajectory fragment computed during policy
        evaluation. Each fragment is guaranteed to be only from one episode.
        Arguments:
            sample_batch (SampleBatch): batch of experiences for the policy,
                which will contain at most one episode trajectory.
            other_agent_batches (dict): In a multi-agent env, this contains a
                mapping of agent ids to (policy, agent_batch) tuples
                containing the policy and experiences of the other agents.
            episode (MultiAgentEpisode): this provides access to all of the
                internal episode state, which may be useful for model-based or
                multi-agent algorithms.
        Returns:
            SampleBatch: Postprocessed sample batch.
        """
        return sample_batch

    @DeveloperAPI
    def learn_on_batch(self, samples):
        """Fused compute gradients and apply gradients call.
        Either this or the combination of compute/apply grads must be
        implemented by subclasses.
        Returns:
            grad_info: dictionary of extra metadata from compute_gradients().
        Examples:
            >>> batch = ev.sample()
            >>> ev.learn_on_batch(samples)
        """
        grads, grad_info = self.compute_gradients(samples)
        self.apply_gradients(grads)
        return grad_info

    @DeveloperAPI
    def compute_gradients(self, postprocessed_batch):
        """Computes gradients against a batch of experiences.
        Either this or learn_on_batch() must be implemented by subclasses.
        Returns:
            grads (list): List of gradient output values
            info (dict): Extra policy-specific values
        """
        raise NotImplementedError

    @DeveloperAPI
    def apply_gradients(self, gradients):
        """Applies previously computed gradients.
        Either this or learn_on_batch() must be implemented by subclasses.
        """
        raise NotImplementedError

    @DeveloperAPI
    def get_weights(self):
        """Returns model weights.
        Returns:
            weights (obj): Serializable copy or view of model weights
        """
        pass

    @DeveloperAPI
    def set_weights(self, weights):
        """Sets model weights.
        Arguments:
            weights (obj): Serializable copy or view of model weights
        """
        pass

    @DeveloperAPI
    def num_state_tensors(self):
        """
        Returns:
            int: The number of RNN hidden states kept by this Policy's Model.
        """
        return 0

    @DeveloperAPI
    def get_initial_state(self):
        """Returns initial RNN state for the current policy."""
        return []

    @DeveloperAPI
    def get_state(self):
        """Saves all local state.
        Returns:
            state (obj): Serialized local state.
        """
        return self.get_weights()

    @DeveloperAPI
    def set_state(self, state):
        """Restores all local state.
        Arguments:
            state (obj): Serialized local state.
        """
        self.set_weights(state)

    @DeveloperAPI
    def on_global_var_update(self, global_vars):
        """Called on an update to global vars.
        Arguments:
            global_vars (dict): Global variables broadcast from the driver.
        """
        pass

    @DeveloperAPI
    def export_model(self, export_dir):
        """Export Policy to local directory for serving.
        Arguments:
            export_dir (str): Local writable directory.
        """
        raise NotImplementedError

    @DeveloperAPI
    def export_checkpoint(self, export_dir):
        """Export Policy checkpoint to local directory.
        Argument:
            export_dir (str): Local writable directory.
        """
        raise NotImplementedError
def clip_action(action, space):
    """
    Called to clip actions to the specified range of this policy.
    Arguments:
        action: Single action.
        space: Action space the actions should be present in.
    Returns:
        Clipped batch of actions.
    """
    if isinstance(space, gym.spaces.Box):
        # Continuous space: clamp element-wise to the box bounds.
        return np.clip(action, space.low, space.high)
    if isinstance(space, gym.spaces.Tuple):
        if type(action) not in (tuple, list):
            raise ValueError(
                "Expected tuple space for actions {}: {}".
                format(action, space))
        # Recursively clip each sub-action against its sub-space.
        return [clip_action(a, s) for a, s in zip(action, space.spaces)]
    # Other space types (e.g. Discrete) need no clipping.
    return action
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/rnn_sequencing.py
|
Python
|
"""RNN utils for RLlib.
The main trick here is that we add the time dimension at the last moment.
The non-LSTM layers of the model see their inputs as one flat batch. Before
the LSTM cell, we reshape the input to add the expected time dimension. During
postprocessing, we dynamically pad the experience batches so that this
reshaping is possible.
Note that this padding strategy only works out if we assume zero inputs don't
meaningfully affect the loss function. This happens to be true for all the
current algorithms: https://github.com/ray-project/ray/issues/2992
"""
import numpy as np
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
@DeveloperAPI
def add_time_dimension(padded_inputs, seq_lens):
    """Adds a time dimension to padded inputs.
    Arguments:
        padded_inputs (Tensor): a padded batch of sequences. That is,
            for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where
            A, B, C are sequence elements and * denotes padding.
        seq_lens (Tensor): the sequence lengths within the input batch,
            suitable for passing to tf.nn.dynamic_rnn().
    Returns:
        Reshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].
    """
    # The batch must have been padded so that
    # total_rows == num_sequences * max_seq_len; the per-sequence length is
    # therefore recoverable by integer division.
    total_rows = tf.shape(padded_inputs)[0]
    num_sequences = tf.shape(seq_lens)[0]
    time_dim = total_rows // num_sequences
    leading_dim = total_rows // time_dim
    trailing_dims = padded_inputs.get_shape().as_list()[1:]
    # Dynamically reshape the padded batch to introduce a time dimension.
    return tf.reshape(padded_inputs, [leading_dim, time_dim] + trailing_dims)
@DeveloperAPI
def chop_into_sequences(episode_ids,
                        unroll_ids,
                        agent_indices,
                        feature_columns,
                        state_columns,
                        max_seq_len,
                        dynamic_max=True,
                        shuffle=False,
                        _extra_padding=0):
    """Truncate and pad experiences into fixed-length sequences.
    Arguments:
        episode_ids (list): List of episode ids for each step.
        unroll_ids (list): List of identifiers for the sample batch. This is
            used to make sure sequences are cut between sample batches.
        agent_indices (list): List of agent ids for each step. Note that this
            has to be combined with episode_ids for uniqueness.
        feature_columns (list): List of arrays containing features.
        state_columns (list): List of arrays containing LSTM state values.
        max_seq_len (int): Max length of sequences before truncation.
        dynamic_max (bool): Whether to dynamically shrink the max seq len.
            For example, if max len is 20 and the actual max seq len in the
            data is 7, it will be shrunk to 7.
        shuffle (bool): Whether to shuffle the sequence outputs.
        _extra_padding (int): Add extra padding to the end of sequences.
    Returns:
        f_pad (list): Padded feature columns. These will be of shape
            [NUM_SEQUENCES * MAX_SEQ_LEN, ...].
        s_init (list): Initial states for each sequence, of shape
            [NUM_SEQUENCES, ...].
        seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES].
    Examples:
        >>> f_pad, s_init, seq_lens = chop_into_sequences(
                episode_ids=[1, 1, 5, 5, 5, 5],
                unroll_ids=[4, 4, 4, 4, 4, 4],
                agent_indices=[0, 0, 0, 0, 0, 0],
                feature_columns=[[4, 4, 8, 8, 8, 8],
                                 [1, 1, 0, 1, 1, 0]],
                state_columns=[[4, 5, 4, 5, 5, 5]],
                max_seq_len=3)
        >>> print(f_pad)
        [[4, 4, 0, 8, 8, 8, 8, 0, 0],
         [1, 1, 0, 0, 1, 1, 0, 0, 0]]
        >>> print(s_init)
        [[4, 4, 5]]
        >>> print(seq_lens)
        [2, 3, 1]
    """
    # Combine episode, agent, and unroll ids into a single id that is unique
    # per logical trajectory, so sequences never span two trajectories.
    unique_ids = np.add(
        np.add(episode_ids, agent_indices),
        np.array(unroll_ids) << 32)

    # Walk the batch once, cutting a new sequence whenever the trajectory id
    # changes or the current sequence hits max_seq_len.
    seq_lens = []
    current_len = 0
    last_id = None
    for uid in unique_ids:
        if (last_id is not None and uid != last_id) or \
                current_len >= max_seq_len:
            seq_lens.append(current_len)
            current_len = 0
        current_len += 1
        last_id = uid
    if current_len:
        seq_lens.append(current_len)
    assert sum(seq_lens) == len(unique_ids)
    seq_lens = np.array(seq_lens)

    # Dynamically shrink max len as needed to optimize memory usage
    if dynamic_max:
        max_seq_len = max(seq_lens) + _extra_padding

    # Pad every feature column so each sequence occupies exactly max_seq_len
    # consecutive rows (zeros fill the gap).
    feature_sequences = []
    for col in feature_columns:
        col = np.array(col)
        padded = np.zeros((len(seq_lens) * max_seq_len, ) + np.shape(col)[1:])
        row = 0
        base = 0
        for length in seq_lens:
            padded[base:base + length] = col[row:row + length]
            row += length
            base += max_seq_len
        assert row == len(unique_ids), col
        feature_sequences.append(padded)

    # Keep only the state at the first step of each sequence.
    initial_states = []
    for col in state_columns:
        col = np.array(col)
        starts = []
        idx = 0
        for length in seq_lens:
            starts.append(col[idx])
            idx += length
        initial_states.append(np.array(starts))

    if shuffle:
        # Permute whole sequences (not individual rows), keeping features,
        # initial states and lengths aligned.
        permutation = np.random.permutation(len(seq_lens))
        for i, padded in enumerate(feature_sequences):
            flat_shape = padded.shape
            grouped = np.reshape(padded,
                                 (len(seq_lens), -1) + padded.shape[1:])
            feature_sequences[i] = np.reshape(grouped[permutation],
                                              flat_shape)
        for i, col in enumerate(initial_states):
            initial_states[i] = col[permutation]
        seq_lens = seq_lens[permutation]
    return feature_sequences, initial_states, seq_lens
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/sample_batch.py
|
Python
|
import six
import collections
import numpy as np
from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI
from ray.rllib.utils.compression import pack, unpack, is_compressed
from ray.rllib.utils.memory import concat_aligned
# Default policy id for single agent environments
DEFAULT_POLICY_ID = "default_policy"
@PublicAPI
class SampleBatch:
    """Wrapper around a dictionary with string keys and array-like values.
    For example, {"obs": [1, 2, 3], "reward": [0, -1, 1]} is a batch of three
    samples, each with an "obs" and "reward" attribute.
    """
    # Outputs from interacting with the environment
    CUR_OBS = "obs"
    NEXT_OBS = "new_obs"
    ACTIONS = "actions"
    REWARDS = "rewards"
    PREV_ACTIONS = "prev_actions"
    PREV_REWARDS = "prev_rewards"
    DONES = "dones"
    INFOS = "infos"
    # Uniquely identifies an episode
    EPS_ID = "eps_id"
    # Uniquely identifies a sample batch. This is important to distinguish RNN
    # sequences from the same episode when multiple sample batches are
    # concatenated (fusing sequences across batches can be unsafe).
    UNROLL_ID = "unroll_id"
    # Uniquely identifies an agent within an episode
    AGENT_INDEX = "agent_index"
    # Value function predictions emitted by the behaviour policy
    VF_PREDS = "vf_preds"
    @PublicAPI
    def __init__(self, *args, **kwargs):
        """Constructs a sample batch (same params as dict constructor)."""
        self.data = dict(*args, **kwargs)
        lengths = []
        # Normalize every column to a numpy array and record its length;
        # all columns must share one length (the batch size).
        for k, v in self.data.copy().items():
            assert isinstance(k, six.string_types), self
            lengths.append(len(v))
            self.data[k] = np.array(v, copy=False)
        if not lengths:
            raise ValueError("Empty sample batch")
        assert len(set(lengths)) == 1, ("data columns must be same length",
                                        self.data, lengths)
        # Number of rows (timesteps) in this batch.
        self.count = lengths[0]
    @staticmethod
    @PublicAPI
    def concat_samples(samples):
        """Concatenate a list of SampleBatch (or MultiAgentBatch) objects."""
        if isinstance(samples[0], MultiAgentBatch):
            return MultiAgentBatch.concat_samples(samples)
        out = {}
        # Drop empty batches so concat_aligned sees only real data.
        samples = [s for s in samples if s.count > 0]
        for k in samples[0].keys():
            out[k] = concat_aligned([s[k] for s in samples])
        return SampleBatch(out)
    @PublicAPI
    def concat(self, other):
        """Returns a new SampleBatch with each data column concatenated.
        Examples:
            >>> b1 = SampleBatch({"a": [1, 2]})
            >>> b2 = SampleBatch({"a": [3, 4, 5]})
            >>> print(b1.concat(b2))
            {"a": [1, 2, 3, 4, 5]}
        """
        assert self.keys() == other.keys(), "must have same columns"
        out = {}
        for k in self.keys():
            out[k] = concat_aligned([self[k], other[k]])
        return SampleBatch(out)
    @PublicAPI
    def copy(self):
        """Returns a deep copy of this batch (columns are copied)."""
        return SampleBatch(
            {k: np.array(v, copy=True)
             for (k, v) in self.data.items()})
    @PublicAPI
    def rows(self):
        """Returns an iterator over data rows, i.e. dicts with column values.
        Examples:
            >>> batch = SampleBatch({"a": [1, 2, 3], "b": [4, 5, 6]})
            >>> for row in batch.rows():
                   print(row)
            {"a": 1, "b": 4}
            {"a": 2, "b": 5}
            {"a": 3, "b": 6}
        """
        for i in range(self.count):
            row = {}
            for k in self.keys():
                row[k] = self[k][i]
            yield row
    @PublicAPI
    def columns(self, keys):
        """Returns a list of just the specified columns.
        Examples:
            >>> batch = SampleBatch({"a": [1], "b": [2], "c": [3]})
            >>> print(batch.columns(["a", "b"]))
            [[1], [2]]
        """
        out = []
        for k in keys:
            out.append(self[k])
        return out
    @PublicAPI
    def shuffle(self):
        """Shuffles the rows of this batch in-place."""
        # One shared permutation keeps all columns row-aligned.
        permutation = np.random.permutation(self.count)
        for key, val in self.items():
            self[key] = val[permutation]
    @PublicAPI
    def split_by_episode(self):
        """Splits this batch's data by `eps_id`.
        Returns:
            list of SampleBatch, one per distinct episode.
        """
        # Rows for one episode are assumed contiguous; cut whenever the
        # eps_id changes.
        slices = []
        cur_eps_id = self.data["eps_id"][0]
        offset = 0
        for i in range(self.count):
            next_eps_id = self.data["eps_id"][i]
            if next_eps_id != cur_eps_id:
                slices.append(self.slice(offset, i))
                offset = i
                cur_eps_id = next_eps_id
        slices.append(self.slice(offset, self.count))
        # Sanity-check that each slice holds exactly one episode and that
        # no rows were lost.
        for s in slices:
            slen = len(set(s["eps_id"]))
            assert slen == 1, (s, slen)
        assert sum(s.count for s in slices) == self.count, (slices, self.count)
        return slices
    @PublicAPI
    def slice(self, start, end):
        """Returns a slice of the row data of this batch.
        Arguments:
            start (int): Starting index.
            end (int): Ending index.
        Returns:
            SampleBatch which has a slice of this batch's data.
        """
        return SampleBatch({k: v[start:end] for k, v in self.data.items()})
    @PublicAPI
    def keys(self):
        """Returns the column names of this batch."""
        return self.data.keys()
    @PublicAPI
    def items(self):
        """Returns (column name, column array) pairs."""
        return self.data.items()
    @PublicAPI
    def get(self, key):
        """Returns the column for `key`, or None if absent."""
        return self.data.get(key)
    @PublicAPI
    def __getitem__(self, key):
        return self.data[key]
    @PublicAPI
    def __setitem__(self, key, item):
        self.data[key] = item
    @DeveloperAPI
    def compress(self, bulk=False, columns=frozenset(["obs", "new_obs"])):
        """Compresses the given columns in-place.
        Arguments:
            bulk (bool): pack each column as one blob instead of row-wise.
            columns (set): column names to compress (if present).
        """
        for key in columns:
            if key in self.data:
                if bulk:
                    self.data[key] = pack(self.data[key])
                else:
                    self.data[key] = np.array(
                        [pack(o) for o in self.data[key]])
    @DeveloperAPI
    def decompress_if_needed(self, columns=frozenset(["obs", "new_obs"])):
        """Decompresses the given columns in-place if they are compressed.

        Handles both bulk-compressed columns and row-wise compression.
        """
        for key in columns:
            if key in self.data:
                arr = self.data[key]
                if is_compressed(arr):
                    self.data[key] = unpack(arr)
                elif len(arr) > 0 and is_compressed(arr[0]):
                    self.data[key] = np.array(
                        [unpack(o) for o in self.data[key]])
    def __str__(self):
        return "SampleBatch({})".format(str(self.data))
    def __repr__(self):
        return "SampleBatch({})".format(str(self.data))
    def __iter__(self):
        # Iterating a batch iterates its column names, like a dict.
        return self.data.__iter__()
    def __contains__(self, x):
        return x in self.data
@PublicAPI
class MultiAgentBatch:
    """A batch of experiences from multiple policies in the environment.
    Attributes:
        policy_batches (dict): Mapping from policy id to a normal SampleBatch
            of experiences. Note that these batches may be of different length.
        count (int): The number of timesteps in the environment this batch
            contains. This will be less than the number of transitions this
            batch contains across all policies in total.
    """
    @PublicAPI
    def __init__(self, policy_batches, count):
        # See class docstring for the meaning of both attributes.
        self.policy_batches = policy_batches
        self.count = count
    @staticmethod
    @PublicAPI
    def wrap_as_needed(batches, count):
        """Returns a plain SampleBatch for the single-(default-)policy case,
        else wraps the mapping in a MultiAgentBatch."""
        if len(batches) == 1 and DEFAULT_POLICY_ID in batches:
            return batches[DEFAULT_POLICY_ID]
        return MultiAgentBatch(batches, count)
    @staticmethod
    @PublicAPI
    def concat_samples(samples):
        """Concatenates a list of MultiAgentBatch objects per policy id."""
        policy_batches = collections.defaultdict(list)
        total_count = 0
        for s in samples:
            assert isinstance(s, MultiAgentBatch)
            for policy_id, batch in s.policy_batches.items():
                policy_batches[policy_id].append(batch)
            total_count += s.count
        out = {}
        for policy_id, batches in policy_batches.items():
            out[policy_id] = SampleBatch.concat_samples(batches)
        return MultiAgentBatch(out, total_count)
    @PublicAPI
    def copy(self):
        """Returns a deep copy (each per-policy batch is copied)."""
        return MultiAgentBatch(
            {k: v.copy()
             for (k, v) in self.policy_batches.items()}, self.count)
    @PublicAPI
    def total(self):
        """Returns the total number of transitions across all policies."""
        ct = 0
        for batch in self.policy_batches.values():
            ct += batch.count
        return ct
    @DeveloperAPI
    def compress(self, bulk=False, columns=frozenset(["obs", "new_obs"])):
        """Compresses the given columns of every per-policy batch in-place."""
        for batch in self.policy_batches.values():
            batch.compress(bulk=bulk, columns=columns)
    @DeveloperAPI
    def decompress_if_needed(self, columns=frozenset(["obs", "new_obs"])):
        """Decompresses the given columns of every per-policy batch in-place."""
        for batch in self.policy_batches.values():
            batch.decompress_if_needed(columns)
    def __str__(self):
        return "MultiAgentBatch({}, count={})".format(
            str(self.policy_batches), self.count)
    def __repr__(self):
        return "MultiAgentBatch({}, count={})".format(
            str(self.policy_batches), self.count)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/tests/test_policy.py
|
Python
|
import random
from ray.rllib.policy.policy import Policy
class TestPolicy(Policy):
    """
    A dummy Policy that returns a random (batched) int for compute_actions
    and implements all other abstract methods of Policy with "pass".
    """
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        episodes=None,
                        deterministic=None,
                        explore=True,
                        time_step=None,
                        **kwargs):
        # Draw a single random action and repeat it across the whole batch
        # (one draw per call, not per row); no RNN state, no extra fetches.
        action = random.choice([0, 1])
        batch_size = len(obs_batch)
        return [action] * batch_size, [], {}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/tf_policy.py
|
Python
|
import errno
import logging
import os
import numpy as np
import ray
import ray.experimental.tf_utils
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY, \
ACTION_PROB, ACTION_LOGP
from ray.rllib.policy.rnn_sequencing import chop_into_sequences
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.debug import log_once, summarize
from ray.rllib.utils.schedules import ConstantSchedule, PiecewiseSchedule
from ray.rllib.utils.tf_run_builder import TFRunBuilder
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
@DeveloperAPI
class TFPolicy(Policy):
    """An agent policy and loss implemented in TensorFlow.
    Extending this class enables RLlib to perform TensorFlow specific
    optimizations on the policy, e.g., parallelization across gpus or
    fusing multiple graphs together in the multi-agent setting.
    Input tensors are typically shaped like [BATCH_SIZE, ...].
    Attributes:
        observation_space (gym.Space): observation space of the policy.
        action_space (gym.Space): action space of the policy.
        model (rllib.models.Model): RLlib model used for the policy.
    Examples:
        >>> policy = TFPolicySubclass(
            sess, obs_input, action_sampler, loss, loss_inputs)
        >>> print(policy.compute_actions([1, 0, 2]))
        (array([0, 1, 1]), [], {})
        >>> print(policy.postprocess_trajectory(SampleBatch({...})))
        SampleBatch({"action": ..., "advantages": ..., ...})
    """
    @DeveloperAPI
    def __init__(self,
                 observation_space,
                 action_space,
                 config,
                 sess,
                 obs_input,
                 action_sampler,
                 loss,
                 loss_inputs,
                 model=None,
                 action_logp=None,
                 state_inputs=None,
                 state_outputs=None,
                 prev_action_input=None,
                 prev_reward_input=None,
                 seq_lens=None,
                 max_seq_len=20,
                 batch_divisibility_req=1,
                 update_ops=None):
        """Initialize the policy.
        Arguments:
            observation_space (gym.Space): Observation space of the env.
            action_space (gym.Space): Action space of the env.
            sess (Session): TensorFlow session to use.
            obs_input (Tensor): input placeholder for observations, of shape
                [BATCH_SIZE, obs...].
            action_sampler (Tensor): Tensor for sampling an action, of shape
                [BATCH_SIZE, action...]
            loss (Tensor): scalar policy loss output tensor.
            loss_inputs (list): a (name, placeholder) tuple for each loss
                input argument. Each placeholder name must correspond to a
                SampleBatch column key returned by postprocess_trajectory(),
                and has shape [BATCH_SIZE, data...]. These keys will be read
                from postprocessed sample batches and fed into the specified
                placeholders during loss computation.
            model (rllib.models.Model): used to integrate custom losses and
                stats from user-defined RLlib models.
            action_logp (Tensor): log probability of the sampled action.
            state_inputs (list): list of RNN state input Tensors.
            state_outputs (list): list of RNN state output Tensors.
            prev_action_input (Tensor): placeholder for previous actions
            prev_reward_input (Tensor): placeholder for previous rewards
            seq_lens (Tensor): placeholder for RNN sequence lengths, of shape
                [NUM_SEQUENCES]. Note that NUM_SEQUENCES << BATCH_SIZE. See
                policy/rnn_sequencing.py for more information.
            max_seq_len (int): max sequence length for LSTM training.
            batch_divisibility_req (int): pad all agent experiences batches to
                multiples of this value. This only has an effect if not using
                a LSTM model.
            update_ops (list): override the batchnorm update ops to run when
                applying gradients. Otherwise we run all update ops found in
                the current variable scope.
        """
        super(TFPolicy, self).__init__(observation_space, action_space, config)
        self.model = model
        self._sess = sess
        self._obs_input = obs_input
        self._prev_action_input = prev_action_input
        self._prev_reward_input = prev_reward_input
        self._sampler = action_sampler
        self._is_training = self._get_is_training_placeholder()
        self._action_logp = action_logp
        # Derive action prob from logp so both can be fetched at action time.
        self._action_prob = (tf.exp(self._action_logp)
                             if self._action_logp is not None else None)
        self._state_inputs = state_inputs or []
        self._state_outputs = state_outputs or []
        self._seq_lens = seq_lens
        self._max_seq_len = max_seq_len
        self._batch_divisibility_req = batch_divisibility_req
        self._update_ops = update_ops
        self._stats_fetches = {}
        self._loss_input_dict = None
        # Loss may be deferred (loss=None) and initialized later via
        # _initialize_loss(); until then learn/grad calls will assert.
        if loss is not None:
            self._initialize_loss(loss, loss_inputs)
        else:
            self._loss = None
        # Sanity-check the RNN configuration up front.
        if len(self._state_inputs) != len(self._state_outputs):
            raise ValueError(
                "Number of state input and output tensors must match, got: "
                "{} vs {}".format(self._state_inputs, self._state_outputs))
        if len(self.get_initial_state()) != len(self._state_inputs):
            raise ValueError(
                "Length of initial state must match number of state inputs, "
                "got: {} vs {}".format(self.get_initial_state(),
                                       self._state_inputs))
        if self._state_inputs and self._seq_lens is None:
            raise ValueError(
                "seq_lens tensor must be given if state inputs are defined")
    def variables(self):
        """Return the list of all savable variables for this policy."""
        return self.model.variables()
    def get_placeholder(self, name):
        """Returns the given action or loss input placeholder by name.
        If the loss has not been initialized and a loss input placeholder is
        requested, an error is raised.
        """
        obs_inputs = {
            SampleBatch.CUR_OBS: self._obs_input,
            SampleBatch.PREV_ACTIONS: self._prev_action_input,
            SampleBatch.PREV_REWARDS: self._prev_reward_input,
        }
        if name in obs_inputs:
            return obs_inputs[name]
        assert self._loss_input_dict is not None, \
            "Should have set this before get_placeholder can be called"
        return self._loss_input_dict[name]
    def get_session(self):
        """Returns a reference to the TF session for this policy."""
        return self._sess
    def loss_initialized(self):
        """Returns whether the loss function has been initialized."""
        return self._loss is not None
    def _initialize_loss(self, loss, loss_inputs):
        """Wire up the loss, optimizer, gradient and apply ops.

        Called from __init__ when loss is given, or later by subclasses that
        build the loss lazily. Also runs the global variables initializer.
        """
        self._loss_inputs = loss_inputs
        self._loss_input_dict = dict(self._loss_inputs)
        for i, ph in enumerate(self._state_inputs):
            self._loss_input_dict["state_in_{}".format(i)] = ph
        if self.model:
            # Give the model a chance to add custom loss terms / stats.
            self._loss = self.model.custom_loss(loss, self._loss_input_dict)
            self._stats_fetches.update({
                "model": self.model.metrics() if isinstance(
                    self.model, ModelV2) else self.model.custom_stats()
            })
        else:
            self._loss = loss
        self._optimizer = self.optimizer()
        # Drop vars with no gradient so apply_gradients does not choke.
        self._grads_and_vars = [
            (g, v) for (g, v) in self.gradients(self._optimizer, self._loss)
            if g is not None
        ]
        self._grads = [g for (g, v) in self._grads_and_vars]
        if hasattr(self, "model") and isinstance(self.model, ModelV2):
            self._variables = ray.experimental.tf_utils.TensorFlowVariables(
                [], self._sess, self.variables())
        else:
            # TODO(ekl) deprecate support for v1 models
            self._variables = ray.experimental.tf_utils.TensorFlowVariables(
                self._loss, self._sess)
        # gather update ops for any batch norm layers
        if not self._update_ops:
            self._update_ops = tf.get_collection(
                tf.GraphKeys.UPDATE_OPS, scope=tf.get_variable_scope().name)
        if self._update_ops:
            logger.info("Update ops to run on apply gradient: {}".format(
                self._update_ops))
        # Ensure batchnorm-style update ops run before each apply step.
        with tf.control_dependencies(self._update_ops):
            self._apply_op = self.build_apply_op(self._optimizer,
                                                 self._grads_and_vars)
        if log_once("loss_used"):
            logger.debug(
                "These tensors were used in the loss_fn:\n\n{}\n".format(
                    summarize(self._loss_input_dict)))
        self._sess.run(tf.global_variables_initializer())
    @override(Policy)
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        # Build the feed dict / fetches via a run builder, then execute.
        builder = TFRunBuilder(self._sess, "compute_actions")
        fetches = self._build_compute_actions(builder, obs_batch,
                                              state_batches, prev_action_batch,
                                              prev_reward_batch)
        return builder.get(fetches)
    @override(Policy)
    def compute_gradients(self, postprocessed_batch):
        assert self.loss_initialized()
        builder = TFRunBuilder(self._sess, "compute_gradients")
        fetches = self._build_compute_gradients(builder, postprocessed_batch)
        return builder.get(fetches)
    @override(Policy)
    def apply_gradients(self, gradients):
        assert self.loss_initialized()
        builder = TFRunBuilder(self._sess, "apply_gradients")
        fetches = self._build_apply_gradients(builder, gradients)
        builder.get(fetches)
    @override(Policy)
    def learn_on_batch(self, postprocessed_batch):
        # Fused compute+apply gradients in a single session run.
        assert self.loss_initialized()
        builder = TFRunBuilder(self._sess, "learn_on_batch")
        fetches = self._build_learn_on_batch(builder, postprocessed_batch)
        return builder.get(fetches)
    @override(Policy)
    def get_weights(self):
        return self._variables.get_weights()
    @override(Policy)
    def set_weights(self, weights):
        return self._variables.set_weights(weights)
    @override(Policy)
    def export_model(self, export_dir):
        """Export tensorflow graph to export_dir for serving."""
        with self._sess.graph.as_default():
            builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
            signature_def_map = self._build_signature_def()
            builder.add_meta_graph_and_variables(
                self._sess, [tf.saved_model.tag_constants.SERVING],
                signature_def_map=signature_def_map)
            builder.save()
    @override(Policy)
    def export_checkpoint(self, export_dir, filename_prefix="model"):
        """Export tensorflow checkpoint to export_dir."""
        try:
            os.makedirs(export_dir)
        except OSError as e:
            # ignore error if export dir already exists
            if e.errno != errno.EEXIST:
                raise
        save_path = os.path.join(export_dir, filename_prefix)
        with self._sess.graph.as_default():
            saver = tf.train.Saver()
            saver.save(self._sess, save_path)
    @DeveloperAPI
    def copy(self, existing_inputs):
        """Creates a copy of self using existing input placeholders.
        Optional, only required to work with the multi-GPU optimizer."""
        raise NotImplementedError
    def is_recurrent(self):
        """Whether this policy carries RNN state (has state input tensors)."""
        return len(self._state_inputs) > 0
    @override(Policy)
    def num_state_tensors(self):
        return len(self._state_inputs)
    @DeveloperAPI
    def extra_compute_action_feed_dict(self):
        """Extra dict to pass to the compute actions session run."""
        return {}
    @DeveloperAPI
    def extra_compute_action_fetches(self):
        """Extra values to fetch and return from compute_actions().
        By default we only return action probability info (if present).
        """
        if self._action_logp is not None:
            return {
                ACTION_PROB: self._action_prob,
                ACTION_LOGP: self._action_logp,
            }
        else:
            return {}
    @DeveloperAPI
    def extra_compute_grad_feed_dict(self):
        """Extra dict to pass to the compute gradients session run."""
        return {}  # e.g, kl_coeff
    @DeveloperAPI
    def extra_compute_grad_fetches(self):
        """Extra values to fetch and return from compute_gradients()."""
        return {LEARNER_STATS_KEY: {}}  # e.g, stats, td error, etc.
    @DeveloperAPI
    def optimizer(self):
        """TF optimizer to use for policy optimization."""
        if hasattr(self, "config"):
            return tf.train.AdamOptimizer(self.config["lr"])
        else:
            return tf.train.AdamOptimizer()
    @DeveloperAPI
    def gradients(self, optimizer, loss):
        """Override for custom gradient computation."""
        return optimizer.compute_gradients(loss)
    @DeveloperAPI
    def build_apply_op(self, optimizer, grads_and_vars):
        """Override for custom gradient apply computation."""
        # specify global_step for TD3 which needs to count the num updates
        return optimizer.apply_gradients(
            self._grads_and_vars,
            global_step=tf.train.get_or_create_global_step())
    @DeveloperAPI
    def _get_is_training_placeholder(self):
        """Get the placeholder for _is_training, i.e., for batch norm layers.
        This can be called safely before __init__ has run.
        """
        if not hasattr(self, "_is_training"):
            self._is_training = tf.placeholder_with_default(False, ())
        return self._is_training
    def _debug_vars(self):
        """Log (once) the variables being optimized, for debugging."""
        if log_once("grad_vars"):
            for _, v in self._grads_and_vars:
                logger.info("Optimizing variable {}".format(v))
    def _extra_input_signature_def(self):
        """Extra input signatures to add when exporting tf model.
        Inferred from extra_compute_action_feed_dict()
        """
        feed_dict = self.extra_compute_action_feed_dict()
        return {
            k.name: tf.saved_model.utils.build_tensor_info(k)
            for k in feed_dict.keys()
        }
    def _extra_output_signature_def(self):
        """Extra output signatures to add when exporting tf model.
        Inferred from extra_compute_action_fetches()
        """
        fetches = self.extra_compute_action_fetches()
        return {
            k: tf.saved_model.utils.build_tensor_info(fetches[k])
            for k in fetches.keys()
        }
    def _build_signature_def(self):
        """Build signature def map for tensorflow SavedModelBuilder.
        """
        # build input signatures
        input_signature = self._extra_input_signature_def()
        input_signature["observations"] = \
            tf.saved_model.utils.build_tensor_info(self._obs_input)
        if self._seq_lens is not None:
            input_signature["seq_lens"] = \
                tf.saved_model.utils.build_tensor_info(self._seq_lens)
        if self._prev_action_input is not None:
            input_signature["prev_action"] = \
                tf.saved_model.utils.build_tensor_info(self._prev_action_input)
        if self._prev_reward_input is not None:
            input_signature["prev_reward"] = \
                tf.saved_model.utils.build_tensor_info(self._prev_reward_input)
        input_signature["is_training"] = \
            tf.saved_model.utils.build_tensor_info(self._is_training)
        for state_input in self._state_inputs:
            input_signature[state_input.name] = \
                tf.saved_model.utils.build_tensor_info(state_input)
        # build output signatures
        output_signature = self._extra_output_signature_def()
        output_signature["actions"] = \
            tf.saved_model.utils.build_tensor_info(self._sampler)
        for state_output in self._state_outputs:
            output_signature[state_output.name] = \
                tf.saved_model.utils.build_tensor_info(state_output)
        signature_def = (
            tf.saved_model.signature_def_utils.build_signature_def(
                input_signature, output_signature,
                tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
        signature_def_key = (tf.saved_model.signature_constants.
                             DEFAULT_SERVING_SIGNATURE_DEF_KEY)
        signature_def_map = {signature_def_key: signature_def}
        return signature_def_map
    def _build_compute_actions(self,
                               builder,
                               obs_batch,
                               state_batches=None,
                               prev_action_batch=None,
                               prev_reward_batch=None,
                               episodes=None):
        """Add the compute-actions feed dict and fetches to the run builder.

        Returns a (actions, state_outs, extra_fetches) tuple of fetch handles.
        """
        state_batches = state_batches or []
        if len(self._state_inputs) != len(state_batches):
            raise ValueError(
                "Must pass in RNN state batches for placeholders {}, got {}".
                format(self._state_inputs, state_batches))
        builder.add_feed_dict(self.extra_compute_action_feed_dict())
        builder.add_feed_dict({self._obs_input: obs_batch})
        if state_batches:
            # At inference time each row is its own length-1 sequence.
            builder.add_feed_dict({self._seq_lens: np.ones(len(obs_batch))})
        if self._prev_action_input is not None and \
           prev_action_batch is not None:
            builder.add_feed_dict({self._prev_action_input: prev_action_batch})
        if self._prev_reward_input is not None and \
           prev_reward_batch is not None:
            builder.add_feed_dict({self._prev_reward_input: prev_reward_batch})
        builder.add_feed_dict({self._is_training: False})
        builder.add_feed_dict(dict(zip(self._state_inputs, state_batches)))
        fetches = builder.add_fetches([self._sampler] + self._state_outputs +
                                      [self.extra_compute_action_fetches()])
        return fetches[0], fetches[1:-1], fetches[-1]
    def _build_compute_gradients(self, builder, postprocessed_batch):
        """Add the gradient computation to the run builder.

        Returns (grads, grad_fetches) fetch handles.
        """
        self._debug_vars()
        builder.add_feed_dict(self.extra_compute_grad_feed_dict())
        builder.add_feed_dict({self._is_training: True})
        builder.add_feed_dict(
            self._get_loss_inputs_dict(postprocessed_batch, shuffle=False))
        fetches = builder.add_fetches(
            [self._grads, self._get_grad_and_stats_fetches()])
        return fetches[0], fetches[1]
    def _build_apply_gradients(self, builder, gradients):
        """Add the apply-gradients op to the run builder (feeds grads in)."""
        if len(gradients) != len(self._grads):
            raise ValueError(
                "Unexpected number of gradients to apply, got {} for {}".
                format(gradients, self._grads))
        builder.add_feed_dict({self._is_training: True})
        builder.add_feed_dict(dict(zip(self._grads, gradients)))
        fetches = builder.add_fetches([self._apply_op])
        return fetches[0]
    def _build_learn_on_batch(self, builder, postprocessed_batch):
        """Add fused gradient compute+apply to the run builder.

        Returns the grad/stats fetch handle (the apply op result is dropped).
        """
        self._debug_vars()
        builder.add_feed_dict(self.extra_compute_grad_feed_dict())
        builder.add_feed_dict(
            self._get_loss_inputs_dict(postprocessed_batch, shuffle=False))
        builder.add_feed_dict({self._is_training: True})
        fetches = builder.add_fetches([
            self._apply_op,
            self._get_grad_and_stats_fetches(),
        ])
        return fetches[1]
    def _get_grad_and_stats_fetches(self):
        """Merge subclass grad fetches with accumulated stats fetches."""
        fetches = self.extra_compute_grad_fetches()
        if LEARNER_STATS_KEY not in fetches:
            raise ValueError(
                "Grad fetches should contain 'stats': {...} entry")
        if self._stats_fetches:
            fetches[LEARNER_STATS_KEY] = dict(self._stats_fetches,
                                              **fetches[LEARNER_STATS_KEY])
        return fetches
    def _get_loss_inputs_dict(self, batch, shuffle):
        """Return a feed dict from a batch.
        Arguments:
            batch (SampleBatch): batch of data to derive inputs from
            shuffle (bool): whether to shuffle batch sequences. Shuffle may
                be done in-place. This only makes sense if you're further
                applying minibatch SGD after getting the outputs.
        Returns:
            feed dict of data
        """
        feed_dict = {}
        if self._batch_divisibility_req > 1:
            meets_divisibility_reqs = (
                len(batch[SampleBatch.CUR_OBS]) %
                self._batch_divisibility_req == 0
                and max(batch[SampleBatch.AGENT_INDEX]) == 0)  # not multiagent
        else:
            meets_divisibility_reqs = True
        # Simple case: not RNN nor do we need to pad
        if not self._state_inputs and meets_divisibility_reqs:
            if shuffle:
                batch.shuffle()
            for k, ph in self._loss_inputs:
                feed_dict[ph] = batch[k]
            return feed_dict
        if self._state_inputs:
            max_seq_len = self._max_seq_len
            dynamic_max = True
        else:
            max_seq_len = self._batch_divisibility_req
            dynamic_max = False
        # RNN or multi-agent case
        feature_keys = [k for k, v in self._loss_inputs]
        state_keys = [
            "state_in_{}".format(i) for i in range(len(self._state_inputs))
        ]
        feature_sequences, initial_states, seq_lens = chop_into_sequences(
            batch[SampleBatch.EPS_ID],
            batch[SampleBatch.UNROLL_ID],
            batch[SampleBatch.AGENT_INDEX], [batch[k] for k in feature_keys],
            [batch[k] for k in state_keys],
            max_seq_len,
            dynamic_max=dynamic_max,
            shuffle=shuffle)
        for k, v in zip(feature_keys, feature_sequences):
            feed_dict[self._loss_input_dict[k]] = v
        for k, v in zip(state_keys, initial_states):
            feed_dict[self._loss_input_dict[k]] = v
        feed_dict[self._seq_lens] = seq_lens
        if log_once("rnn_feed_dict"):
            logger.info("Padded input for RNN:\n\n{}\n".format(
                summarize({
                    "features": feature_sequences,
                    "initial_states": initial_states,
                    "seq_lens": seq_lens,
                    "max_seq_len": max_seq_len,
                })))
        return feed_dict
@DeveloperAPI
class LearningRateSchedule:
    """Mixin for TFPolicy that anneals the learning rate over time.

    Keeps the current LR in a non-trainable TF variable and refreshes it
    from the configured schedule on each global-vars update.
    """

    @DeveloperAPI
    def __init__(self, lr, lr_schedule):
        self.cur_lr = tf.get_variable("lr", initializer=lr, trainable=False)
        if lr_schedule is not None:
            # Piecewise schedule; hold the last value past the final point.
            self.lr_schedule = PiecewiseSchedule(
                lr_schedule, outside_value=lr_schedule[-1][-1])
        else:
            self.lr_schedule = ConstantSchedule(lr)

    @override(Policy)
    def on_global_var_update(self, global_vars):
        super(LearningRateSchedule, self).on_global_var_update(global_vars)
        new_lr = self.lr_schedule.value(global_vars["timestep"])
        self.cur_lr.load(new_lr, session=self._sess)

    @override(TFPolicy)
    def optimizer(self):
        # Adam reads the LR from the (mutable) variable each step.
        return tf.train.AdamOptimizer(self.cur_lr)
@DeveloperAPI
class EntropyCoeffSchedule:
    """Mixin for TFPolicy that decays the entropy coefficient over time.

    Keeps the current coefficient in a non-trainable TF variable and
    refreshes it from the configured schedule on each global-vars update.
    """

    @DeveloperAPI
    def __init__(self, entropy_coeff, entropy_coeff_schedule):
        self.entropy_coeff = tf.get_variable(
            "entropy_coeff", initializer=entropy_coeff, trainable=False)
        if entropy_coeff_schedule is None:
            self.entropy_coeff_schedule = ConstantSchedule(entropy_coeff)
        elif isinstance(entropy_coeff_schedule, list):
            # Custom piecewise schedule, same format as lr_schedule.
            self.entropy_coeff_schedule = PiecewiseSchedule(
                entropy_coeff_schedule,
                outside_value=entropy_coeff_schedule[-1][-1])
        else:
            # Legacy scalar form: linear decay to 0 at the given timestep,
            # clamped to 0 afterwards.
            self.entropy_coeff_schedule = PiecewiseSchedule(
                [[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],
                outside_value=0.0)

    @override(Policy)
    def on_global_var_update(self, global_vars):
        super(EntropyCoeffSchedule, self).on_global_var_update(global_vars)
        new_coeff = self.entropy_coeff_schedule.value(global_vars["timestep"])
        self.entropy_coeff.load(new_coeff, session=self._sess)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/tf_policy_template.py
|
Python
|
from ray.rllib.policy.dynamic_tf_policy import DynamicTFPolicy
from ray.rllib.policy import eager_tf_policy
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.utils import add_mixins
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
@DeveloperAPI
def build_tf_policy(name,
                    loss_fn,
                    get_default_config=None,
                    postprocess_fn=None,
                    stats_fn=None,
                    optimizer_fn=None,
                    gradients_fn=None,
                    apply_gradients_fn=None,
                    grad_stats_fn=None,
                    extra_action_fetches_fn=None,
                    extra_learn_fetches_fn=None,
                    before_init=None,
                    before_loss_init=None,
                    after_init=None,
                    make_model=None,
                    action_sampler_fn=None,
                    mixins=None,
                    get_batch_divisibility_req=None,
                    obs_include_prev_action_reward=True):
    """Helper function for creating a dynamic tf policy at runtime.
    Functions will be run in this order to initialize the policy:
        1. Placeholder setup: postprocess_fn
        2. Loss init: loss_fn, stats_fn
        3. Optimizer init: optimizer_fn, gradients_fn, apply_gradients_fn,
           grad_stats_fn
    This means that you can e.g., depend on any policy attributes created in
    the running of `loss_fn` in later functions such as `stats_fn`.
    In eager mode, the following functions will be run repeatedly on each
    eager execution: loss_fn, stats_fn, gradients_fn, apply_gradients_fn,
    and grad_stats_fn.
    This means that these functions should not define any variables internally,
    otherwise they will fail in eager mode execution. Variable should only
    be created in make_model (if defined).
    Arguments:
        name (str): name of the policy (e.g., "PPOTFPolicy")
        loss_fn (func): function that returns a loss tensor as arguments
            (policy, model, dist_class, train_batch)
        get_default_config (func): optional function that returns the default
            config to merge with any overrides
        postprocess_fn (func): optional experience postprocessing function
            that takes the same args as Policy.postprocess_trajectory()
        stats_fn (func): optional function that returns a dict of
            TF fetches given the policy and batch input tensors
        optimizer_fn (func): optional function that returns a tf.Optimizer
            given the policy and config
        gradients_fn (func): optional function that returns a list of gradients
            given (policy, optimizer, loss). If not specified, this
            defaults to optimizer.compute_gradients(loss)
        apply_gradients_fn (func): optional function that returns an apply
            gradients op given (policy, optimizer, grads_and_vars)
        grad_stats_fn (func): optional function that returns a dict of
            TF fetches given the policy, batch input, and gradient tensors
        extra_action_fetches_fn (func): optional function that returns
            a dict of TF fetches given the policy object
        extra_learn_fetches_fn (func): optional function that returns a dict of
            extra values to fetch and return when learning on a batch
        before_init (func): optional function to run at the beginning of
            policy init that takes the same arguments as the policy constructor
        before_loss_init (func): optional function to run prior to loss
            init that takes the same arguments as the policy constructor
        after_init (func): optional function to run at the end of policy init
            that takes the same arguments as the policy constructor
        make_model (func): optional function that returns a ModelV2 object
            given (policy, obs_space, action_space, config).
            All policy variables should be created in this function. If not
            specified, a default model will be created.
        action_sampler_fn (func): optional function that returns a
            tuple of action and action prob tensors given
            (policy, model, input_dict, obs_space, action_space, config).
            If not specified, a default action distribution will be used.
        mixins (list): list of any class mixins for the returned policy class.
            These mixins will be applied in order and will have higher
            precedence than the DynamicTFPolicy class
        get_batch_divisibility_req (func): optional function that returns
            the divisibility requirement for sample batches
        obs_include_prev_action_reward (bool): whether to include the
            previous action and reward in the model input
    Returns:
        a DynamicTFPolicy instance that uses the specified args
    """
    # Snapshot all builder arguments so with_updates()/as_eager() can
    # re-invoke this builder with overrides. Must stay the first statement
    # after the docstring so locals() contains exactly the parameters.
    original_kwargs = locals().copy()
    base = add_mixins(DynamicTFPolicy, mixins)
    class policy_cls(base):
        def __init__(self,
                     obs_space,
                     action_space,
                     config,
                     existing_model=None,
                     existing_inputs=None):
            if get_default_config:
                config = dict(get_default_config(), **config)
            if before_init:
                before_init(self, obs_space, action_space, config)
            def before_loss_init_wrapper(policy, obs_space, action_space,
                                         config):
                # Runs the user hook, then captures extra action fetches
                # (which may depend on attributes the hook created).
                if before_loss_init:
                    before_loss_init(policy, obs_space, action_space, config)
                if extra_action_fetches_fn is None:
                    self._extra_action_fetches = {}
                else:
                    self._extra_action_fetches = extra_action_fetches_fn(self)
            DynamicTFPolicy.__init__(
                self,
                obs_space,
                action_space,
                config,
                loss_fn,
                stats_fn=stats_fn,
                grad_stats_fn=grad_stats_fn,
                before_loss_init=before_loss_init_wrapper,
                make_model=make_model,
                action_sampler_fn=action_sampler_fn,
                existing_model=existing_model,
                existing_inputs=existing_inputs,
                get_batch_divisibility_req=get_batch_divisibility_req,
                obs_include_prev_action_reward=obs_include_prev_action_reward)
            if after_init:
                after_init(self, obs_space, action_space, config)
        @override(Policy)
        def postprocess_trajectory(self,
                                   sample_batch,
                                   other_agent_batches=None,
                                   episode=None):
            if not postprocess_fn:
                return sample_batch
            return postprocess_fn(self, sample_batch, other_agent_batches,
                                  episode)
        @override(TFPolicy)
        def optimizer(self):
            if optimizer_fn:
                return optimizer_fn(self, self.config)
            else:
                return base.optimizer(self)
        @override(TFPolicy)
        def gradients(self, optimizer, loss):
            if gradients_fn:
                return gradients_fn(self, optimizer, loss)
            else:
                return base.gradients(self, optimizer, loss)
        @override(TFPolicy)
        def build_apply_op(self, optimizer, grads_and_vars):
            if apply_gradients_fn:
                return apply_gradients_fn(self, optimizer, grads_and_vars)
            else:
                return base.build_apply_op(self, optimizer, grads_and_vars)
        @override(TFPolicy)
        def extra_compute_action_fetches(self):
            return dict(
                base.extra_compute_action_fetches(self),
                **self._extra_action_fetches)
        @override(TFPolicy)
        def extra_compute_grad_fetches(self):
            if extra_learn_fetches_fn:
                # auto-add empty learner stats dict if needed
                return dict({
                    LEARNER_STATS_KEY: {}
                }, **extra_learn_fetches_fn(self))
            else:
                return base.extra_compute_grad_fetches(self)
    def with_updates(**overrides):
        # Rebuild the policy class with some builder args overridden.
        return build_tf_policy(**dict(original_kwargs, **overrides))
    def as_eager():
        # Build the eager-execution equivalent from the same args.
        return eager_tf_policy.build_eager_tf_policy(**original_kwargs)
    policy_cls.with_updates = staticmethod(with_updates)
    policy_cls.as_eager = staticmethod(as_eager)
    policy_cls.__name__ = name
    policy_cls.__qualname__ = name
    return policy_cls
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/torch_policy.py
|
Python
|
import numpy as np
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import try_import_torch
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.tracking_dict import UsageTrackingDict
from ray.rllib.utils.schedules import ConstantSchedule, PiecewiseSchedule
torch, _ = try_import_torch()
class TorchPolicy(Policy):
"""Template for a PyTorch policy and loss to use with RLlib.
This is similar to TFPolicy, but for PyTorch.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy
model (TorchModel): Torch model instance
dist_class (type): Torch action distribution class
"""
def __init__(self, observation_space, action_space, config, model, loss,
action_distribution_class):
"""Build a policy from policy and loss torch modules.
Note that model will be placed on GPU device if CUDA_VISIBLE_DEVICES
is set. Only single GPU is supported for now.
Arguments:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): The Policy config dict.
model (nn.Module): PyTorch policy module. Given observations as
input, this module must return a list of outputs where the
first item is action logits, and the rest can be any value.
loss (func): Function that takes (policy, model, dist_class,
train_batch) and returns a single scalar loss.
action_distribution_class (ActionDistribution): Class for action
distribution.
"""
super(TorchPolicy, self).__init__(
observation_space, action_space, config
)
self.device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
self.model = model.to(self.device)
self._loss = loss
self._optimizer = self.optimizer()
self.dist_class = action_distribution_class
@override(Policy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
**kwargs):
with torch.no_grad():
input_dict = self._lazy_tensor_dict({
SampleBatch.CUR_OBS: obs_batch,
})
if prev_action_batch:
input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch
if prev_reward_batch:
input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch
model_out = self.model(input_dict, state_batches, [1])
logits, state = model_out
action_dist = self.dist_class(logits, self.model)
actions = action_dist.sample()
return (actions.cpu().numpy(), [h.cpu().numpy() for h in state],
self.extra_action_out(input_dict, state_batches,
self.model))
@override(Policy)
def learn_on_batch(self, postprocessed_batch):
train_batch = self._lazy_tensor_dict(postprocessed_batch)
loss_out = self._loss(self, self.model, self.dist_class, train_batch)
self._optimizer.zero_grad()
loss_out.backward()
grad_process_info = self.extra_grad_process()
self._optimizer.step()
grad_info = self.extra_grad_info(train_batch)
grad_info.update(grad_process_info)
return {LEARNER_STATS_KEY: grad_info}
@override(Policy)
def compute_gradients(self, postprocessed_batch):
train_batch = self._lazy_tensor_dict(postprocessed_batch)
loss_out = self._loss(self, self.model, self.dist_class, train_batch)
self._optimizer.zero_grad()
loss_out.backward()
grad_process_info = self.extra_grad_process()
# Note that return values are just references;
# calling zero_grad will modify the values
grads = []
for p in self.model.parameters():
if p.grad is not None:
grads.append(p.grad.data.cpu().numpy())
else:
grads.append(None)
grad_info = self.extra_grad_info(train_batch)
grad_info.update(grad_process_info)
return grads, {LEARNER_STATS_KEY: grad_info}
@override(Policy)
def apply_gradients(self, gradients):
for g, p in zip(gradients, self.model.parameters()):
if g is not None:
p.grad = torch.from_numpy(g).to(self.device)
self._optimizer.step()
@override(Policy)
def get_weights(self):
return {k: v.cpu() for k, v in self.model.state_dict().items()}
@override(Policy)
def set_weights(self, weights):
self.model.load_state_dict(weights)
@override(Policy)
def num_state_tensors(self):
return len(self.model.get_initial_state())
@override(Policy)
def get_initial_state(self):
return [s.numpy() for s in self.model.get_initial_state()]
def extra_grad_process(self):
"""Allow subclass to do extra processing on gradients and
return processing info."""
return {}
def extra_action_out(self, input_dict, state_batches, model,
action_dist=None):
"""Returns dict of extra info to include in experience batch.
Arguments:
input_dict (dict): Dict of model input tensors.
state_batches (list): List of state tensors.
model (TorchModelV2): Reference to the model.
action_dist (Distribution): Torch Distribution object to get
log-probs (e.g. for already sampled actions).
"""
return {}
def extra_grad_info(self, train_batch):
"""Return dict of extra grad info."""
return {}
def optimizer(self):
"""Custom PyTorch optimizer to use."""
if hasattr(self, "config"):
return torch.optim.Adam(
self.model.parameters(), lr=self.config["lr"])
else:
return torch.optim.Adam(self.model.parameters())
def _lazy_tensor_dict(self, postprocessed_batch):
train_batch = UsageTrackingDict(postprocessed_batch)
def convert(arr):
tensor = torch.from_numpy(np.asarray(arr))
if tensor.dtype == torch.double:
tensor = tensor.float()
return tensor.to(self.device)
train_batch.set_get_interceptor(convert)
return train_batch
@override(Policy)
def export_model(self, export_dir):
    """TODO: implement for torch.

    Raises:
        NotImplementedError: model export is not supported for torch yet.
    """
    raise NotImplementedError
@override(Policy)
def export_checkpoint(self, export_dir):
    """TODO: implement for torch.

    Raises:
        NotImplementedError: checkpoint export is not supported for torch yet.
    """
    raise NotImplementedError
@DeveloperAPI
class LearningRateSchedule(object):
    """Mixin for TorchPolicy that adds a learning rate schedule.

    (The override targets below are TorchPolicy/Policy, not TFPolicy.)
    """

    @DeveloperAPI
    def __init__(self, lr, lr_schedule):
        # Current lr; refreshed on every global-var update.
        self.cur_lr = lr
        if lr_schedule is None:
            # No schedule given -> constant lr forever.
            self.lr_schedule = ConstantSchedule(lr)
        else:
            # lr_schedule is a list of [timestep, lr] breakpoints; past the
            # last breakpoint, keep its lr value.
            self.lr_schedule = PiecewiseSchedule(
                lr_schedule, outside_value=lr_schedule[-1][-1]
            )

    @override(Policy)
    def on_global_var_update(self, global_vars):
        super(LearningRateSchedule, self).on_global_var_update(global_vars)
        # Re-evaluate the schedule at the current global timestep.
        self.cur_lr = self.lr_schedule.value(global_vars["timestep"])

    @override(TorchPolicy)
    def optimizer(self):
        # Push the scheduled lr into every optimizer param group before use.
        for p in self._optimizer.param_groups:
            p["lr"] = self.cur_lr
        return self._optimizer
@DeveloperAPI
class EntropyCoeffSchedule(object):
    """Mixin for TorchPolicy that adds entropy coeff decay."""

    @DeveloperAPI
    def __init__(self, entropy_coeff, entropy_coeff_schedule):
        # Current coefficient; refreshed on every global-var update.
        self.entropy_coeff = entropy_coeff

        if entropy_coeff_schedule is None:
            self.entropy_coeff_schedule = ConstantSchedule(entropy_coeff)
        else:
            # Allows for custom schedule similar to lr_schedule format
            if isinstance(entropy_coeff_schedule, list):
                # List of [timestep, coeff] breakpoints; hold last value.
                self.entropy_coeff_schedule = PiecewiseSchedule(
                    entropy_coeff_schedule,
                    outside_value=entropy_coeff_schedule[-1][-1])
            else:
                # Implements previous version but enforces outside_value:
                # scalar schedule = linear decay from entropy_coeff to 0.0
                # over `entropy_coeff_schedule` timesteps.
                self.entropy_coeff_schedule = PiecewiseSchedule(
                    [[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],
                    outside_value=0.0)

    @override(Policy)
    def on_global_var_update(self, global_vars):
        super(EntropyCoeffSchedule, self).on_global_var_update(global_vars)
        # Re-evaluate the schedule at the current global timestep.
        self.entropy_coeff = self.entropy_coeff_schedule.value(
            global_vars["timestep"]
        )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/policy/torch_policy_template.py
|
Python
|
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.torch_policy import TorchPolicy
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils import add_mixins
from ray.rllib.utils.annotations import override, DeveloperAPI
@DeveloperAPI
def build_torch_policy(name,
                       loss_fn,
                       get_default_config=None,
                       stats_fn=None,
                       postprocess_fn=None,
                       extra_action_out_fn=None,
                       extra_grad_process_fn=None,
                       optimizer_fn=None,
                       before_init=None,
                       after_init=None,
                       make_model_and_action_dist=None,
                       mixins=None):
    """Helper function for creating a torch policy at runtime.

    Arguments:
        name (str): name of the policy (e.g., "PPOTorchPolicy")
        loss_fn (func): function that returns a loss tensor as arguments
            (policy, model, dist_class, train_batch)
        get_default_config (func): optional function that returns the default
            config to merge with any overrides
        stats_fn (func): optional function that returns a dict of
            values given the policy and batch input tensors
        postprocess_fn (func): optional experience postprocessing function
            that takes the same args as Policy.postprocess_trajectory()
        extra_action_out_fn (func): optional function that returns
            a dict of extra values to include in experiences
        extra_grad_process_fn (func): optional function that is called after
            gradients are computed and returns processing info
        optimizer_fn (func): optional function that returns a torch optimizer
            given the policy and config
        before_init (func): optional function to run at the beginning of
            policy init that takes the same arguments as the policy constructor
        after_init (func): optional function to run at the end of policy init
            that takes the same arguments as the policy constructor
        make_model_and_action_dist (func): optional func that takes the same
            arguments as policy init and returns a tuple of model instance and
            torch action distribution class. If not specified, the default
            model and action dist from the catalog will be used
        mixins (list): list of any class mixins for the returned policy class.
            These mixins will be applied in order and will have higher
            precedence than the TorchPolicy class

    Returns:
        a TorchPolicy instance that uses the specified args
    """
    # Must be captured first, so with_updates() can rebuild this policy
    # class with some arguments overridden.
    original_kwargs = locals().copy()
    base = add_mixins(TorchPolicy, mixins)

    class policy_cls(base):
        def __init__(self, obs_space, action_space, config):
            if get_default_config:
                # User-supplied config overrides the defaults.
                config = dict(get_default_config(), **config)
            self.config = config

            if before_init:
                before_init(self, obs_space, action_space, config)

            if make_model_and_action_dist:
                self.model, self.dist_class = make_model_and_action_dist(
                    self, obs_space, action_space, config)
            else:
                # Fall back to the catalog's default model / action dist.
                self.dist_class, logit_dim = ModelCatalog.get_action_dist(
                    action_space, self.config["model"], torch=True)
                self.model = ModelCatalog.get_model_v2(
                    obs_space,
                    action_space,
                    logit_dim,
                    self.config["model"],
                    framework="torch")

            TorchPolicy.__init__(
                self, obs_space, action_space, config, self.model,
                loss_fn, self.dist_class
            )

            if after_init:
                after_init(self, obs_space, action_space, config)

        @override(Policy)
        def postprocess_trajectory(self,
                                   sample_batch,
                                   other_agent_batches=None,
                                   episode=None):
            if not postprocess_fn:
                return sample_batch
            return postprocess_fn(self, sample_batch, other_agent_batches,
                                  episode)

        @override(TorchPolicy)
        def extra_grad_process(self):
            if extra_grad_process_fn:
                return extra_grad_process_fn(self)
            else:
                return TorchPolicy.extra_grad_process(self)

        @override(TorchPolicy)
        def extra_action_out(self, input_dict, state_batches, model,
                             action_dist=None):
            if extra_action_out_fn:
                return extra_action_out_fn(
                    self, input_dict, state_batches, model, action_dist
                )
            else:
                return TorchPolicy.extra_action_out(
                    self, input_dict, state_batches, model, action_dist
                )

        @override(TorchPolicy)
        def optimizer(self):
            if optimizer_fn:
                return optimizer_fn(self, self.config)
            else:
                return TorchPolicy.optimizer(self)

        @override(TorchPolicy)
        def extra_grad_info(self, train_batch):
            if stats_fn:
                return stats_fn(self, train_batch)
            else:
                return TorchPolicy.extra_grad_info(self, train_batch)

    def with_updates(**overrides):
        # Rebuild the policy class with some of the original kwargs replaced.
        return build_torch_policy(**dict(original_kwargs, **overrides))

    policy_cls.with_updates = staticmethod(with_updates)
    policy_cls.__name__ = name
    policy_cls.__qualname__ = name
    return policy_cls
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/rollout.py
|
Python
|
#!/usr/bin/env python
import argparse
import collections
import json
import os
import pickle
import shelve
from pathlib import Path
import gym
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.env import MultiAgentEnv
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.evaluation.episode import _flatten_action
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.tune.utils import merge_dicts
# Shown verbatim in --help output (RawDescriptionHelpFormatter epilog).
EXAMPLE_USAGE = """
Example Usage via RLlib CLI:
rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
Example Usage via executable:
./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
"""

# Note: if you use any custom models or envs, register them here first, e.g.:
#
# ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
# register_env("pa_cartpole", lambda _: ParametricActionCartpole(10))
class RolloutSaver:
    """Utility class for storing rollouts.

    Currently supports two behaviours: the original, which
    simply dumps everything to a pickle file once complete,
    and a mode which stores each rollout as an entry in a Python
    shelf db file. The latter mode is more robust to memory problems
    or crashes part-way through the rollout generation. Each rollout
    is stored with a key based on the episode number (0-indexed),
    and the number of episodes is stored with the key "num_episodes",
    so to load the shelf file, use something like:

    with shelve.open('rollouts.pkl') as rollouts:
        for episode_index in range(rollouts["num_episodes"]):
            rollout = rollouts[str(episode_index)]

    If outfile is None, this class does nothing.
    """

    def __init__(self,
                 outfile=None,
                 use_shelve=False,
                 write_update_file=False,
                 target_steps=None,
                 target_episodes=None,
                 save_info=False):
        self._outfile = outfile
        self._update_file = None
        self._use_shelve = use_shelve
        self._write_update_file = write_update_file
        self._shelf = None
        self._num_episodes = 0
        self._rollouts = []
        self._current_rollout = []
        self._total_steps = 0
        self._target_episodes = target_episodes
        self._target_steps = target_steps
        self._save_info = save_info

    def _get_tmp_progress_filename(self):
        # Progress file lives next to the outfile, with a distinctive prefix.
        outpath = Path(self._outfile)
        return outpath.parent / ("__progress_" + outpath.name)

    @property
    def outfile(self):
        return self._outfile

    def __enter__(self):
        if self._outfile:
            if self._use_shelve:
                # Open a shelf file to store each rollout as they come in
                self._shelf = shelve.open(self._outfile)
            else:
                # Original behaviour - keep all rollouts in memory and save
                # them all at the end.
                # But check we can actually write to the outfile before going
                # through the effort of generating the rollouts:
                try:
                    with open(self._outfile, "wb") as _:
                        pass
                except IOError as x:
                    print("Can not open {} for writing - cancelling rollouts.".
                          format(self._outfile))
                    raise x
            if self._write_update_file:
                # Open a file to track rollout progress:
                self._update_file = self._get_tmp_progress_filename().open(
                    mode="w")
        return self

    def __exit__(self, type, value, traceback):
        if self._shelf:
            # Close the shelf file, and store the number of episodes for ease
            self._shelf["num_episodes"] = self._num_episodes
            self._shelf.close()
        elif self._outfile and not self._use_shelve:
            # Dump everything as one big pickle. Bugfix: use a context
            # manager so the handle is flushed and closed (previously the
            # file object from open() was never closed).
            with open(self._outfile, "wb") as f:
                pickle.dump(self._rollouts, f)
        if self._update_file:
            # Close and remove the temp progress file. Bugfix: the handle
            # was previously leaked (unlinked while still open).
            self._update_file.close()
            self._get_tmp_progress_filename().unlink()
            self._update_file = None

    def _get_progress(self):
        """Return a human-readable one-line progress summary."""
        if self._target_episodes:
            return "{} / {} episodes completed".format(self._num_episodes,
                                                       self._target_episodes)
        elif self._target_steps:
            return "{} / {} steps completed".format(self._total_steps,
                                                    self._target_steps)
        else:
            return "{} episodes completed".format(self._num_episodes)

    def begin_rollout(self):
        """Start accumulating a new episode."""
        self._current_rollout = []

    def end_rollout(self):
        """Finish the current episode, persisting it if an outfile is set."""
        if self._outfile:
            if self._use_shelve:
                # Save this episode as a new entry in the shelf database,
                # using the episode number as the key.
                self._shelf[str(self._num_episodes)] = self._current_rollout
            else:
                # Append this rollout to our list, to save later.
                self._rollouts.append(self._current_rollout)
        self._num_episodes += 1
        if self._update_file:
            self._update_file.seek(0)
            self._update_file.write(self._get_progress() + "\n")
            self._update_file.flush()

    def append_step(self, obs, action, next_obs, reward, done, info):
        """Add a step to the current rollout, if we are saving them"""
        if self._outfile:
            if self._save_info:
                self._current_rollout.append(
                    [obs, action, next_obs, reward, done, info])
            else:
                self._current_rollout.append(
                    [obs, action, next_obs, reward, done])
        self._total_steps += 1
def create_parser(parser_creator=None):
    """Build the argument parser for the rollout command.

    Arguments:
        parser_creator (callable): optional parser factory used instead of
            argparse.ArgumentParser (e.g. a subparsers group's add_parser).

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser_creator = parser_creator or argparse.ArgumentParser
    parser = parser_creator(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Roll out a reinforcement learning agent "
        "given a checkpoint.",
        epilog=EXAMPLE_USAGE)

    parser.add_argument(
        "checkpoint", type=str, help="Checkpoint from which to roll out.")
    required_named = parser.add_argument_group("required named arguments")
    required_named.add_argument(
        "--run",
        type=str,
        required=True,
        help="The algorithm or model to train. This may refer to the name "
        "of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
        "user-defined trainable function or class registered in the "
        "tune registry.")
    required_named.add_argument(
        "--env", type=str, help="The gym environment to use.")
    parser.add_argument(
        "--no-render",
        default=False,
        action="store_const",
        const=True,
        help="Surpress rendering of the environment.")
    parser.add_argument(
        "--monitor",
        default=False,
        action="store_const",
        const=True,
        help="Wrap environment in gym Monitor to record video.")
    # NOTE(review): parsed as a string (no type=int); run() converts via int().
    parser.add_argument(
        "--steps", default=10000, help="Number of steps to roll out.")
    parser.add_argument("--out", default=None, help="Output filename.")
    parser.add_argument(
        "--config",
        default="{}",
        type=json.loads,
        help="Algorithm-specific configuration (e.g. env, hyperparams). "
        "Surpresses loading of configuration from checkpoint.")
    parser.add_argument(
        "--episodes",
        default=0,
        help="Number of complete episodes to roll out. (Overrides --steps)")
    parser.add_argument(
        "--save-info",
        default=False,
        action="store_true",
        help="Save the info field generated by the step() method, "
        "as well as the action, observations, rewards and done fields.")
    parser.add_argument(
        "--use-shelve",
        default=False,
        action="store_true",
        help="Save rollouts into a python shelf file (will save each episode "
        "as it is generated). An output filename must be set using --out.")
    parser.add_argument(
        "--track-progress",
        default=False,
        action="store_true",
        help="Write progress to a temporary file (updated "
        "after each episode). An output filename must be set using --out; "
        "the progress file will live in the same folder.")
    return parser
def run(args, parser):
    """Restore the agent from args.checkpoint and execute the rollout."""
    config = {}
    # Load configuration from file
    config_dir = os.path.dirname(args.checkpoint)
    config_path = os.path.join(config_dir, "params.pkl")
    if not os.path.exists(config_path):
        # Tune checkpoints store params.pkl one level above the checkpoint.
        config_path = os.path.join(config_dir, "../params.pkl")
    if not os.path.exists(config_path):
        if not args.config:
            raise ValueError(
                "Could not find params.pkl in either the checkpoint dir or "
                "its parent directory.")
    else:
        with open(config_path, "rb") as f:
            config = pickle.load(f)
    if "num_workers" in config:
        # A rollout doesn't need the full training worker fleet.
        config["num_workers"] = min(2, config["num_workers"])
    # Command-line --config values override the checkpoint's config.
    config = merge_dicts(config, args.config)
    if not args.env:
        # Fall back to the env recorded in the checkpoint config.
        if not config.get("env"):
            parser.error("the following arguments are required: --env")
        args.env = config.get("env")

    ray.init()

    cls = get_agent_class(args.run)
    agent = cls(env=args.env, config=config)
    agent.restore(args.checkpoint)
    num_steps = int(args.steps)
    num_episodes = int(args.episodes)
    with RolloutSaver(
            args.out,
            args.use_shelve,
            write_update_file=args.track_progress,
            target_steps=num_steps,
            target_episodes=num_episodes,
            save_info=args.save_info) as saver:
        rollout(agent, args.env, num_steps, num_episodes, saver,
                args.no_render, args.monitor)
class DefaultMapping(collections.defaultdict):
    """defaultdict variant whose factory receives the missing key."""

    def __missing__(self, key):
        # Unlike plain defaultdict, pass the key to the factory.
        value = self.default_factory(key)
        self[key] = value
        return value
def default_policy_agent_mapping(unused_agent_id):
    """Map every agent to the single default policy."""
    return DEFAULT_POLICY_ID
def keep_going(steps, num_steps, episodes, num_episodes):
    """Determine whether we've collected enough data.

    An episode budget (num_episodes) overrides the step budget; if neither
    is set (falsy), collection continues forever.
    """
    if num_episodes:
        return episodes < num_episodes
    return steps < num_steps if num_steps else True
def rollout(agent,
            env_name,
            num_steps,
            num_episodes=0,
            saver=None,
            no_render=True,
            monitor=False):
    """Run `agent` in its environment, optionally saving every transition.

    Stops when `num_steps` steps are taken, or once `num_episodes` episodes
    finish if that is set (episodes override steps; zero for both means run
    forever).
    """
    policy_agent_mapping = default_policy_agent_mapping

    if saver is None:
        saver = RolloutSaver()

    if hasattr(agent, "workers"):
        env = agent.workers.local_worker().env
        multiagent = isinstance(env, MultiAgentEnv)
        if agent.workers.local_worker().multiagent:
            policy_agent_mapping = agent.config["multiagent"][
                "policy_mapping_fn"]

        policy_map = agent.workers.local_worker().policy_map
        state_init = {p: m.get_initial_state() for p, m in policy_map.items()}
        use_lstm = {p: len(s) > 0 for p, s in state_init.items()}
        action_init = {
            p: _flatten_action(m.action_space.sample())
            for p, m in policy_map.items()
        }
    else:
        env = gym.make(env_name)
        multiagent = False
        use_lstm = {DEFAULT_POLICY_ID: False}
        # Bugfix: state_init and action_init were previously undefined on
        # this branch, so the DefaultMapping factories below raised
        # NameError on first access of agent_states/prev_actions.
        state_init = {DEFAULT_POLICY_ID: []}
        action_init = {
            DEFAULT_POLICY_ID: _flatten_action(env.action_space.sample())
        }

    if monitor and not no_render and saver and saver.outfile is not None:
        # If monitoring has been requested,
        # manually wrap our environment with a gym monitor
        # which is set to record every episode.
        env = gym.wrappers.Monitor(
            env, os.path.join(os.path.dirname(saver.outfile), "monitor"),
            lambda x: True)

    steps = 0
    episodes = 0
    while keep_going(steps, num_steps, episodes, num_episodes):
        mapping_cache = {}  # in case policy_agent_mapping is stochastic
        saver.begin_rollout()
        obs = env.reset()
        agent_states = DefaultMapping(
            lambda agent_id: state_init[mapping_cache[agent_id]])
        prev_actions = DefaultMapping(
            lambda agent_id: action_init[mapping_cache[agent_id]])
        prev_rewards = collections.defaultdict(lambda: 0.)
        done = False
        reward_total = 0.0
        while not done and keep_going(steps, num_steps, episodes,
                                      num_episodes):
            # Treat a single-agent env as a one-agent multi-agent env.
            multi_obs = obs if multiagent else {_DUMMY_AGENT_ID: obs}
            action_dict = {}
            for agent_id, a_obs in multi_obs.items():
                if a_obs is not None:
                    policy_id = mapping_cache.setdefault(
                        agent_id, policy_agent_mapping(agent_id))
                    p_use_lstm = use_lstm[policy_id]
                    if p_use_lstm:
                        a_action, p_state, _ = agent.compute_action(
                            a_obs,
                            state=agent_states[agent_id],
                            prev_action=prev_actions[agent_id],
                            prev_reward=prev_rewards[agent_id],
                            policy_id=policy_id)
                        agent_states[agent_id] = p_state
                    else:
                        a_action = agent.compute_action(
                            a_obs,
                            prev_action=prev_actions[agent_id],
                            prev_reward=prev_rewards[agent_id],
                            policy_id=policy_id)
                    a_action = _flatten_action(a_action)  # tuple actions
                    action_dict[agent_id] = a_action
                    prev_actions[agent_id] = a_action
            action = action_dict

            action = action if multiagent else action[_DUMMY_AGENT_ID]
            next_obs, reward, done, info = env.step(action)
            if multiagent:
                for agent_id, r in reward.items():
                    prev_rewards[agent_id] = r
            else:
                prev_rewards[_DUMMY_AGENT_ID] = reward

            if multiagent:
                done = done["__all__"]
                reward_total += sum(reward.values())
            else:
                reward_total += reward
            if not no_render:
                env.render()
            saver.append_step(obs, action, next_obs, reward, done, info)
            steps += 1
            obs = next_obs
        saver.end_rollout()
        print("Episode #{}: reward: {}".format(episodes, reward_total))
        if done:
            episodes += 1
if __name__ == "__main__":
    # CLI entry point: parse rollout args and run.
    parser = create_parser()
    args = parser.parse_args()
    run(args, parser)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/scripts.py
|
Python
|
#!/usr/bin/env python
import argparse
from ray.rllib import train
from ray.rllib import rollout
# Shown verbatim in --help output (RawDescription epilog of the CLI parser).
EXAMPLE_USAGE = """
Example usage for training:
rllib train --run DQN --env CartPole-v0
Example usage for rollout:
rllib rollout /trial_dir/checkpoint_1/checkpoint-1 --run DQN
"""
def cli():
    """Entry point for the `rllib` command line: train or rollout."""
    parser = argparse.ArgumentParser(
        description="Train or Run an RLlib Agent.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=EXAMPLE_USAGE)
    subcommand_group = parser.add_subparsers(
        help="Commands to train or run an RLlib agent.", dest="command")

    # see _SubParsersAction.add_parser in
    # https://github.com/python/cpython/blob/master/Lib/argparse.py
    train_parser = train.create_parser(
        lambda **kwargs: subcommand_group.add_parser("train", **kwargs))
    rollout_parser = rollout.create_parser(
        lambda **kwargs: subcommand_group.add_parser("rollout", **kwargs))
    options = parser.parse_args()

    # Dispatch on the chosen subcommand; no subcommand -> print help.
    handlers = {
        "train": (train.run, train_parser),
        "rollout": (rollout.run, rollout_parser),
    }
    if options.command in handlers:
        run_fn, sub_parser = handlers[options.command]
        run_fn(options, sub_parser)
    else:
        parser.print_help()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/mock_worker.py
|
Python
|
import numpy as np
from ray.rllib.evaluation import SampleBatch
from ray.rllib.utils.filter import MeanStdFilter
class _MockWorker:
def __init__(self, sample_count=10):
self._weights = np.array([-10, -10, -10, -10])
self._grad = np.array([1, 1, 1, 1])
self._sample_count = sample_count
self.obs_filter = MeanStdFilter(())
self.rew_filter = MeanStdFilter(())
self.filters = {
"obs_filter": self.obs_filter,
"rew_filter": self.rew_filter
}
def sample(self):
samples_dict = {"observations": [], "rewards": []}
for i in range(self._sample_count):
samples_dict["observations"].append(
self.obs_filter(np.random.randn()))
samples_dict["rewards"].append(self.rew_filter(np.random.randn()))
return SampleBatch(samples_dict)
def compute_gradients(self, samples):
return self._grad * samples.count, {"batch_count": samples.count}
def apply_gradients(self, grads):
self._weights += self._grad
def get_weights(self):
return self._weights
def set_weights(self, weights):
self._weights = weights
def get_filters(self, flush_after=False):
obs_filter = self.obs_filter.copy()
rew_filter = self.rew_filter.copy()
if flush_after:
self.obs_filter.clear_buffer(), self.rew_filter.clear_buffer()
return {"obs_filter": obs_filter, "rew_filter": rew_filter}
def sync_filters(self, new_filters):
assert all(k in new_filters for k in self.filters)
for k in self.filters:
self.filters[k].sync(new_filters[k])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/multiagent_pendulum.py
|
Python
|
"""Integration test: (1) pendulum works, (2) single-agent multi-agent works."""
import ray
from ray.rllib.tests.test_multi_agent_env import make_multiagent
from ray.tune import run_experiments
from ray.tune.registry import register_env
if __name__ == "__main__":
    ray.init()
    # Wrap Pendulum-v0 as a one-agent MultiAgentEnv to exercise the
    # multi-agent code path on a known-solvable task.
    MultiPendulum = make_multiagent("Pendulum-v0")
    register_env("multi_pend", lambda _: MultiPendulum(1))
    trials = run_experiments({
        "test": {
            "run": "PPO",
            "env": "multi_pend",
            # Stop at -200 mean reward, or give up after 500k timesteps.
            "stop": {
                "timesteps_total": 500000,
                "episode_reward_mean": -200,
            },
            "config": {
                "train_batch_size": 2048,
                "vf_clip_param": 10.0,
                "num_workers": 0,
                "num_envs_per_worker": 10,
                "lambda": 0.1,
                "gamma": 0.95,
                "lr": 0.0003,
                "sgd_minibatch_size": 64,
                "num_sgd_iter": 10,
                "model": {
                    "fcnet_hiddens": [64, 64],
                },
                "batch_mode": "complete_episodes",
            },
        }
    })
    # Fail loudly if training never reached the target reward.
    if trials[0].last_result["episode_reward_mean"] < -200:
        raise ValueError("Did not get to -200 reward", trials[0].last_result)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/run_regression_tests.py
|
Python
|
#!/usr/bin/env python
# Runs one or more regression tests. Retries tests up to 3 times.
#
# Example usage:
# ./run_regression_tests.sh regression-tests/cartpole-es.yaml
import yaml
import sys
import ray
from ray.tune import run_experiments
if __name__ == "__main__":
    ray.init()
    # For each YAML config on the command line, run it and retry up to 3
    # times before declaring failure.
    for test in sys.argv[1:]:
        # Bugfix: was `yaml.load(open(test).read())` — an unclosed file
        # handle and the unsafe default Loader. safe_load only constructs
        # plain YAML types. NOTE(review): assumes the regression YAMLs use
        # no custom python tags — confirm before relying on this.
        with open(test) as f:
            experiments = yaml.safe_load(f)

        print("== Test config ==")
        print(yaml.dump(experiments))

        for i in range(3):
            trials = run_experiments(experiments, resume=False)

            # A trial fails if it stopped below its reward criterion.
            num_failures = 0
            for t in trials:
                if (t.last_result["episode_reward_mean"] <
                        t.stopping_criterion["episode_reward_mean"]):
                    num_failures += 1

            if not num_failures:
                print("Regression test PASSED")
                sys.exit(0)

            print("Regression test flaked, retry", i)

        print("Regression test FAILED")
        sys.exit(1)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_avail_actions_qmix.py
|
Python
|
import numpy as np
from gym.spaces import Tuple, Discrete, Dict, Box
import ray
from ray.tune import register_env
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.agents.qmix import QMixTrainer
class AvailActionsTestEnv(MultiAgentEnv):
    """Env whose obs carries an action mask with exactly one legal action.

    step() asserts (after the first step) that the agent picked the one
    action the mask allows, so a masking-aware trainer must never trip it.
    """

    action_space = Discrete(10)
    observation_space = Dict({
        "obs": Discrete(3),
        "action_mask": Box(0, 1, (10, )),
    })

    def __init__(self, env_config):
        self.state = None
        self.avail = env_config["avail_action"]
        # One-hot mask: only `avail_action` is legal.
        mask = np.array([0] * 10)
        mask[self.avail] = 1
        self.action_mask = mask

    def reset(self):
        self.state = 0
        obs = {"obs": self.state, "action_mask": self.action_mask}
        return {"agent_1": obs}

    def step(self, action_dict):
        if self.state > 0:
            assert action_dict["agent_1"] == self.avail, \
                "Failed to obey available actions mask!"
        self.state += 1
        obs = {"agent_1": {"obs": 0, "action_mask": self.action_mask}}
        rewards = {"agent_1": 1}
        # Episodes run for 21 steps.
        dones = {"__all__": self.state > 20}
        return obs, rewards, dones, {}
if __name__ == "__main__":
    grouping = {
        "group_1": ["agent_1"],  # trivial grouping for testing
    }
    # QMIX requires grouped (Tuple) spaces even for a single agent.
    obs_space = Tuple([AvailActionsTestEnv.observation_space])
    act_space = Tuple([AvailActionsTestEnv.action_space])
    register_env(
        "action_mask_test",
        lambda config: AvailActionsTestEnv(config).with_agent_groups(
            grouping, obs_space=obs_space, act_space=act_space))
    ray.init()
    agent = QMixTrainer(
        env="action_mask_test",
        config={
            "num_envs_per_worker": 5,  # test with vectorization on
            "env_config": {
                "avail_action": 3,
            },
        })
    for _ in range(5):
        agent.train()  # OK if it doesn't trip the action assertion error
    # 21 steps per episode at reward 1 each -> mean return must be 21.
    assert agent.train()["episode_reward_mean"] == 21.0
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_catalog.py
|
Python
|
import gym
import numpy as np
import unittest
from gym.spaces import Box, Discrete, Tuple
import ray
from ray.rllib.models import ModelCatalog, MODEL_DEFAULTS
from ray.rllib.models.model import Model
from ray.rllib.models.tf.tf_action_dist import TFActionDistribution
from ray.rllib.models.preprocessors import (NoPreprocessor, OneHotPreprocessor,
Preprocessor)
from ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork
from ray.rllib.models.tf.visionnet_v1 import VisionNetwork
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class CustomPreprocessor(Preprocessor):
    """Dummy preprocessor with a fixed (1,) shape, for registry tests."""

    def _init_shape(self, obs_space, options):
        return [1]
class CustomPreprocessor2(Preprocessor):
    """Second dummy preprocessor, to test registering multiple names."""

    def _init_shape(self, obs_space, options):
        return [1]
class CustomModel(Model):
    """Dummy model emitting a constant 5-dim output, for registry tests."""

    def _build_layers(self, *args):
        return tf.constant([[0] * 5]), None
class CustomActionDistribution(TFActionDistribution):
    """Test distribution whose output shape is configurable via custom_options."""

    @staticmethod
    def required_model_output_shape(action_space, model_config=None):
        # If "output_dim" is configured, require the model to emit that
        # shape; otherwise fall back to the action space's own shape.
        custom_options = model_config["custom_options"] or {}
        if custom_options is not None and custom_options.get("output_dim"):
            return custom_options.get("output_dim")
        return action_space.shape

    def _build_sample_op(self):
        custom_options = self.model.model_config["custom_options"]
        if "output_dim" in custom_options:
            # Keep the batch dimension, replace the rest with output_dim.
            output_shape = tf.concat(
                [tf.shape(self.inputs)[:1], custom_options["output_dim"]],
                axis=0)
        else:
            output_shape = tf.shape(self.inputs)
        return tf.random_uniform(output_shape)
class ModelCatalogTest(unittest.TestCase):
    """End-to-end checks of ModelCatalog registration and lookup."""

    def tearDown(self):
        # Several tests call ray.init(); always tear it down again.
        ray.shutdown()

    def testGymPreprocessors(self):
        # Built-in envs should resolve to the expected default preprocessors.
        p1 = ModelCatalog.get_preprocessor(gym.make("CartPole-v0"))
        self.assertEqual(type(p1), NoPreprocessor)

        p2 = ModelCatalog.get_preprocessor(gym.make("FrozenLake-v0"))
        self.assertEqual(type(p2), OneHotPreprocessor)

    def testTuplePreprocessor(self):
        ray.init(object_store_memory=1000 * 1024 * 1024)

        class TupleEnv:
            def __init__(self):
                self.observation_space = Tuple(
                    [Discrete(5),
                     Box(0, 5, shape=(3, ), dtype=np.float32)])

        p1 = ModelCatalog.get_preprocessor(TupleEnv())
        # 5 one-hot slots + 3 box dims = 8 flattened features.
        self.assertEqual(p1.shape, (8, ))
        self.assertEqual(
            list(p1.transform((0, np.array([1, 2, 3])))),
            [float(x) for x in [1, 0, 0, 0, 0, 1, 2, 3]])

    def testCustomPreprocessor(self):
        ray.init(object_store_memory=1000 * 1024 * 1024)
        ModelCatalog.register_custom_preprocessor("foo", CustomPreprocessor)
        ModelCatalog.register_custom_preprocessor("bar", CustomPreprocessor2)
        env = gym.make("CartPole-v0")
        p1 = ModelCatalog.get_preprocessor(env, {"custom_preprocessor": "foo"})
        self.assertEqual(str(type(p1)), str(CustomPreprocessor))
        p2 = ModelCatalog.get_preprocessor(env, {"custom_preprocessor": "bar"})
        self.assertEqual(str(type(p2)), str(CustomPreprocessor2))
        # Without the option, the default preprocessor is used.
        p3 = ModelCatalog.get_preprocessor(env)
        self.assertEqual(type(p3), NoPreprocessor)

    def testDefaultModels(self):
        ray.init(object_store_memory=1000 * 1024 * 1024)

        # Flat observations -> fully connected net.
        with tf.variable_scope("test1"):
            p1 = ModelCatalog.get_model({
                "obs": tf.zeros((10, 3), dtype=tf.float32)
            }, Box(0, 1, shape=(3, ), dtype=np.float32), Discrete(5), 5, {})
            self.assertEqual(type(p1), FullyConnectedNetwork)

        # Image-shaped observations -> vision net.
        with tf.variable_scope("test2"):
            p2 = ModelCatalog.get_model({
                "obs": tf.zeros((10, 84, 84, 3), dtype=tf.float32)
            }, Box(0, 1, shape=(84, 84, 3), dtype=np.float32), Discrete(5), 5,
                                        {})
            self.assertEqual(type(p2), VisionNetwork)

    def testCustomModel(self):
        ray.init(object_store_memory=1000 * 1024 * 1024)
        ModelCatalog.register_custom_model("foo", CustomModel)
        p1 = ModelCatalog.get_model({
            "obs": tf.constant([1, 2, 3])
        }, Box(0, 1, shape=(3, ), dtype=np.float32), Discrete(5), 5,
                                    {"custom_model": "foo"})
        self.assertEqual(str(type(p1)), str(CustomModel))

    def testCustomActionDistribution(self):
        # Bare stand-in for a model; only model_config is accessed.
        class Model():
            pass

        ray.init(object_store_memory=1000 * 1024 * 1024)
        # registration
        ModelCatalog.register_custom_action_dist("test",
                                                 CustomActionDistribution)
        action_space = Box(0, 1, shape=(5, 3), dtype=np.float32)

        # test retrieving it
        model_config = MODEL_DEFAULTS.copy()
        model_config["custom_action_dist"] = "test"
        dist_cls, param_shape = ModelCatalog.get_action_dist(
            action_space, model_config)
        self.assertEqual(str(dist_cls), str(CustomActionDistribution))
        self.assertEqual(param_shape, action_space.shape)

        # test the class works as a distribution
        dist_input = tf.placeholder(tf.float32, (None, ) + param_shape)
        model = Model()
        model.model_config = model_config
        dist = dist_cls(dist_input, model=model)
        self.assertEqual(dist.sample().shape[1:], dist_input.shape[1:])
        self.assertIsInstance(dist.sample(), tf.Tensor)
        with self.assertRaises(NotImplementedError):
            dist.entropy()

        # test passing the options to it
        model_config["custom_options"].update({"output_dim": (3, )})
        dist_cls, param_shape = ModelCatalog.get_action_dist(
            action_space, model_config)
        self.assertEqual(param_shape, (3, ))
        dist_input = tf.placeholder(tf.float32, (None, ) + param_shape)
        model.model_config = model_config
        dist = dist_cls(dist_input, model=model)
        self.assertEqual(dist.sample().shape[1:], dist_input.shape[1:])
        self.assertIsInstance(dist.sample(), tf.Tensor)
        with self.assertRaises(NotImplementedError):
            dist.entropy()
if __name__ == "__main__":
    # Run the catalog test suite directly.
    unittest.main(verbosity=1)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_checkpoint_restore.py
|
Python
|
#!/usr/bin/env python
import os
import shutil
import gym
import numpy as np
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.tune.trial import ExportFormat
def get_mean_action(alg, obs):
    """Average of 2000 sampled actions for `obs` (smooths stochastic policies)."""
    samples = [float(alg.compute_action(obs)) for _ in range(2000)]
    return np.mean(samples)
# NOTE(review): object_store_memory is passed as a float here; newer Ray
# versions require an int — confirm against the pinned Ray version.
ray.init(num_cpus=10, object_store_memory=1e9)

# Per-algorithm config overrides used by the restore and export tests below.
CONFIGS = {
    "SAC": {},
    "ES": {
        "episodes_per_batch": 10,
        "train_batch_size": 100,
        "num_workers": 2,
        "noise_size": 2500000,
        "observation_filter": "MeanStdFilter"
    },
    "DQN": {},
    "APEX_DDPG": {
        "observation_filter": "MeanStdFilter",
        "num_workers": 2,
        "min_iter_time_s": 1,
        "optimizer": {
            "num_replay_buffer_shards": 1,
        },
    },
    "DDPG": {
        # Deterministic settings so restored policies act reproducibly.
        "pure_exploration_steps": 0,
        "exploration_ou_noise_scale": 0.0,
        "timesteps_per_iteration": 100
    },
    "PPO": {
        "num_sgd_iter": 5,
        "train_batch_size": 1000,
        "num_workers": 2
    },
    "A3C": {
        "num_workers": 1
    },
    "ARS": {
        "num_rollouts": 10,
        "num_workers": 2,
        "noise_size": 2500000,
        "observation_filter": "MeanStdFilter"
    }
}
def test_ckpt_restore(use_object_store, alg_name, failures):
    """Briefly train one trainer, restore into a second, compare actions.

    Appends (alg_name, [a1, a2]) to `failures` whenever the restored
    policy's mean action diverges from the original's by more than 0.1.

    Arguments:
        use_object_store (bool): restore via save_to_object/restore_from_object
            instead of checkpoint files.
        alg_name (str): key into CONFIGS / the agent registry.
        failures (list): mutated in place with any mismatches found.
    """
    cls = get_agent_class(alg_name)
    # Bugfix: previously indexed CONFIGS[name], silently relying on the
    # global `name` leaking in from the __main__ loop instead of using
    # the alg_name argument.
    if "DDPG" in alg_name or "SAC" in alg_name:
        alg1 = cls(config=CONFIGS[alg_name], env="Pendulum-v0")
        alg2 = cls(config=CONFIGS[alg_name], env="Pendulum-v0")
        env = gym.make("Pendulum-v0")
    else:
        alg1 = cls(config=CONFIGS[alg_name], env="CartPole-v0")
        alg2 = cls(config=CONFIGS[alg_name], env="CartPole-v0")
        env = gym.make("CartPole-v0")

    for _ in range(3):
        res = alg1.train()
        print("current status: " + str(res))

    # Sync the models
    if use_object_store:
        alg2.restore_from_object(alg1.save_to_object())
    else:
        alg2.restore(alg1.save())

    for _ in range(10):
        # Random obs clipped to the env's bounds (3-dim Pendulum, 4-dim
        # CartPole).
        if "DDPG" in alg_name or "SAC" in alg_name:
            obs = np.clip(
                np.random.uniform(size=3),
                env.observation_space.low,
                env.observation_space.high)
        else:
            obs = np.clip(
                np.random.uniform(size=4),
                env.observation_space.low,
                env.observation_space.high)
        a1 = get_mean_action(alg1, obs)
        a2 = get_mean_action(alg2, obs)
        print("Checking computed actions", alg1, obs, a1, a2)
        if abs(a1 - a2) > .1:
            failures.append((alg_name, [a1, a2]))
def test_export(algo_name, failures):
    """Train briefly, then export TF model + checkpoint and validate files.

    Appends algo_name to `failures` whenever an exported directory is
    missing the expected TF SavedModel / checkpoint files.
    """

    def valid_tf_model(model_dir):
        # A SavedModel dir must contain saved_model.pb and variables/.
        return os.path.exists(os.path.join(model_dir, "saved_model.pb")) \
            and os.listdir(os.path.join(model_dir, "variables"))

    def valid_tf_checkpoint(checkpoint_dir):
        # A TF checkpoint consists of meta/index files plus the manifest.
        return os.path.exists(os.path.join(checkpoint_dir, "model.meta")) \
            and os.path.exists(os.path.join(checkpoint_dir, "model.index")) \
            and os.path.exists(os.path.join(checkpoint_dir, "checkpoint"))

    cls = get_agent_class(algo_name)
    # Bugfix: previously indexed CONFIGS[name], silently relying on the
    # global `name` leaking in from the __main__ loop instead of using
    # the algo_name argument.
    if "DDPG" in algo_name or "SAC" in algo_name:
        algo = cls(config=CONFIGS[algo_name], env="Pendulum-v0")
    else:
        algo = cls(config=CONFIGS[algo_name], env="CartPole-v0")

    for _ in range(3):
        res = algo.train()
        print("current status: " + str(res))

    export_dir = "/tmp/export_dir_%s" % algo_name
    print("Exporting model ", algo_name, export_dir)
    algo.export_policy_model(export_dir)
    if not valid_tf_model(export_dir):
        failures.append(algo_name)
    shutil.rmtree(export_dir)

    print("Exporting checkpoint", algo_name, export_dir)
    algo.export_policy_checkpoint(export_dir)
    if not valid_tf_checkpoint(export_dir):
        failures.append(algo_name)
    shutil.rmtree(export_dir)

    print("Exporting default policy", algo_name, export_dir)
    algo.export_model([ExportFormat.CHECKPOINT, ExportFormat.MODEL],
                      export_dir)
    if not valid_tf_model(os.path.join(export_dir, ExportFormat.MODEL)) \
            or not valid_tf_checkpoint(os.path.join(export_dir,
                                                    ExportFormat.CHECKPOINT)):
        failures.append(algo_name)
    shutil.rmtree(export_dir)
if __name__ == "__main__":
    # Phase 1: checkpoint restore, both via object store and via disk.
    failures = []
    for use_object_store in [False, True]:
        for name in [
                "SAC", "ES", "DQN", "DDPG", "PPO", "A3C", "APEX_DDPG", "ARS"
        ]:
            test_ckpt_restore(use_object_store, name, failures)

    assert not failures, failures
    print("All checkpoint restore tests passed!")

    # Phase 2: model/checkpoint export (TF-based algorithms only).
    failures = []
    for name in ["SAC", "DQN", "DDPG", "PPO", "A3C"]:
        test_export(name, failures)
    assert not failures, failures
    print("All export tests passed!")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_dependency.py
|
Python
|
#!/usr/bin/env python
import os
import sys
# Must be set before any rllib import so the TF auto-import is skipped.
os.environ["RLLIB_TEST_NO_TF_IMPORT"] = "1"

if __name__ == "__main__":
    from ray.rllib.agents.a3c import A2CTrainer
    # Importing rllib alone must not pull in tensorflow.
    assert "tensorflow" not in sys.modules, "TF initially present"

    # note: no ray.init(), to test it works without Ray
    trainer = A2CTrainer(
        env="CartPole-v0", config={
            "use_pytorch": True,
            "num_workers": 0
        })
    trainer.train()

    # Even a full PyTorch training step must not have imported TF.
    assert "tensorflow" not in sys.modules, "TF should not be imported"
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_eager_support.py
|
Python
|
import unittest
import ray
from ray import tune
from ray.rllib.agents.registry import get_agent_class
def check_support(alg, config, test_trace=True):
    """Smoke-test `alg` in eager mode, optionally with eager tracing on.

    Mutates `config` in place (env, eager flags, log level) and runs the
    trainer for zero iterations via tune.
    """
    config["eager"] = True
    # Continuous-control algorithms get Pendulum, everything else CartPole.
    if alg in ["APEX_DDPG", "TD3", "DDPG", "SAC"]:
        config["env"] = "Pendulum-v0"
    else:
        config["env"] = "CartPole-v0"
    trainer_cls = get_agent_class(alg)
    config["log_level"] = "ERROR"
    # First pass: plain eager execution, no tracing.
    config["eager_tracing"] = False
    tune.run(trainer_cls, config=config, stop={"training_iteration": 0})
    # Second pass: same run with tf.function tracing enabled.
    if test_trace:
        config["eager_tracing"] = True
        tune.run(trainer_cls, config=config, stop={"training_iteration": 0})
class TestEagerSupport(unittest.TestCase):
    """One eager-mode smoke test per supported algorithm."""

    def setUp(self):
        ray.init(num_cpus=4)

    def tearDown(self):
        ray.shutdown()

    def testSimpleQ(self):
        check_support("SimpleQ", {"num_workers": 0, "learning_starts": 0})

    def testDQN(self):
        check_support("DQN", {"num_workers": 0, "learning_starts": 0})

    def testA2C(self):
        check_support("A2C", {"num_workers": 0})

    def testA3C(self):
        # TODO(ekl) trace on is flaky
        check_support("A3C", {"num_workers": 1}, test_trace=False)

    def testPG(self):
        check_support("PG", {"num_workers": 0})

    def testPPO(self):
        check_support("PPO", {"num_workers": 0})

    def testAPPO(self):
        check_support("APPO", {"num_workers": 1, "num_gpus": 0})

    def testIMPALA(self):
        check_support("IMPALA", {"num_workers": 1, "num_gpus": 0})

    def testAPEX_DQN(self):
        check_support(
            "APEX", {
                "num_workers": 2,
                "learning_starts": 0,
                "num_gpus": 0,
                "min_iter_time_s": 1,
                "timesteps_per_iteration": 100
            })
if __name__ == "__main__":
    # Run this suite under pytest when invoked directly.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_env_with_subprocess.py
|
Python
|
"""Tests that envs clean up after themselves on agent exit."""
from gym.spaces import Discrete
import atexit
import gym
import os
import subprocess
import tempfile
import time
import ray
from ray.tune import run_experiments
from ray.tune.registry import register_env
# Dummy command to run as a subprocess with a unique name
# (the timestamp makes it greppable in `ps` output without false positives).
UNIQUE_CMD = "sleep {}".format(str(time.time()))
# Marker files: 0/1 are removed by atexit handlers, 2/3 by env.close();
# their absence at the end proves the handlers ran.
_, UNIQUE_FILE_0 = tempfile.mkstemp("test_env_with_subprocess")
_, UNIQUE_FILE_1 = tempfile.mkstemp("test_env_with_subprocess")
_, UNIQUE_FILE_2 = tempfile.mkstemp("test_env_with_subprocess")
_, UNIQUE_FILE_3 = tempfile.mkstemp("test_env_with_subprocess")
class EnvWithSubprocess(gym.Env):
    """Our env that spawns a subprocess.

    The subprocess and the marker files must be cleaned up on agent exit;
    the __main__ block below verifies that.
    """

    def __init__(self, config):
        self.action_space = Discrete(2)
        self.observation_space = Discrete(2)
        # Subprocess that should be cleaned up
        self.subproc = subprocess.Popen(UNIQUE_CMD.split(" "), shell=False)
        self.config = config
        # Exit handler should be called
        # (worker_index distinguishes the driver's env from worker envs).
        if config.worker_index == 0:
            atexit.register(lambda: os.unlink(UNIQUE_FILE_0))
        else:
            atexit.register(lambda: os.unlink(UNIQUE_FILE_1))
        # Registered last so it runs first (atexit is LIFO): kill the
        # subprocess before removing the marker files.
        atexit.register(lambda: self.subproc.kill())

    def close(self):
        # close() removes a different marker pair than atexit, so the test
        # can tell the two cleanup paths apart.
        if self.config.worker_index == 0:
            os.unlink(UNIQUE_FILE_2)
        else:
            os.unlink(UNIQUE_FILE_3)

    def reset(self):
        return 0

    def step(self, action):
        # Single-step episodes: always done, zero reward.
        return 0, 0, True, {}
def leaked_processes():
    """Return raw `ps` output matching our unique command (empty if none)."""
    # `|| true` forces exit status 0 when grep matches nothing, so
    # check_output does not raise on the (expected) no-leak case.
    shell_cmd = "ps aux | grep '{}' | grep -v grep || true".format(UNIQUE_CMD)
    return subprocess.check_output(shell_cmd, shell=True)
if __name__ == "__main__":
    register_env("subproc", lambda config: EnvWithSubprocess(config))
    ray.init()
    # Preconditions: marker files exist and no stray subprocess yet.
    assert os.path.exists(UNIQUE_FILE_0)
    assert os.path.exists(UNIQUE_FILE_1)
    assert not leaked_processes()
    # One short PG run with a single worker spawns the env (and subprocess)
    # on both the driver and the worker.
    run_experiments({
        "demo": {
            "run": "PG",
            "env": "subproc",
            "num_samples": 1,
            "config": {
                "num_workers": 1,
            },
            "stop": {
                "training_iteration": 1
            },
        },
    })
    # Give the worker processes time to exit and run their atexit handlers.
    time.sleep(5.0)
    leaked = leaked_processes()
    assert not leaked, "LEAKED PROCESSES: {}".format(leaked)
    assert not os.path.exists(UNIQUE_FILE_0), "atexit handler not called"
    assert not os.path.exists(UNIQUE_FILE_1), "atexit handler not called"
    assert not os.path.exists(UNIQUE_FILE_2), "close not called"
    assert not os.path.exists(UNIQUE_FILE_3), "close not called"
    print("OK")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_evaluators.py
|
Python
|
import unittest
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.agents.a3c import A3CTrainer
from ray.rllib.agents.dqn.dqn_policy import _adjust_nstep
from ray.tune.registry import register_env
import gym
class EvalTest(unittest.TestCase):
    """Tests for n-step reward adjustment and the evaluation_interval option."""

    def testDqnNStep(self):
        # _adjust_nstep mutates the lists in place: rewards become 3-step
        # discounted returns (gamma=0.9), new_obs/dones shift forward.
        obs = [1, 2, 3, 4, 5, 6, 7]
        actions = ["a", "b", "a", "a", "a", "b", "a"]
        rewards = [10.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0]
        new_obs = [2, 3, 4, 5, 6, 7, 8]
        dones = [0, 0, 0, 0, 0, 0, 1]
        _adjust_nstep(3, 0.9, obs, actions, rewards, new_obs, dones)
        self.assertEqual(obs, [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(actions, ["a", "b", "a", "a", "a", "b", "a"])
        self.assertEqual(new_obs, [4, 5, 6, 7, 8, 8, 8])
        self.assertEqual(dones, [0, 0, 0, 0, 1, 1, 1])
        self.assertEqual(rewards,
                         [91.0, 171.0, 271.0, 271.0, 271.0, 190.0, 100.0])

    def testEvaluationOption(self):
        def env_creator(env_config):
            return gym.make("CartPole-v0")

        agent_classes = [DQNTrainer, A3CTrainer]

        for agent_cls in agent_classes:
            ray.init(object_store_memory=1000 * 1024 * 1024)
            register_env("CartPoleWrapped-v0", env_creator)
            agent = agent_cls(
                env="CartPoleWrapped-v0",
                config={
                    "evaluation_interval": 2,
                    "evaluation_num_episodes": 2,
                    "evaluation_config": {
                        "gamma": 0.98,
                        "env_config": {
                            "fake_arg": True
                        }
                    },
                })
            # Given evaluation_interval=2, r0, r2, r4 should not contain
            # evaluation metrics while r1, r3 should do.
            r0 = agent.train()
            r1 = agent.train()
            r2 = agent.train()
            r3 = agent.train()

            self.assertTrue("evaluation" in r1)
            self.assertTrue("evaluation" in r3)
            self.assertFalse("evaluation" in r0)
            self.assertFalse("evaluation" in r2)
            self.assertTrue("episode_reward_mean" in r1["evaluation"])
            self.assertNotEqual(r1["evaluation"], r3["evaluation"])
            ray.shutdown()
if __name__ == "__main__":
    # Each test manages its own ray.init/shutdown.
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_external_env.py
|
Python
|
import gym
import numpy as np
import random
import unittest
import uuid
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.tests.test_rollout_worker import (BadPolicy, MockPolicy,
MockEnv)
from ray.tune.registry import register_env
def make_simple_serving(multiagent, superclass):
    """Build a serving-env subclass of `superclass` that replays `env`
    on-policy forever.

    Arguments:
        multiagent (bool): if True, log_returns is called without the
            per-step info dict (multi-agent signature).
        superclass: ExternalEnv-like base class to subclass.

    Returns:
        The generated SimpleServing class.
    """

    class SimpleServing(superclass):
        def __init__(self, env):
            superclass.__init__(self, env.action_space, env.observation_space)
            self.env = env

        def run(self):
            episode_id = self.start_episode()
            observation = self.env.reset()
            while True:
                act = self.get_action(episode_id, observation)
                observation, rew, done, info = self.env.step(act)
                # Multi-agent log_returns takes no per-step info dict.
                if multiagent:
                    self.log_returns(episode_id, rew)
                else:
                    self.log_returns(episode_id, rew, info=info)
                if done:
                    self.end_episode(episode_id, observation)
                    observation = self.env.reset()
                    episode_id = self.start_episode()

    return SimpleServing
# Generate the single-agent SimpleServing class; the multi-agent flavor is
# built in test_external_multi_agent_env.py from the same factory.
SimpleServing = make_simple_serving(False, ExternalEnv)
class PartOffPolicyServing(ExternalEnv):
    """Serving env that takes a random action (logged off-policy via
    log_action) a fraction `off_pol_frac` of the time, and queries the
    policy otherwise."""

    def __init__(self, env, off_pol_frac):
        ExternalEnv.__init__(self, env.action_space, env.observation_space)
        self.env = env
        self.off_pol_frac = off_pol_frac

    def run(self):
        eid = self.start_episode()
        obs = self.env.reset()
        while True:
            if random.random() < self.off_pol_frac:
                # Off-policy step: sample and log the action ourselves.
                action = self.env.action_space.sample()
                self.log_action(eid, obs, action)
            else:
                # On-policy step: ask the served policy.
                action = self.get_action(eid, obs)
            obs, reward, done, info = self.env.step(action)
            self.log_returns(eid, reward, info=info)
            if done:
                self.end_episode(eid, obs)
                obs = self.env.reset()
                eid = self.start_episode()
class SimpleOffPolicyServing(ExternalEnv):
    """Serving env that ignores the policy entirely: it replays one fixed
    action forever, logging each step off-policy via log_action()."""

    def __init__(self, env, fixed_action):
        ExternalEnv.__init__(self, env.action_space, env.observation_space)
        self.env = env
        self.fixed_action = fixed_action

    def run(self):
        episode_id = self.start_episode()
        observation = self.env.reset()
        while True:
            chosen = self.fixed_action
            # Off-policy: record our own action instead of querying.
            self.log_action(episode_id, observation, chosen)
            observation, rew, done, info = self.env.step(chosen)
            self.log_returns(episode_id, rew, info=info)
            if done:
                self.end_episode(episode_id, observation)
                observation = self.env.reset()
                episode_id = self.start_episode()
class MultiServing(ExternalEnv):
    """External env that interleaves episodes from 5 underlying envs,
    stepping a random subset of 2 of them per iteration."""

    def __init__(self, env_creator):
        self.env_creator = env_creator
        # One throwaway env just to read the spaces.
        self.env = env_creator()
        ExternalEnv.__init__(self, self.env.action_space,
                             self.env.observation_space)

    def run(self):
        envs = [self.env_creator() for _ in range(5)]
        # cur_obs holds the latest obs per in-flight env; absence from
        # cur_obs marks an env as idle. eids maps env index -> episode id.
        cur_obs = {}
        eids = {}
        while True:
            active = np.random.choice(range(5), 2, replace=False)
            for i in active:
                if i not in cur_obs:
                    # Env i is idle: start a fresh episode for it.
                    eids[i] = uuid.uuid4().hex
                    self.start_episode(episode_id=eids[i])
                    cur_obs[i] = envs[i].reset()
            actions = [self.get_action(eids[i], cur_obs[i]) for i in active]
            for i, action in zip(active, actions):
                obs, reward, done, _ = envs[i].step(action)
                cur_obs[i] = obs
                self.log_returns(eids[i], reward)
                if done:
                    self.end_episode(eids[i], obs)
                    # Dropping the entry marks env i idle so a new episode
                    # starts the next time it is sampled.
                    del cur_obs[i]
class TestExternalEnv(unittest.TestCase):
    """End-to-end sampling and training tests for single-agent ExternalEnv."""

    def testExternalEnvCompleteEpisodes(self):
        # complete_episodes rounds the 40-step request up to whole episodes
        # of 25 steps, hence 50 per batch.
        ev = RolloutWorker(
            env_creator=lambda _: SimpleServing(MockEnv(25)),
            policy=MockPolicy,
            batch_steps=40,
            batch_mode="complete_episodes")
        for _ in range(3):
            batch = ev.sample()
            self.assertEqual(batch.count, 50)

    def testExternalEnvTruncateEpisodes(self):
        # truncate_episodes returns exactly batch_steps.
        ev = RolloutWorker(
            env_creator=lambda _: SimpleServing(MockEnv(25)),
            policy=MockPolicy,
            batch_steps=40,
            batch_mode="truncate_episodes")
        for _ in range(3):
            batch = ev.sample()
            self.assertEqual(batch.count, 40)

    def testExternalEnvOffPolicy(self):
        # Off-policy logging: every sampled action is the fixed action 42.
        ev = RolloutWorker(
            env_creator=lambda _: SimpleOffPolicyServing(MockEnv(25), 42),
            policy=MockPolicy,
            batch_steps=40,
            batch_mode="complete_episodes")
        for _ in range(3):
            batch = ev.sample()
            self.assertEqual(batch.count, 50)
            self.assertEqual(batch["actions"][0], 42)
            self.assertEqual(batch["actions"][-1], 42)

    def testExternalEnvBadActions(self):
        # A policy emitting bad actions must surface as a sampling error.
        ev = RolloutWorker(
            env_creator=lambda _: SimpleServing(MockEnv(25)),
            policy=BadPolicy,
            sample_async=True,
            batch_steps=40,
            batch_mode="truncate_episodes")
        self.assertRaises(Exception, lambda: ev.sample())

    def testTrainCartpoleOffPolicy(self):
        # DQN should still learn with 20% of actions logged off-policy.
        register_env(
            "test3", lambda _: PartOffPolicyServing(
                gym.make("CartPole-v0"), off_pol_frac=0.2))
        dqn = DQNTrainer(env="test3", config={"exploration_fraction": 0.001})
        for i in range(100):
            result = dqn.train()
            print("Iteration {}, reward {}, timesteps {}".format(
                i, result["episode_reward_mean"], result["timesteps_total"]))
            if result["episode_reward_mean"] >= 100:
                return
        raise Exception("failed to improve reward")

    def testTrainCartpole(self):
        register_env("test", lambda _: SimpleServing(gym.make("CartPole-v0")))
        pg = PGTrainer(env="test", config={"num_workers": 0})
        for i in range(100):
            result = pg.train()
            print("Iteration {}, reward {}, timesteps {}".format(
                i, result["episode_reward_mean"], result["timesteps_total"]))
            if result["episode_reward_mean"] >= 100:
                return
        raise Exception("failed to improve reward")

    def testTrainCartpoleMulti(self):
        # Same learning check, but with 5 interleaved envs (MultiServing).
        register_env("test2",
                     lambda _: MultiServing(lambda: gym.make("CartPole-v0")))
        pg = PGTrainer(env="test2", config={"num_workers": 0})
        for i in range(100):
            result = pg.train()
            print("Iteration {}, reward {}, timesteps {}".format(
                i, result["episode_reward_mean"], result["timesteps_total"]))
            if result["episode_reward_mean"] >= 100:
                return
        raise Exception("failed to improve reward")

    def testExternalEnvHorizonNotSupported(self):
        # episode_horizon is incompatible with external envs.
        ev = RolloutWorker(
            env_creator=lambda _: SimpleServing(MockEnv(25)),
            policy=MockPolicy,
            episode_horizon=20,
            batch_steps=10,
            batch_mode="complete_episodes")
        self.assertRaises(ValueError, lambda: ev.sample())
if __name__ == "__main__":
    # One shared ray session for the whole suite.
    ray.init()
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_external_multi_agent_env.py
|
Python
|
import gym
import numpy as np
import random
import unittest
import ray
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.optimizers import SyncSamplesOptimizer
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv
from ray.rllib.tests.test_rollout_worker import MockPolicy
from ray.rllib.tests.test_external_env import make_simple_serving
from ray.rllib.tests.test_multi_agent_env import BasicMultiAgent, MultiCartpole
from ray.rllib.evaluation.metrics import collect_metrics
SimpleMultiServing = make_simple_serving(True, ExternalMultiAgentEnv)
class TestExternalMultiAgentEnv(unittest.TestCase):
    """Sampling and training tests for ExternalMultiAgentEnv."""

    def testExternalMultiAgentEnvCompleteEpisodes(self):
        agents = 4
        ev = RolloutWorker(
            env_creator=lambda _: SimpleMultiServing(BasicMultiAgent(agents)),
            policy=MockPolicy,
            batch_steps=40,
            batch_mode="complete_episodes")
        for _ in range(3):
            batch = ev.sample()
            self.assertEqual(batch.count, 40)
            # All agents should have contributed steps.
            self.assertEqual(len(np.unique(batch["agent_index"])), agents)

    def testExternalMultiAgentEnvTruncateEpisodes(self):
        agents = 4
        ev = RolloutWorker(
            env_creator=lambda _: SimpleMultiServing(BasicMultiAgent(agents)),
            policy=MockPolicy,
            batch_steps=40,
            batch_mode="truncate_episodes")
        for _ in range(3):
            batch = ev.sample()
            # 40 env steps x 4 agents = 160 agent steps per batch.
            self.assertEqual(batch.count, 160)
            self.assertEqual(len(np.unique(batch["agent_index"])), agents)

    def testExternalMultiAgentEnvSample(self):
        # Two policies mapped round-robin over agent ids.
        agents = 2
        act_space = gym.spaces.Discrete(2)
        obs_space = gym.spaces.Discrete(2)
        ev = RolloutWorker(
            env_creator=lambda _: SimpleMultiServing(BasicMultiAgent(agents)),
            policy={
                "p0": (MockPolicy, obs_space, act_space, {}),
                "p1": (MockPolicy, obs_space, act_space, {}),
            },
            policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
            batch_steps=50)
        batch = ev.sample()
        self.assertEqual(batch.count, 50)

    def testTrainExternalMultiCartpoleManyPolicies(self):
        # 20 PG policies randomly assigned to 20 cartpole agents should
        # still reach a mean reward of 25 per agent.
        n = 20
        single_env = gym.make("CartPole-v0")
        act_space = single_env.action_space
        obs_space = single_env.observation_space
        policies = {}
        for i in range(20):
            policies["pg_{}".format(i)] = (PGTFPolicy, obs_space, act_space,
                                           {})
        policy_ids = list(policies.keys())
        ev = RolloutWorker(
            env_creator=lambda _: MultiCartpole(n),
            policy=policies,
            policy_mapping_fn=lambda agent_id: random.choice(policy_ids),
            batch_steps=100)
        optimizer = SyncSamplesOptimizer(WorkerSet._from_existing(ev))
        for i in range(100):
            optimizer.step()
            result = collect_metrics(ev)
            print("Iteration {}, rew {}".format(i,
                                                result["policy_reward_mean"]))
            print("Total reward", result["episode_reward_mean"])
            if result["episode_reward_mean"] >= 25 * n:
                return
        raise Exception("failed to improve reward")
if __name__ == "__main__":
    # One shared ray session for the whole suite.
    ray.init()
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_filters.py
|
Python
|
import unittest
import numpy as np
import ray
from ray.rllib.utils.filter import RunningStat, MeanStdFilter
from ray.rllib.utils import FilterManager
from ray.rllib.tests.mock_worker import _MockWorker
class RunningStatTest(unittest.TestCase):
    """Checks RunningStat against numpy mean/var, for several shapes."""

    def testRunningStat(self):
        for shp in ((), (3, ), (3, 4)):
            li = []
            rs = RunningStat(shp)
            for _ in range(5):
                val = np.random.randn(*shp)
                rs.push(val)
                li.append(val)
            m = np.mean(li, axis=0)
            self.assertTrue(np.allclose(rs.mean, m))
            # Sample variance (ddof=1); degenerate single-sample case aside.
            v = (np.square(m)
                 if (len(li) == 1) else np.var(li, ddof=1, axis=0))
            self.assertTrue(np.allclose(rs.var, v))

    def testCombiningStat(self):
        # Merging rs2 into rs1 must equal pushing all values into one stat.
        for shape in [(), (3, ), (3, 4)]:
            li = []
            rs1 = RunningStat(shape)
            rs2 = RunningStat(shape)
            rs = RunningStat(shape)
            for _ in range(5):
                val = np.random.randn(*shape)
                rs1.push(val)
                rs.push(val)
                li.append(val)
            for _ in range(9):
                rs2.push(val)
                rs.push(val)
                li.append(val)
            rs1.update(rs2)
            assert np.allclose(rs.mean, rs1.mean)
            assert np.allclose(rs.std, rs1.std)
class MSFTest(unittest.TestCase):
    """MeanStdFilter sync / buffer / apply_changes semantics."""

    def testBasic(self):
        for shape in [(), (3, ), (3, 4, 4)]:
            filt = MeanStdFilter(shape)
            for i in range(5):
                filt(np.ones(shape))
            self.assertEqual(filt.rs.n, 5)
            self.assertEqual(filt.buffer.n, 5)

            # sync() copies both running stats and buffer.
            filt2 = MeanStdFilter(shape)
            filt2.sync(filt)
            self.assertEqual(filt2.rs.n, 5)
            self.assertEqual(filt2.buffer.n, 5)

            # clear_buffer only affects the local buffer, not copies.
            filt.clear_buffer()
            self.assertEqual(filt.buffer.n, 0)
            self.assertEqual(filt2.buffer.n, 5)

            # apply_changes merges the other filter's buffer into rs;
            # with_buffer controls whether the buffer itself is adopted.
            filt.apply_changes(filt2, with_buffer=False)
            self.assertEqual(filt.buffer.n, 0)
            self.assertEqual(filt.rs.n, 10)

            filt.apply_changes(filt2, with_buffer=True)
            self.assertEqual(filt.buffer.n, 5)
            self.assertEqual(filt.rs.n, 15)
class FilterManagerTest(unittest.TestCase):
    """FilterManager.synchronize between the driver and a remote worker."""

    def setUp(self):
        ray.init(num_cpus=1, object_store_memory=1000 * 1024 * 1024)

    def tearDown(self):
        ray.shutdown()

    def testSynchronize(self):
        """Synchronize applies filter buffer onto own filter"""
        filt1 = MeanStdFilter(())
        for i in range(10):
            filt1(i)
        self.assertEqual(filt1.rs.n, 10)
        filt1.clear_buffer()
        self.assertEqual(filt1.buffer.n, 0)

        RemoteWorker = ray.remote(_MockWorker)
        remote_e = RemoteWorker.remote(sample_count=10)
        remote_e.sample.remote()

        # Pull the worker's buffered samples into filt1, then push the
        # merged filter back out to the worker.
        FilterManager.synchronize({
            "obs_filter": filt1,
            "rew_filter": filt1.copy()
        }, [remote_e])

        filters = ray.get(remote_e.get_filters.remote())
        obs_f = filters["obs_filter"]
        self.assertEqual(filt1.rs.n, 20)
        self.assertEqual(filt1.buffer.n, 0)
        self.assertEqual(obs_f.rs.n, filt1.rs.n)
        self.assertEqual(obs_f.buffer.n, filt1.buffer.n)
if __name__ == "__main__":
    # Each test manages its own ray session via setUp/tearDown.
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_ignore_worker_failure.py
|
Python
|
import gym
import unittest
import ray
from ray.rllib import _register_all
from ray.rllib.agents.registry import get_agent_class
from ray.tune.registry import register_env
class FaultInjectEnv(gym.Env):
    """CartPole wrapper that raises on step() for configured worker indices.

    `config` is rllib's EnvContext: dict-style access for user keys
    ("bad_indices") plus a worker_index attribute.
    """

    def __init__(self, config):
        self.env = gym.make("CartPole-v0")
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space
        self.config = config

    def reset(self):
        return self.env.reset()

    def step(self, action):
        # Simulate a worker crash on the designated indices.
        if self.config.worker_index in self.config["bad_indices"]:
            raise ValueError("This is a simulated error from {}".format(
                self.config.worker_index))
        return self.env.step(action)
class IgnoresWorkerFailure(unittest.TestCase):
    """Per-algorithm checks of the ignore_worker_failures option."""

    def doTest(self, alg, config, fn=None):
        # Runs `fn` (default: recovery check) in a fresh ray session.
        fn = fn or self._doTestFaultRecover
        try:
            ray.init(num_cpus=6)
            fn(alg, config)
        finally:
            ray.shutdown()
            _register_all()  # re-register the evicted objects

    def _doTestFaultRecover(self, alg, config):
        # One bad worker out of two: training should continue on the rest.
        register_env("fault_env", lambda c: FaultInjectEnv(c))
        agent_cls = get_agent_class(alg)

        # Test fault handling
        config["num_workers"] = 2
        config["ignore_worker_failures"] = True
        config["env_config"] = {"bad_indices": [1]}
        a = agent_cls(config=config, env="fault_env")
        result = a.train()
        self.assertTrue(result["num_healthy_workers"], 1)
        a.stop()

    def _doTestFaultFatal(self, alg, config):
        # Both workers bad: even with ignore_worker_failures, training
        # must raise once no healthy workers remain.
        register_env("fault_env", lambda c: FaultInjectEnv(c))
        agent_cls = get_agent_class(alg)

        # Test raises real error when out of workers
        config["num_workers"] = 2
        config["ignore_worker_failures"] = True
        config["env_config"] = {"bad_indices": [1, 2]}
        a = agent_cls(config=config, env="fault_env")
        self.assertRaises(Exception, lambda: a.train())
        a.stop()

    def testFatal(self):
        # test the case where all workers fail
        self.doTest("PG", {"optimizer": {}}, fn=self._doTestFaultFatal)

    def testAsyncGrads(self):
        self.doTest("A3C", {"optimizer": {"grads_per_step": 1}})

    def testAsyncReplay(self):
        self.doTest(
            "APEX", {
                "timesteps_per_iteration": 1000,
                "num_gpus": 0,
                "min_iter_time_s": 1,
                "learning_starts": 1000,
                "target_network_update_freq": 100,
                "optimizer": {
                    "num_replay_buffer_shards": 1,
                },
            })

    def testAsyncSamples(self):
        self.doTest("IMPALA", {"num_gpus": 0})

    def testSyncReplay(self):
        self.doTest("DQN", {"timesteps_per_iteration": 1})

    def testMultiGPU(self):
        self.doTest(
            "PPO", {
                "num_sgd_iter": 1,
                "train_batch_size": 10,
                "sample_batch_size": 10,
                "sgd_minibatch_size": 1,
            })

    def testSyncSamples(self):
        self.doTest("PG", {"optimizer": {}})

    def testAsyncSamplingOption(self):
        self.doTest("PG", {"optimizer": {}, "sample_async": True})
if __name__ == "__main__":
    # Each test starts/stops its own ray session via doTest().
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_io.py
|
Python
|
import glob
import gym
import json
import numpy as np
import os
import random
import shutil
import tempfile
import time
import unittest
import ray
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.evaluation import SampleBatch
from ray.rllib.offline import IOContext, JsonWriter, JsonReader
from ray.rllib.offline.json_writer import _to_json
from ray.rllib.tests.test_multi_agent_env import MultiCartpole
from ray.tune.registry import register_env
# Shared fixture: a 4-step batch spanning three episodes (eps_id 1,1,2,3),
# used by the split-by-episode and JSON writer tests below.
SAMPLES = SampleBatch({
    "actions": np.array([1, 2, 3, 4]),
    "obs": np.array([4, 5, 6, 7]),
    "eps_id": [1, 1, 2, 3],
})
def make_sample_batch(i):
    """Build a tiny 3-step batch whose actions and obs all equal `i`."""
    values = [i, i, i]
    return SampleBatch({
        "actions": np.array(values),
        "obs": np.array(values),
    })
class AgentIOTest(unittest.TestCase):
    """End-to-end tests of the trainer `input`/`output` offline-data config."""

    def setUp(self):
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir)

    def writeOutputs(self, output):
        # Train PG for one iteration with experience output enabled.
        agent = PGTrainer(
            env="CartPole-v0",
            config={
                "output": output,
                "sample_batch_size": 250,
            })
        agent.train()
        return agent

    def testAgentOutputOk(self):
        # One train() produces exactly one readable output file.
        self.writeOutputs(self.test_dir)
        self.assertEqual(len(os.listdir(self.test_dir)), 1)
        reader = JsonReader(self.test_dir + "/*.json")
        reader.next()

    def testAgentOutputLogdir(self):
        # "logdir" routes output files into the trainer's own logdir.
        agent = self.writeOutputs("logdir")
        self.assertEqual(len(glob.glob(agent.logdir + "/output-*.json")), 1)

    def testAgentInputDir(self):
        self.writeOutputs(self.test_dir)
        agent = PGTrainer(
            env="CartPole-v0",
            config={
                "input": self.test_dir,
                "input_evaluation": [],
            })
        result = agent.train()
        self.assertEqual(result["timesteps_total"], 250)  # read from input
        # No env rollouts, so no reward metric.
        self.assertTrue(np.isnan(result["episode_reward_mean"]))

    def testSplitByEpisode(self):
        # SAMPLES has eps_id [1, 1, 2, 3] -> splits of size 2, 1, 1.
        splits = SAMPLES.split_by_episode()
        self.assertEqual(len(splits), 3)
        self.assertEqual(splits[0].count, 2)
        self.assertEqual(splits[1].count, 1)
        self.assertEqual(splits[2].count, 1)

    def testAgentInputPostprocessingEnabled(self):
        self.writeOutputs(self.test_dir)

        # Rewrite the files to drop advantages and value_targets for testing
        for path in glob.glob(self.test_dir + "/*.json"):
            out = []
            for line in open(path).readlines():
                data = json.loads(line)
                del data["advantages"]
                del data["value_targets"]
                out.append(data)
            with open(path, "w") as f:
                for data in out:
                    f.write(json.dumps(data))

        agent = PGTrainer(
            env="CartPole-v0",
            config={
                "input": self.test_dir,
                "input_evaluation": [],
                "postprocess_inputs": True,  # adds back 'advantages'
            })

        result = agent.train()
        self.assertEqual(result["timesteps_total"], 250)  # read from input
        self.assertTrue(np.isnan(result["episode_reward_mean"]))

    def testAgentInputEvalSim(self):
        # "simulation" evaluation runs real rollouts alongside offline input,
        # so a reward metric should eventually appear.
        self.writeOutputs(self.test_dir)
        agent = PGTrainer(
            env="CartPole-v0",
            config={
                "input": self.test_dir,
                "input_evaluation": ["simulation"],
            })
        for _ in range(50):
            result = agent.train()
            if not np.isnan(result["episode_reward_mean"]):
                return  # simulation ok
            time.sleep(0.1)
        assert False, "did not see any simulation results"

    def testAgentInputList(self):
        # "input" also accepts an explicit list of files.
        self.writeOutputs(self.test_dir)
        agent = PGTrainer(
            env="CartPole-v0",
            config={
                "input": glob.glob(self.test_dir + "/*.json"),
                "input_evaluation": [],
                "sample_batch_size": 99,
            })
        result = agent.train()
        self.assertEqual(result["timesteps_total"], 250)  # read from input
        self.assertTrue(np.isnan(result["episode_reward_mean"]))

    def testAgentInputDict(self):
        # Dict input mixes offline data (10%) with live sampling (90%).
        self.writeOutputs(self.test_dir)
        agent = PGTrainer(
            env="CartPole-v0",
            config={
                "input": {
                    self.test_dir: 0.1,
                    "sampler": 0.9,
                },
                "train_batch_size": 2000,
                "input_evaluation": [],
            })
        result = agent.train()
        self.assertTrue(not np.isnan(result["episode_reward_mean"]))

    def testMultiAgent(self):
        # Round-trip: write multi-agent experiences, then read them back
        # with simulation evaluation enabled.
        register_env("multi_cartpole", lambda _: MultiCartpole(10))
        single_env = gym.make("CartPole-v0")

        def gen_policy():
            obs_space = single_env.observation_space
            act_space = single_env.action_space
            return (PGTFPolicy, obs_space, act_space, {})

        pg = PGTrainer(
            env="multi_cartpole",
            config={
                "num_workers": 0,
                "output": self.test_dir,
                "multiagent": {
                    "policies": {
                        "policy_1": gen_policy(),
                        "policy_2": gen_policy(),
                    },
                    "policy_mapping_fn": (
                        lambda agent_id: random.choice(
                            ["policy_1", "policy_2"])),
                },
            })
        pg.train()
        self.assertEqual(len(os.listdir(self.test_dir)), 1)
        pg.stop()

        pg = PGTrainer(
            env="multi_cartpole",
            config={
                "num_workers": 0,
                "input": self.test_dir,
                "input_evaluation": ["simulation"],
                "train_batch_size": 2000,
                "multiagent": {
                    "policies": {
                        "policy_1": gen_policy(),
                        "policy_2": gen_policy(),
                    },
                    "policy_mapping_fn": (
                        lambda agent_id: random.choice(
                            ["policy_1", "policy_2"])),
                },
            })
        for _ in range(50):
            result = pg.train()
            if not np.isnan(result["episode_reward_mean"]):
                return  # simulation ok
            time.sleep(0.1)
        assert False, "did not see any simulation results"
class JsonIOTest(unittest.TestCase):
    """Unit tests for JsonWriter / JsonReader against a temp directory."""

    def setUp(self):
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir)

    def testWriteSimple(self):
        # The writer creates its output file lazily on the first write.
        ioctx = IOContext(self.test_dir, {}, 0, None)
        writer = JsonWriter(
            self.test_dir, ioctx, max_file_size=1000, compress_columns=["obs"])
        self.assertEqual(len(os.listdir(self.test_dir)), 0)
        writer.write(SAMPLES)
        writer.write(SAMPLES)
        self.assertEqual(len(os.listdir(self.test_dir)), 1)

    def testWriteFileURI(self):
        # "file:" URI scheme is accepted for local paths.
        ioctx = IOContext(self.test_dir, {}, 0, None)
        writer = JsonWriter(
            "file:" + self.test_dir,
            ioctx,
            max_file_size=1000,
            compress_columns=["obs"])
        self.assertEqual(len(os.listdir(self.test_dir)), 0)
        writer.write(SAMPLES)
        writer.write(SAMPLES)
        self.assertEqual(len(os.listdir(self.test_dir)), 1)

    def testWritePaginate(self):
        # max_file_size forces rollover into multiple files.
        ioctx = IOContext(self.test_dir, {}, 0, None)
        writer = JsonWriter(
            self.test_dir, ioctx, max_file_size=5000, compress_columns=["obs"])
        self.assertEqual(len(os.listdir(self.test_dir)), 0)
        for _ in range(100):
            writer.write(SAMPLES)
        num_files = len(os.listdir(self.test_dir))
        # Exact count depends on compressed record size; allow 12 or 13.
        assert num_files in [12, 13], num_files

    def testReadWrite(self):
        # Round-trip: the reader should see (nearly) all 100 distinct values;
        # "nearly" because sampling over files/lines is randomized.
        ioctx = IOContext(self.test_dir, {}, 0, None)
        writer = JsonWriter(
            self.test_dir, ioctx, max_file_size=5000, compress_columns=["obs"])
        for i in range(100):
            writer.write(make_sample_batch(i))
        reader = JsonReader(self.test_dir + "/*.json")
        seen_a = set()
        seen_o = set()
        for i in range(1000):
            batch = reader.next()
            seen_a.add(batch["actions"][0])
            seen_o.add(batch["obs"][0])
        self.assertGreater(len(seen_a), 90)
        self.assertLess(len(seen_a), 101)
        self.assertGreater(len(seen_o), 90)
        self.assertLess(len(seen_o), 101)

    def testSkipsOverEmptyLinesAndFiles(self):
        # Empty files and blank lines must be tolerated, not raise.
        open(self.test_dir + "/empty", "w").close()
        with open(self.test_dir + "/f1", "w") as f:
            f.write("\n")
            f.write("\n")
            f.write(_to_json(make_sample_batch(0), []))
        with open(self.test_dir + "/f2", "w") as f:
            f.write(_to_json(make_sample_batch(1), []))
            f.write("\n")
        reader = JsonReader([
            self.test_dir + "/empty",
            self.test_dir + "/f1",
            "file:" + self.test_dir + "/f2",
        ])
        seen_a = set()
        for i in range(100):
            batch = reader.next()
            seen_a.add(batch["actions"][0])
        self.assertEqual(len(seen_a), 2)

    def testSkipsOverCorruptedLines(self):
        # A trailing corrupted record is skipped; valid records still load.
        with open(self.test_dir + "/f1", "w") as f:
            f.write(_to_json(make_sample_batch(0), []))
            f.write("\n")
            f.write(_to_json(make_sample_batch(1), []))
            f.write("\n")
            f.write(_to_json(make_sample_batch(2), []))
            f.write("\n")
            f.write(_to_json(make_sample_batch(3), []))
            f.write("\n")
            f.write("{..corrupted_json_record")
        reader = JsonReader([
            self.test_dir + "/f1",
        ])
        seen_a = set()
        for i in range(10):
            batch = reader.next()
            seen_a.add(batch["actions"][0])
        self.assertEqual(len(seen_a), 4)

    def testAbortOnAllEmptyInputs(self):
        # If every input is empty, next() must raise instead of spinning.
        open(self.test_dir + "/empty", "w").close()
        reader = JsonReader([
            self.test_dir + "/empty",
        ])
        self.assertRaises(ValueError, lambda: reader.next())
        with open(self.test_dir + "/empty1", "w") as f:
            for _ in range(100):
                f.write("\n")
        with open(self.test_dir + "/empty2", "w") as f:
            for _ in range(100):
                f.write("\n")
        reader = JsonReader([
            self.test_dir + "/empty1",
            self.test_dir + "/empty2",
        ])
        self.assertRaises(ValueError, lambda: reader.next())
if __name__ == "__main__":
    # A single small ray session suffices for the whole suite.
    ray.init(num_cpus=1)
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_legacy.py
|
Python
|
from ray.rllib.agents.ppo import PPOAgent
from ray import tune
import ray
if __name__ == "__main__":
    ray.init()
    # Test legacy *Agent classes work (renamed to Trainer); two quick
    # iterations of PPO via tune are enough to exercise the alias.
    tune.run(
        PPOAgent,
        config={"env": "CartPole-v0"},
        stop={"training_iteration": 2})
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_local.py
|
Python
|
import unittest
from ray.rllib.agents.ppo import PPOTrainer, DEFAULT_CONFIG
import ray
class LocalModeTest(unittest.TestCase):
    """Checks that a trainer can run under ray's serial local mode."""

    def setUp(self):
        # Local mode executes everything serially in the driver process.
        ray.init(local_mode=True)

    def tearDown(self):
        # FIX: shut ray down after the test (was missing), matching the
        # setUp/tearDown pattern of the other suites and allowing repeated
        # or combined runs without a double-init error.
        ray.shutdown()

    def testLocal(self):
        cf = DEFAULT_CONFIG.copy()
        agent = PPOTrainer(cf, "CartPole-v0")
        print(agent.train())
if __name__ == "__main__":
    # The test class manages its own ray session.
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_lstm.py
|
Python
|
import gym
import numpy as np
import pickle
import unittest
import ray
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.policy.rnn_sequencing import chop_into_sequences, \
add_time_dimension
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.misc import linear, normc_initializer
from ray.rllib.models.model import Model
from ray.tune.registry import register_env
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class LSTMUtilsTest(unittest.TestCase):
    """Unit tests for chop_into_sequences() segmentation and padding.

    Each test feeds episode/agent id arrays, feature columns ``f`` and
    state columns ``s`` into chop_into_sequences() and checks the padded
    features, the per-sequence initial states, and the sequence lengths.
    """

    def testBasic(self):
        # Two episodes (ids 1 and 5) for a single agent, max_seq_len=4:
        # episode 1 yields one sequence of length 3, episode 5 splits
        # into sequences of length 4 and 1.
        eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
        agent_ids = [1, 1, 1, 1, 1, 1, 1, 1]
        f = [[101, 102, 103, 201, 202, 203, 204, 205],
             [[101], [102], [103], [201], [202], [203], [204], [205]]]
        s = [[209, 208, 207, 109, 108, 107, 106, 105]]
        f_pad, s_init, seq_lens = chop_into_sequences(eps_ids,
                                                      np.ones_like(eps_ids),
                                                      agent_ids, f, s, 4)
        self.assertEqual([f.tolist() for f in f_pad], [
            [101, 102, 103, 0, 201, 202, 203, 204, 205, 0, 0, 0],
            [[101], [102], [103], [0], [201], [202], [203], [204], [205], [0],
             [0], [0]],
        ])
        # Initial state is taken from the first step of each sequence.
        self.assertEqual([s.tolist() for s in s_init], [[209, 109, 105]])
        self.assertEqual(seq_lens.tolist(), [3, 4, 1])

    def testMultiDim(self):
        # Features may be multi-dimensional (e.g. image observations).
        eps_ids = [1, 1, 1]
        agent_ids = [1, 1, 1]
        obs = np.ones((84, 84, 4))
        f = [[obs, obs * 2, obs * 3]]
        s = [[209, 208, 207]]
        f_pad, s_init, seq_lens = chop_into_sequences(eps_ids,
                                                      np.ones_like(eps_ids),
                                                      agent_ids, f, s, 4)
        self.assertEqual([f.tolist() for f in f_pad], [
            np.array([obs, obs * 2, obs * 3]).tolist(),
        ])
        self.assertEqual([s.tolist() for s in s_init], [[209]])
        self.assertEqual(seq_lens.tolist(), [3])

    def testBatchId(self):
        # A change of batch id also forces a sequence break, even within
        # the same episode.
        eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
        batch_ids = [1, 1, 2, 2, 3, 3, 4, 4]
        agent_ids = [1, 1, 1, 1, 1, 1, 1, 1]
        f = [[101, 102, 103, 201, 202, 203, 204, 205],
             [[101], [102], [103], [201], [202], [203], [204], [205]]]
        s = [[209, 208, 207, 109, 108, 107, 106, 105]]
        _, _, seq_lens = chop_into_sequences(eps_ids, batch_ids, agent_ids, f,
                                             s, 4)
        self.assertEqual(seq_lens.tolist(), [2, 1, 1, 2, 2])

    def testMultiAgent(self):
        # A change of agent id forces a sequence break too. With
        # dynamic_max=False, every sequence is padded to max_seq_len=4,
        # hence 5 sequences * 4 steps = 20 padded rows.
        eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
        agent_ids = [1, 1, 2, 1, 1, 2, 2, 3]
        f = [[101, 102, 103, 201, 202, 203, 204, 205],
             [[101], [102], [103], [201], [202], [203], [204], [205]]]
        s = [[209, 208, 207, 109, 108, 107, 106, 105]]
        f_pad, s_init, seq_lens = chop_into_sequences(
            eps_ids,
            np.ones_like(eps_ids),
            agent_ids,
            f,
            s,
            4,
            dynamic_max=False)
        self.assertEqual(seq_lens.tolist(), [2, 1, 2, 2, 1])
        self.assertEqual(len(f_pad[0]), 20)
        self.assertEqual(len(s_init[0]), 5)

    def testDynamicMaxLen(self):
        # By default the padding width shrinks to the longest observed
        # sequence (2 here), not the max_seq_len argument (4).
        eps_ids = [5, 2, 2]
        agent_ids = [2, 2, 2]
        f = [[1, 1, 1]]
        s = [[1, 1, 1]]
        f_pad, s_init, seq_lens = chop_into_sequences(eps_ids,
                                                      np.ones_like(eps_ids),
                                                      agent_ids, f, s, 4)
        self.assertEqual([f.tolist() for f in f_pad], [[1, 0, 1, 1]])
        self.assertEqual([s.tolist() for s in s_init], [[1, 1]])
        self.assertEqual(seq_lens.tolist(), [1, 2])
class RNNSpyModel(Model):
    """LSTM model that captures its training inputs for later inspection.

    On every training pass it pickles the padded sequences, initial states,
    output states and sequence lengths into the Ray internal KV store under
    keys ``rnn_spy_in_<i>`` so the test suite can verify how RLlib fed the
    recurrent model.
    """

    # Counts how many training batches have been captured so far; used to
    # build unique KV-store keys.
    capture_index = 0

    def _build_layers_v2(self, input_dict, num_outputs, options):
        # Previously, a new class object was created during
        # deserialization and this `capture_index`
        # variable would be refreshed between class instantiations.
        # This behavior is no longer the case, so we manually refresh
        # the variable.
        RNNSpyModel.capture_index = 0

        def spy(sequences, state_in, state_out, seq_lens):
            if len(sequences) == 1:
                return 0  # don't capture inference inputs
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "rnn_spy_in_{}".format(RNNSpyModel.capture_index),
                pickle.dumps({
                    "sequences": sequences,
                    "state_in": state_in,
                    "state_out": state_out,
                    "seq_lens": seq_lens
                }),
                overwrite=True)
            RNNSpyModel.capture_index += 1
            return 0

        features = input_dict["obs"]
        cell_size = 3
        # Reshape [B*T, ...] inputs back to [B, T, ...] for dynamic_rnn.
        last_layer = add_time_dimension(features, self.seq_lens)

        # Setup the LSTM cell
        lstm = tf.nn.rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
        self.state_init = [
            np.zeros(lstm.state_size.c, np.float32),
            np.zeros(lstm.state_size.h, np.float32)
        ]

        # Setup LSTM inputs
        if self.state_in:
            c_in, h_in = self.state_in
        else:
            c_in = tf.placeholder(
                tf.float32, [None, lstm.state_size.c], name="c")
            h_in = tf.placeholder(
                tf.float32, [None, lstm.state_size.h], name="h")
            self.state_in = [c_in, h_in]

        # Setup LSTM outputs
        state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_out, lstm_state = tf.nn.dynamic_rnn(
            lstm,
            last_layer,
            initial_state=state_in,
            sequence_length=self.seq_lens,
            time_major=False,
            dtype=tf.float32)

        self.state_out = list(lstm_state)
        # stateful=True keeps TF from constant-folding the spy away.
        spy_fn = tf.py_func(
            spy, [
                last_layer,
                self.state_in,
                self.state_out,
                self.seq_lens,
            ],
            tf.int64,
            stateful=True)

        # Compute outputs; the control dependency guarantees the spy runs
        # whenever the forward pass does.
        with tf.control_dependencies([spy_fn]):
            last_layer = tf.reshape(lstm_out, [-1, cell_size])
            logits = linear(last_layer, num_outputs, "action",
                            normc_initializer(0.01))
        return logits, last_layer
class DebugCounterEnv(gym.Env):
    """Env whose observation is simply a step counter.

    Episodes last exactly 15 steps; the reward cycles 1, 2, 0, ...
    regardless of the action taken. Useful for asserting exactly which
    timesteps ended up in a training batch.
    """

    def __init__(self):
        self.action_space = gym.spaces.Discrete(2)
        self.observation_space = gym.spaces.Box(0, 100, (1, ))
        self.i = 0

    def reset(self):
        self.i = 0
        return [self.i]

    def step(self, action):
        self.i += 1
        reward = self.i % 3
        done = self.i >= 15
        return [self.i], reward, done, {}
class RNNSequencing(unittest.TestCase):
    """End-to-end checks that PPO feeds RNN batches in the right order.

    Uses RNNSpyModel + DebugCounterEnv so that observations are literal
    timestep indices, making the expected padded sequences predictable.
    The captured batches are read back from the Ray internal KV store.
    """

    def testSimpleOptimizerSequencing(self):
        ModelCatalog.register_custom_model("rnn", RNNSpyModel)
        register_env("counter", lambda _: DebugCounterEnv())
        ppo = PPOTrainer(
            env="counter",
            config={
                "num_workers": 0,
                "sample_batch_size": 10,
                "train_batch_size": 10,
                "sgd_minibatch_size": 10,
                "vf_share_layers": True,
                "simple_optimizer": True,
                "num_sgd_iter": 1,
                "model": {
                    "custom_model": "rnn",
                    "max_seq_len": 4,
                    "state_shape": [3, 3],
                },
            })
        ppo.train()
        ppo.train()

        # First 10 steps (obs 0..9) chop into sequences 4/4/2, zero-padded.
        batch0 = pickle.loads(
            ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_0"))
        self.assertEqual(
            batch0["sequences"].tolist(),
            [[[0], [1], [2], [3]], [[4], [5], [6], [7]], [[8], [9], [0], [0]]])
        self.assertEqual(batch0["seq_lens"].tolist(), [4, 4, 2])
        # The very first sequence starts from the zero initial state ...
        self.assertEqual(batch0["state_in"][0][0].tolist(), [0, 0, 0])
        self.assertEqual(batch0["state_in"][1][0].tolist(), [0, 0, 0])
        # ... later sequences carry over non-zero state from the previous one.
        self.assertGreater(abs(np.sum(batch0["state_in"][0][1])), 0)
        self.assertGreater(abs(np.sum(batch0["state_in"][1][1])), 0)
        self.assertTrue(
            np.allclose(batch0["state_in"][0].tolist()[1:],
                        batch0["state_out"][0].tolist()[:-1]))
        self.assertTrue(
            np.allclose(batch0["state_in"][1].tolist()[1:],
                        batch0["state_out"][1].tolist()[:-1]))

        # Second batch: end of episode 1 (obs 10..14) then episode 2 restart.
        batch1 = pickle.loads(
            ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_1"))
        self.assertEqual(batch1["sequences"].tolist(), [
            [[10], [11], [12], [13]],
            [[14], [0], [0], [0]],
            [[0], [1], [2], [3]],
            [[4], [0], [0], [0]],
        ])
        self.assertEqual(batch1["seq_lens"].tolist(), [4, 1, 4, 1])
        # Sequence index 2 starts the new episode -> zero initial state.
        self.assertEqual(batch1["state_in"][0][2].tolist(), [0, 0, 0])
        self.assertEqual(batch1["state_in"][1][2].tolist(), [0, 0, 0])
        self.assertGreater(abs(np.sum(batch1["state_in"][0][0])), 0)
        self.assertGreater(abs(np.sum(batch1["state_in"][1][0])), 0)
        self.assertGreater(abs(np.sum(batch1["state_in"][0][1])), 0)
        self.assertGreater(abs(np.sum(batch1["state_in"][1][1])), 0)
        self.assertGreater(abs(np.sum(batch1["state_in"][0][3])), 0)
        self.assertGreater(abs(np.sum(batch1["state_in"][1][3])), 0)

    def testMinibatchSequencing(self):
        ModelCatalog.register_custom_model("rnn", RNNSpyModel)
        register_env("counter", lambda _: DebugCounterEnv())
        ppo = PPOTrainer(
            env="counter",
            config={
                "shuffle_sequences": False,  # for deterministic testing
                "num_workers": 0,
                "sample_batch_size": 20,
                "train_batch_size": 20,
                "sgd_minibatch_size": 10,
                "vf_share_layers": True,
                "simple_optimizer": False,
                "num_sgd_iter": 1,
                "model": {
                    "custom_model": "rnn",
                    "max_seq_len": 4,
                    "state_shape": [3, 3],
                },
            })
        ppo.train()
        ppo.train()

        # first epoch: 20 observations get split into 2 minibatches of 8
        # four observations are discarded
        batch0 = pickle.loads(
            ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_0"))
        batch1 = pickle.loads(
            ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_1"))
        if batch0["sequences"][0][0][0] > batch1["sequences"][0][0][0]:
            batch0, batch1 = batch1, batch0  # sort minibatches
        self.assertEqual(batch0["seq_lens"].tolist(), [4, 4])
        self.assertEqual(batch1["seq_lens"].tolist(), [4, 3])
        self.assertEqual(batch0["sequences"].tolist(), [
            [[0], [1], [2], [3]],
            [[4], [5], [6], [7]],
        ])
        self.assertEqual(batch1["sequences"].tolist(), [
            [[8], [9], [10], [11]],
            [[12], [13], [14], [0]],
        ])

        # second epoch: 20 observations get split into 2 minibatches of 8
        # four observations are discarded
        batch2 = pickle.loads(
            ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_2"))
        batch3 = pickle.loads(
            ray.experimental.internal_kv._internal_kv_get("rnn_spy_in_3"))
        if batch2["sequences"][0][0][0] > batch3["sequences"][0][0][0]:
            batch2, batch3 = batch3, batch2
        self.assertEqual(batch2["seq_lens"].tolist(), [4, 4])
        self.assertEqual(batch3["seq_lens"].tolist(), [2, 4])
        self.assertEqual(batch2["sequences"].tolist(), [
            [[5], [6], [7], [8]],
            [[9], [10], [11], [12]],
        ])
        self.assertEqual(batch3["sequences"].tolist(), [
            [[13], [14], [0], [0]],
            [[0], [1], [2], [3]],
        ])
if __name__ == "__main__":
    # Four CPUs: the PPO trainers in this suite spawn several Ray actors.
    ray.init(num_cpus=4)
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_multi_agent_env.py
|
Python
|
import gym
import random
import unittest
import ray
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.agents.dqn.dqn_policy import DQNTFPolicy
from ray.rllib.optimizers import (SyncSamplesOptimizer, SyncReplayOptimizer,
AsyncGradientsOptimizer)
from ray.rllib.tests.test_rollout_worker import (MockEnv, MockEnv2, MockPolicy)
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.policy.policy import Policy
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.env.base_env import _MultiAgentEnvToBaseEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
def one_hot(i, n):
    """Return a length-n float list that is all zeros except 1.0 at index i."""
    encoding = [0.0 for _ in range(n)]
    encoding[i] = 1.0
    return encoding
class BasicMultiAgent(MultiAgentEnv):
    """Env of N independent agents, each of which exits after 25 steps."""

    def __init__(self, num):
        self.agents = [MockEnv(25) for _ in range(num)]
        self.dones = set()
        self.observation_space = gym.spaces.Discrete(2)
        self.action_space = gym.spaces.Discrete(2)
        self.resetted = False

    def reset(self):
        self.resetted = True
        self.dones = set()
        return {
            agent_id: agent.reset()
            for agent_id, agent in enumerate(self.agents)
        }

    def step(self, action_dict):
        obs, rew, done, info = {}, {}, {}, {}
        for agent_id, action in action_dict.items():
            step_result = self.agents[agent_id].step(action)
            obs[agent_id], rew[agent_id], done[agent_id], info[agent_id] = \
                step_result
            if done[agent_id]:
                self.dones.add(agent_id)
        # The episode as a whole ends once every agent has finished.
        done["__all__"] = len(self.dones) == len(self.agents)
        return obs, rew, done, info
class EarlyDoneMultiAgent(MultiAgentEnv):
    """Env for testing when the env terminates (after agent 0 does).

    Two agents with different episode lengths (3 and 5 steps) take turns:
    each step() only emits observations for one agent (``self.i``).
    The episode is declared done for everyone as soon as all but one agent
    has finished, which leaves the remaining agent without a terminal
    observation — the exact condition the sampler test exercises.
    """

    def __init__(self):
        self.agents = [MockEnv(3), MockEnv(5)]
        self.dones = set()
        # Per-agent caches of the most recent transition pieces, so the
        # round-robin step() can report stale values for the "off" agent.
        self.last_obs = {}
        self.last_rew = {}
        self.last_done = {}
        self.last_info = {}
        self.i = 0
        self.observation_space = gym.spaces.Discrete(10)
        self.action_space = gym.spaces.Discrete(2)

    def reset(self):
        self.dones = set()
        self.last_obs = {}
        self.last_rew = {}
        self.last_done = {}
        self.last_info = {}
        self.i = 0
        for i, a in enumerate(self.agents):
            self.last_obs[i] = a.reset()
            self.last_rew[i] = None
            self.last_done[i] = False
            self.last_info[i] = {}
        # Only the current agent's observation is returned; advance the turn.
        obs_dict = {self.i: self.last_obs[self.i]}
        self.i = (self.i + 1) % len(self.agents)
        return obs_dict

    def step(self, action_dict):
        assert len(self.dones) != len(self.agents)
        for i, action in action_dict.items():
            (self.last_obs[i], self.last_rew[i], self.last_done[i],
             self.last_info[i]) = self.agents[i].step(action)
        obs = {self.i: self.last_obs[self.i]}
        rew = {self.i: self.last_rew[self.i]}
        done = {self.i: self.last_done[self.i]}
        info = {self.i: self.last_info[self.i]}
        if done[self.i]:
            rew[self.i] = 0
            self.dones.add(self.i)
        self.i = (self.i + 1) % len(self.agents)
        # Note the "- 1": the whole episode ends early, before the last
        # remaining agent has observed its own terminal step.
        done["__all__"] = len(self.dones) == len(self.agents) - 1
        return obs, rew, done, info
class RoundRobinMultiAgent(MultiAgentEnv):
    """Env of N independent agents, each of which exits after 5 steps.

    On each step() of the env, only one agent takes an action."""

    def __init__(self, num, increment_obs=False):
        if increment_obs:
            # Observations are 0, 1, 2, 3... etc. as time advances
            self.agents = [MockEnv2(5) for _ in range(num)]
        else:
            # Observations are all zeros
            self.agents = [MockEnv(5) for _ in range(num)]
        self.dones = set()
        # Per-agent caches of the most recent transition pieces.
        self.last_obs = {}
        self.last_rew = {}
        self.last_done = {}
        self.last_info = {}
        self.i = 0
        self.num = num
        self.observation_space = gym.spaces.Discrete(10)
        self.action_space = gym.spaces.Discrete(2)

    def reset(self):
        self.dones = set()
        self.last_obs = {}
        self.last_rew = {}
        self.last_done = {}
        self.last_info = {}
        self.i = 0
        for i, a in enumerate(self.agents):
            self.last_obs[i] = a.reset()
            self.last_rew[i] = None
            self.last_done[i] = False
            self.last_info[i] = {}
        # Only the current agent's observation is returned; advance the turn.
        obs_dict = {self.i: self.last_obs[self.i]}
        self.i = (self.i + 1) % self.num
        return obs_dict

    def step(self, action_dict):
        assert len(self.dones) != len(self.agents)
        for i, action in action_dict.items():
            (self.last_obs[i], self.last_rew[i], self.last_done[i],
             self.last_info[i]) = self.agents[i].step(action)
        obs = {self.i: self.last_obs[self.i]}
        rew = {self.i: self.last_rew[self.i]}
        done = {self.i: self.last_done[self.i]}
        info = {self.i: self.last_info[self.i]}
        if done[self.i]:
            rew[self.i] = 0
            self.dones.add(self.i)
        self.i = (self.i + 1) % self.num
        # Unlike EarlyDoneMultiAgent, the episode runs until ALL agents
        # are done.
        done["__all__"] = len(self.dones) == len(self.agents)
        return obs, rew, done, info
def make_multiagent(env_name):
    """Return a MultiAgentEnv class running `num` copies of a gym env.

    The generated class takes the number of agents as its constructor
    argument; every agent gets its own independent copy of `env_name`.
    """

    class MultiEnv(MultiAgentEnv):
        def __init__(self, num):
            self.agents = [gym.make(env_name) for _ in range(num)]
            self.dones = set()
            # All copies share identical spaces, so expose the first one's.
            self.observation_space = self.agents[0].observation_space
            self.action_space = self.agents[0].action_space

        def reset(self):
            self.dones = set()
            return {i: a.reset() for i, a in enumerate(self.agents)}

        def step(self, action_dict):
            obs, rew, done, info = {}, {}, {}, {}
            for i, action in action_dict.items():
                obs[i], rew[i], done[i], info[i] = self.agents[i].step(action)
                if done[i]:
                    self.dones.add(i)
            done["__all__"] = len(self.dones) == len(self.agents)
            return obs, rew, done, info

    return MultiEnv
# Multi-agent variants of standard gym envs, used throughout the tests below.
MultiCartpole = make_multiagent("CartPole-v0")
MultiMountainCar = make_multiagent("MountainCarContinuous-v0")
class TestMultiAgentEnv(unittest.TestCase):
    """Integration tests for multi-agent envs, sampling, and training.

    Covers the mock envs directly, their vectorized BaseEnv wrapping,
    RolloutWorker sampling (sync/async, with horizons, round-robin agents),
    custom RNN state values, model-based rollouts, and training with the
    various policy optimizers.
    """

    def testBasicMock(self):
        env = BasicMultiAgent(4)
        obs = env.reset()
        self.assertEqual(obs, {0: 0, 1: 0, 2: 0, 3: 0})
        for _ in range(24):
            obs, rew, done, info = env.step({0: 0, 1: 0, 2: 0, 3: 0})
            self.assertEqual(obs, {0: 0, 1: 0, 2: 0, 3: 0})
            self.assertEqual(rew, {0: 1, 1: 1, 2: 1, 3: 1})
            self.assertEqual(done, {
                0: False,
                1: False,
                2: False,
                3: False,
                "__all__": False
            })
        # 25th step: every agent terminates simultaneously.
        obs, rew, done, info = env.step({0: 0, 1: 0, 2: 0, 3: 0})
        self.assertEqual(done, {
            0: True,
            1: True,
            2: True,
            3: True,
            "__all__": True
        })

    def testRoundRobinMock(self):
        env = RoundRobinMultiAgent(2)
        obs = env.reset()
        self.assertEqual(obs, {0: 0})
        # Agents alternate: obs dict always contains exactly one agent.
        for _ in range(5):
            obs, rew, done, info = env.step({0: 0})
            self.assertEqual(obs, {1: 0})
            self.assertEqual(done["__all__"], False)
            obs, rew, done, info = env.step({1: 0})
            self.assertEqual(obs, {0: 0})
            self.assertEqual(done["__all__"], False)
        obs, rew, done, info = env.step({0: 0})
        self.assertEqual(done["__all__"], True)

    def testNoResetUntilPoll(self):
        env = _MultiAgentEnvToBaseEnv(lambda v: BasicMultiAgent(2), [], 1)
        self.assertFalse(env.get_unwrapped()[0].resetted)
        env.poll()
        self.assertTrue(env.get_unwrapped()[0].resetted)

    def testVectorizeBasic(self):
        # Two vectorized copies of a 2-agent env; outer keys are env
        # indices, inner keys are agent ids.
        env = _MultiAgentEnvToBaseEnv(lambda v: BasicMultiAgent(2), [], 2)
        obs, rew, dones, _, _ = env.poll()
        self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
        self.assertEqual(rew, {0: {0: None, 1: None}, 1: {0: None, 1: None}})
        self.assertEqual(
            dones, {
                0: {
                    0: False,
                    1: False,
                    "__all__": False
                },
                1: {
                    0: False,
                    1: False,
                    "__all__": False
                }
            })
        for _ in range(24):
            env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
            obs, rew, dones, _, _ = env.poll()
            self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
            self.assertEqual(rew, {0: {0: 1, 1: 1}, 1: {0: 1, 1: 1}})
            self.assertEqual(
                dones, {
                    0: {
                        0: False,
                        1: False,
                        "__all__": False
                    },
                    1: {
                        0: False,
                        1: False,
                        "__all__": False
                    }
                })
        env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
        obs, rew, dones, _, _ = env.poll()
        self.assertEqual(
            dones, {
                0: {
                    0: True,
                    1: True,
                    "__all__": True
                },
                1: {
                    0: True,
                    1: True,
                    "__all__": True
                }
            })

        # Reset processing
        self.assertRaises(
            ValueError, lambda: env.send_actions({
                0: {
                    0: 0,
                    1: 0
                },
                1: {
                    0: 0,
                    1: 0
                }
            }))
        self.assertEqual(env.try_reset(0), {0: 0, 1: 0})
        self.assertEqual(env.try_reset(1), {0: 0, 1: 0})
        env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
        obs, rew, dones, _, _ = env.poll()
        self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
        self.assertEqual(rew, {0: {0: 1, 1: 1}, 1: {0: 1, 1: 1}})
        self.assertEqual(
            dones, {
                0: {
                    0: False,
                    1: False,
                    "__all__": False
                },
                1: {
                    0: False,
                    1: False,
                    "__all__": False
                }
            })

    def testVectorizeRoundRobin(self):
        env = _MultiAgentEnvToBaseEnv(lambda v: RoundRobinMultiAgent(2), [], 2)
        obs, rew, dones, _, _ = env.poll()
        self.assertEqual(obs, {0: {0: 0}, 1: {0: 0}})
        self.assertEqual(rew, {0: {0: None}, 1: {0: None}})
        env.send_actions({0: {0: 0}, 1: {0: 0}})
        obs, rew, dones, _, _ = env.poll()
        self.assertEqual(obs, {0: {1: 0}, 1: {1: 0}})
        env.send_actions({0: {1: 0}, 1: {1: 0}})
        obs, rew, dones, _, _ = env.poll()
        self.assertEqual(obs, {0: {0: 0}, 1: {0: 0}})

    def testMultiAgentSample(self):
        act_space = gym.spaces.Discrete(2)
        obs_space = gym.spaces.Discrete(2)
        ev = RolloutWorker(
            env_creator=lambda _: BasicMultiAgent(5),
            policy={
                "p0": (MockPolicy, obs_space, act_space, {}),
                "p1": (MockPolicy, obs_space, act_space, {}),
            },
            policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
            batch_steps=50)
        batch = ev.sample()
        self.assertEqual(batch.count, 50)
        # 5 agents mapped even/odd: 3 agents on p0, 2 on p1.
        self.assertEqual(batch.policy_batches["p0"].count, 150)
        self.assertEqual(batch.policy_batches["p1"].count, 100)
        self.assertEqual(batch.policy_batches["p0"]["t"].tolist(),
                         list(range(25)) * 6)

    def testMultiAgentSampleSyncRemote(self):
        act_space = gym.spaces.Discrete(2)
        obs_space = gym.spaces.Discrete(2)
        ev = RolloutWorker(
            env_creator=lambda _: BasicMultiAgent(5),
            policy={
                "p0": (MockPolicy, obs_space, act_space, {}),
                "p1": (MockPolicy, obs_space, act_space, {}),
            },
            policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
            batch_steps=50,
            num_envs=4,
            remote_worker_envs=True,
            # Huge wait forces fully synchronous polling of the remote envs.
            remote_env_batch_wait_ms=99999999)
        batch = ev.sample()
        self.assertEqual(batch.count, 200)

    def testMultiAgentSampleAsyncRemote(self):
        act_space = gym.spaces.Discrete(2)
        obs_space = gym.spaces.Discrete(2)
        ev = RolloutWorker(
            env_creator=lambda _: BasicMultiAgent(5),
            policy={
                "p0": (MockPolicy, obs_space, act_space, {}),
                "p1": (MockPolicy, obs_space, act_space, {}),
            },
            policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
            batch_steps=50,
            num_envs=4,
            remote_worker_envs=True)
        batch = ev.sample()
        self.assertEqual(batch.count, 200)

    def testMultiAgentSampleWithHorizon(self):
        act_space = gym.spaces.Discrete(2)
        obs_space = gym.spaces.Discrete(2)
        ev = RolloutWorker(
            env_creator=lambda _: BasicMultiAgent(5),
            policy={
                "p0": (MockPolicy, obs_space, act_space, {}),
                "p1": (MockPolicy, obs_space, act_space, {}),
            },
            policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
            episode_horizon=10,  # test with episode horizon set
            batch_steps=50)
        batch = ev.sample()
        self.assertEqual(batch.count, 50)

    def testSampleFromEarlyDoneEnv(self):
        act_space = gym.spaces.Discrete(2)
        obs_space = gym.spaces.Discrete(2)
        ev = RolloutWorker(
            env_creator=lambda _: EarlyDoneMultiAgent(),
            policy={
                "p0": (MockPolicy, obs_space, act_space, {}),
                "p1": (MockPolicy, obs_space, act_space, {}),
            },
            policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
            batch_mode="complete_episodes",
            batch_steps=1)
        # The env ends the episode before the last agent observes its
        # terminal step, which the sampler must reject loudly.
        self.assertRaisesRegexp(ValueError,
                                ".*don't have a last observation.*",
                                lambda: ev.sample())

    def testMultiAgentSampleRoundRobin(self):
        act_space = gym.spaces.Discrete(2)
        obs_space = gym.spaces.Discrete(10)
        ev = RolloutWorker(
            env_creator=lambda _: RoundRobinMultiAgent(5, increment_obs=True),
            policy={
                "p0": (MockPolicy, obs_space, act_space, {}),
            },
            policy_mapping_fn=lambda agent_id: "p0",
            batch_steps=50)
        batch = ev.sample()
        self.assertEqual(batch.count, 50)
        # since we round robin introduce agents into the env, some of the env
        # steps don't count as proper transitions
        self.assertEqual(batch.policy_batches["p0"].count, 42)
        self.assertEqual(batch.policy_batches["p0"]["obs"].tolist()[:10], [
            one_hot(0, 10),
            one_hot(1, 10),
            one_hot(2, 10),
            one_hot(3, 10),
            one_hot(4, 10),
        ] * 2)
        self.assertEqual(batch.policy_batches["p0"]["new_obs"].tolist()[:10], [
            one_hot(1, 10),
            one_hot(2, 10),
            one_hot(3, 10),
            one_hot(4, 10),
            one_hot(5, 10),
        ] * 2)
        self.assertEqual(batch.policy_batches["p0"]["rewards"].tolist()[:10],
                         [100, 100, 100, 100, 0] * 2)
        self.assertEqual(batch.policy_batches["p0"]["dones"].tolist()[:10],
                         [False, False, False, False, True] * 2)
        self.assertEqual(batch.policy_batches["p0"]["t"].tolist()[:10],
                         [4, 9, 14, 19, 24, 5, 10, 15, 20, 25])

    def test_custom_rnn_state_values(self):
        # RNN state values need not be numeric arrays -- arbitrary
        # (picklable) structures must round-trip through sampling.
        h = {"some": {"arbitrary": "structure", "here": [1, 2, 3]}}

        class StatefulPolicy(Policy):
            def compute_actions(self,
                                obs_batch,
                                state_batches=None,
                                prev_action_batch=None,
                                prev_reward_batch=None,
                                episodes=None,
                                **kwargs):
                return [0] * len(obs_batch), [[h] * len(obs_batch)], {}

            def get_initial_state(self):
                return [{}]  # empty dict

        ev = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"),
            policy=StatefulPolicy,
            batch_steps=5)
        batch = ev.sample()
        self.assertEqual(batch.count, 5)
        # state_out at step t becomes state_in at step t+1.
        self.assertEqual(batch["state_in_0"][0], {})
        self.assertEqual(batch["state_out_0"][0], h)
        self.assertEqual(batch["state_in_0"][1], h)
        self.assertEqual(batch["state_out_0"][1], h)

    def test_returning_model_based_rollouts_data(self):
        class ModelBasedPolicy(PGTFPolicy):
            def compute_actions(self,
                                obs_batch,
                                state_batches,
                                prev_action_batch=None,
                                prev_reward_batch=None,
                                episodes=None,
                                **kwargs):
                # Pretend we did a model-based rollout and want to return
                # the extra trajectory.
                builder = episodes[0].new_batch_builder()
                rollout_id = random.randint(0, 10000)
                for t in range(5):
                    builder.add_values(
                        agent_id="extra_0",
                        policy_id="p1",  # use p1 so we can easily check it
                        t=t,
                        eps_id=rollout_id,  # new id for each rollout
                        obs=obs_batch[0],
                        actions=0,
                        rewards=0,
                        dones=t == 4,
                        infos={},
                        new_obs=obs_batch[0])
                batch = builder.build_and_reset(episode=None)
                episodes[0].add_extra_batch(batch)

                # Just return zeros for actions
                return [0] * len(obs_batch), [], {}

        single_env = gym.make("CartPole-v0")
        obs_space = single_env.observation_space
        act_space = single_env.action_space
        ev = RolloutWorker(
            env_creator=lambda _: MultiCartpole(2),
            policy={
                "p0": (ModelBasedPolicy, obs_space, act_space, {}),
                "p1": (ModelBasedPolicy, obs_space, act_space, {}),
            },
            policy_mapping_fn=lambda agent_id: "p0",
            batch_steps=5)
        batch = ev.sample()
        self.assertEqual(batch.count, 5)
        # p0: 2 real agents * 5 steps; p1: 5 injected steps per p0 step.
        self.assertEqual(batch.policy_batches["p0"].count, 10)
        self.assertEqual(batch.policy_batches["p1"].count, 25)

    def test_train_multi_cartpole_single_policy(self):
        n = 10
        register_env("multi_cartpole", lambda _: MultiCartpole(n))
        pg = PGTrainer(env="multi_cartpole", config={"num_workers": 0})
        for i in range(100):
            result = pg.train()
            print("Iteration {}, reward {}, timesteps {}".format(
                i, result["episode_reward_mean"], result["timesteps_total"]))
            if result["episode_reward_mean"] >= 50 * n:
                return
        raise Exception("failed to improve reward")

    def test_train_multi_cartpole_multi_policy(self):
        n = 10
        register_env("multi_cartpole", lambda _: MultiCartpole(n))
        single_env = gym.make("CartPole-v0")

        def gen_policy():
            config = {
                "gamma": random.choice([0.5, 0.8, 0.9, 0.95, 0.99]),
                "n_step": random.choice([1, 2, 3, 4, 5]),
            }
            obs_space = single_env.observation_space
            act_space = single_env.action_space
            return (None, obs_space, act_space, config)

        pg = PGTrainer(
            env="multi_cartpole",
            config={
                "num_workers": 0,
                "multiagent": {
                    "policies": {
                        "policy_1": gen_policy(),
                        "policy_2": gen_policy(),
                    },
                    "policy_mapping_fn": lambda agent_id: "policy_1",
                },
            })

        # Just check that it runs without crashing
        for i in range(10):
            result = pg.train()
            print("Iteration {}, reward {}, timesteps {}".format(
                i, result["episode_reward_mean"], result["timesteps_total"]))
        self.assertTrue(
            pg.compute_action([0, 0, 0, 0], policy_id="policy_1") in [0, 1])
        self.assertTrue(
            pg.compute_action([0, 0, 0, 0], policy_id="policy_2") in [0, 1])
        self.assertRaises(
            KeyError,
            lambda: pg.compute_action([0, 0, 0, 0], policy_id="policy_3"))

    def _testWithOptimizer(self, optimizer_cls):
        # Shared driver for the three optimizer tests below.
        n = 3
        env = gym.make("CartPole-v0")
        act_space = env.action_space
        obs_space = env.observation_space
        dqn_config = {"gamma": 0.95, "n_step": 3}
        if optimizer_cls == SyncReplayOptimizer:
            # TODO: support replay with non-DQN graphs. Currently this can't
            # happen since the replay buffer doesn't encode extra fields like
            # "advantages" that PG uses.
            policies = {
                "p1": (DQNTFPolicy, obs_space, act_space, dqn_config),
                "p2": (DQNTFPolicy, obs_space, act_space, dqn_config),
            }
        else:
            policies = {
                "p1": (PGTFPolicy, obs_space, act_space, {}),
                "p2": (DQNTFPolicy, obs_space, act_space, dqn_config),
            }
        worker = RolloutWorker(
            env_creator=lambda _: MultiCartpole(n),
            policy=policies,
            policy_mapping_fn=lambda agent_id: ["p1", "p2"][agent_id % 2],
            batch_steps=50)
        if optimizer_cls == AsyncGradientsOptimizer:

            def policy_mapper(agent_id):
                return ["p1", "p2"][agent_id % 2]

            remote_workers = [
                RolloutWorker.as_remote().remote(
                    env_creator=lambda _: MultiCartpole(n),
                    policy=policies,
                    policy_mapping_fn=policy_mapper,
                    batch_steps=50)
            ]
        else:
            remote_workers = []
        workers = WorkerSet._from_existing(worker, remote_workers)
        optimizer = optimizer_cls(workers)
        for i in range(200):
            # Anneal the DQN exploration epsilon from 1.0 down to 0.02.
            worker.foreach_policy(lambda p, _: p.set_epsilon(
                max(0.02, 1 - i * .02))
                if isinstance(p, DQNTFPolicy) else None)
            optimizer.step()
            result = collect_metrics(worker, remote_workers)
            if i % 20 == 0:

                def do_update(p):
                    if isinstance(p, DQNTFPolicy):
                        p.update_target()

                worker.foreach_policy(lambda p, _: do_update(p))
                print("Iter {}, rew {}".format(i,
                                               result["policy_reward_mean"]))
                print("Total reward", result["episode_reward_mean"])
            if result["episode_reward_mean"] >= 25 * n:
                return
        print(result)
        raise Exception("failed to improve reward")

    def test_multi_agent_sync_optimizer(self):
        self._testWithOptimizer(SyncSamplesOptimizer)

    def test_multi_agent_async_gradients_optimizer(self):
        self._testWithOptimizer(AsyncGradientsOptimizer)

    def test_multi_agent_replay_optimizer(self):
        self._testWithOptimizer(SyncReplayOptimizer)

    def test_train_multi_cartpole_many_policies(self):
        n = 20
        env = gym.make("CartPole-v0")
        act_space = env.action_space
        obs_space = env.observation_space
        policies = {}
        for i in range(20):
            policies["pg_{}".format(i)] = (PGTFPolicy, obs_space, act_space,
                                           {})
        policy_ids = list(policies.keys())
        worker = RolloutWorker(
            env_creator=lambda _: MultiCartpole(n),
            policy=policies,
            policy_mapping_fn=lambda agent_id: random.choice(policy_ids),
            batch_steps=100)
        workers = WorkerSet._from_existing(worker, [])
        optimizer = SyncSamplesOptimizer(workers)
        for i in range(100):
            optimizer.step()
            result = collect_metrics(worker)
            print("Iteration {}, rew {}".format(i,
                                                result["policy_reward_mean"]))
            print("Total reward", result["episode_reward_mean"])
            if result["episode_reward_mean"] >= 25 * n:
                return
        raise Exception("failed to improve reward")
if __name__ == "__main__":
    # Four CPUs: several tests spawn remote rollout workers and envs.
    ray.init(num_cpus=4)
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_nested_spaces.py
|
Python
|
import pickle
from gym import spaces
from gym.envs.registration import EnvSpec
import gym
import unittest
import ray
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.env import MultiAgentEnv
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.models import ModelCatalog
from ray.rllib.models.model import Model
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.rollout import rollout
from ray.rllib.tests.test_external_env import SimpleServing
from ray.tune.registry import register_env
from ray.rllib.utils import try_import_tf, try_import_torch
tf = try_import_tf()
_, nn = try_import_torch()
# Deeply nested Dict observation space used to exercise RLlib's flattening
# and reconstruction of structured observations.
DICT_SPACE = spaces.Dict({
    "sensors": spaces.Dict({
        "position": spaces.Box(low=-100, high=100, shape=(3, )),
        "velocity": spaces.Box(low=-1, high=1, shape=(3, )),
        "front_cam": spaces.Tuple(
            (spaces.Box(low=0, high=1, shape=(10, 10, 3)),
             spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
        "rear_cam": spaces.Box(low=0, high=1, shape=(10, 10, 3)),
    }),
    "inner_state": spaces.Dict({
        "charge": spaces.Discrete(100),
        "job_status": spaces.Dict({
            "task": spaces.Discrete(5),
            "progress": spaces.Box(low=0, high=100, shape=()),
        })
    })
})

# Ten pre-drawn samples; test envs index into these by step count, so
# episodes must stay shorter than 10 steps.
DICT_SAMPLES = [DICT_SPACE.sample() for _ in range(10)]

# Nested Tuple observation space analog of DICT_SPACE.
TUPLE_SPACE = spaces.Tuple([
    spaces.Box(low=-100, high=100, shape=(3, )),
    spaces.Tuple((spaces.Box(low=0, high=1, shape=(10, 10, 3)),
                  spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
    spaces.Discrete(5),
])

TUPLE_SAMPLES = [TUPLE_SPACE.sample() for _ in range(10)]
def one_hot(i, n):
    """One-hot encode index i as a float list of length n."""
    encoding = [0.0 for _ in range(n)]
    encoding[i] = 1.0
    return encoding
class NestedDictEnv(gym.Env):
    """Env emitting the pre-sampled nested Dict observations.

    Episodes last 5 steps; the observation at step t is DICT_SAMPLES[t]
    and the reward is always 1.
    """

    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = DICT_SPACE
        self._spec = EnvSpec("NestedDictEnv-v0")
        self.steps = 0

    def reset(self):
        self.steps = 0
        return DICT_SAMPLES[0]

    def step(self, action):
        self.steps += 1
        done = self.steps >= 5
        return DICT_SAMPLES[self.steps], 1, done, {}
class NestedTupleEnv(gym.Env):
    """Env emitting the pre-sampled nested Tuple observations.

    Episodes last 5 steps; the observation at step t is TUPLE_SAMPLES[t]
    and the reward is always 1.
    """

    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = TUPLE_SPACE
        self._spec = EnvSpec("NestedTupleEnv-v0")
        self.steps = 0

    def reset(self):
        self.steps = 0
        return TUPLE_SAMPLES[0]

    def step(self, action):
        self.steps += 1
        done = self.steps >= 5
        return TUPLE_SAMPLES[self.steps], 1, done, {}
class NestedMultiAgentEnv(MultiAgentEnv):
    """Two-agent env: one agent observes DICT_SPACE, the other TUPLE_SPACE.

    Episodes last 5 steps; both agents always receive reward 0. The
    observation at step t is the t-th pre-drawn sample for each agent.
    """

    def __init__(self):
        self.steps = 0

    def reset(self):
        # Bug fix: the step counter must be cleared on reset (as the
        # sibling NestedDictEnv/NestedTupleEnv do). Without this, a second
        # episode keeps counting from the previous one and indexes past the
        # end of the 10-element DICT_SAMPLES/TUPLE_SAMPLES lists.
        self.steps = 0
        return {
            "dict_agent": DICT_SAMPLES[0],
            "tuple_agent": TUPLE_SAMPLES[0],
        }

    def step(self, actions):
        self.steps += 1
        obs = {
            "dict_agent": DICT_SAMPLES[self.steps],
            "tuple_agent": TUPLE_SAMPLES[self.steps],
        }
        rew = {
            "dict_agent": 0,
            "tuple_agent": 0,
        }
        dones = {"__all__": self.steps >= 5}
        infos = {
            "dict_agent": {},
            "tuple_agent": {},
        }
        return obs, rew, dones, infos
class InvalidModel(Model):
    """Model that deliberately returns non-tensor outputs.

    Used by the tests to verify model-output validation raises ValueError.
    """

    def _build_layers_v2(self, input_dict, num_outputs, options):
        return "not", "valid"
class InvalidModel2(Model):
    """Model that deliberately returns tensors of the wrong shape.

    Scalar constants instead of [batch, num_outputs] logits; used to verify
    shape validation raises an "Expected output..." ValueError.
    """

    def _build_layers_v2(self, input_dict, num_outputs, options):
        return tf.constant(0), tf.constant(0)
class TorchSpyModel(TorchModelV2, nn.Module):
    """Torch model that records pieces of its nested observations.

    Each forward pass writes (position, front_cam[0], task) to the Ray
    internal KV store under ``torch_spy_in_<i>`` so tests can check how
    the nested Dict observation was reconstructed. Only the "position"
    sub-observation is actually fed to the wrapped FC network.
    """

    # Counts forward passes; used to build unique KV-store keys.
    capture_index = 0

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        # FC net sized for the "position" Box only, not the full Dict.
        self.fc = FullyConnectedNetwork(
            obs_space.original_space.spaces["sensors"].spaces["position"],
            action_space, num_outputs, model_config, name)

    def forward(self, input_dict, state, seq_lens):
        pos = input_dict["obs"]["sensors"]["position"].numpy()
        front_cam = input_dict["obs"]["sensors"]["front_cam"][0].numpy()
        task = input_dict["obs"]["inner_state"]["job_status"]["task"].numpy()
        ray.experimental.internal_kv._internal_kv_put(
            "torch_spy_in_{}".format(TorchSpyModel.capture_index),
            pickle.dumps((pos, front_cam, task)),
            overwrite=True)
        TorchSpyModel.capture_index += 1
        return self.fc({
            "obs": input_dict["obs"]["sensors"]["position"]
        }, state, seq_lens)

    def value_function(self):
        return self.fc.value_function()
class DictSpyModel(Model):
    """TF model that records pieces of its nested Dict observations.

    Each graph execution writes (position, front_cam[0], task) to the Ray
    internal KV store under ``d_spy_in_<i>`` so the test suite can verify
    observation reconstruction. Output is a dense layer over "position".
    """

    # Counts captures; used to build unique KV-store keys.
    capture_index = 0

    def _build_layers_v2(self, input_dict, num_outputs, options):
        def spy(pos, front_cam, task):
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "d_spy_in_{}".format(DictSpyModel.capture_index),
                pickle.dumps((pos, front_cam, task)),
                overwrite=True)
            DictSpyModel.capture_index += 1
            return 0

        # stateful=True keeps TF from constant-folding the spy away.
        spy_fn = tf.py_func(
            spy, [
                input_dict["obs"]["sensors"]["position"],
                input_dict["obs"]["sensors"]["front_cam"][0],
                input_dict["obs"]["inner_state"]["job_status"]["task"]
            ],
            tf.int64,
            stateful=True)

        # The control dependency guarantees the spy runs with every forward
        # pass.
        with tf.control_dependencies([spy_fn]):
            output = tf.layers.dense(input_dict["obs"]["sensors"]["position"],
                                     num_outputs)
        return output, output
class TupleSpyModel(Model):
    """TF model that records Tuple-space observation components.

    Mirrors DictSpyModel, but indexes the observation as a tuple; entries
    are stored under "t_spy_in_<i>" keys in the Ray internal KV store.
    """

    # Index used to key successive KV-store entries.
    capture_index = 0

    def _build_layers_v2(self, input_dict, num_outputs, options):
        def spy(pos, cam, task):
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "t_spy_in_{}".format(TupleSpyModel.capture_index),
                pickle.dumps((pos, cam, task)),
                overwrite=True)
            TupleSpyModel.capture_index += 1
            return 0

        spy_fn = tf.py_func(
            spy, [
                input_dict["obs"][0],
                input_dict["obs"][1][0],
                input_dict["obs"][2],
            ],
            tf.int64,
            stateful=True)

        # The control dependency forces the spy op to run whenever the
        # model output is computed.
        with tf.control_dependencies([spy_fn]):
            output = tf.layers.dense(input_dict["obs"][0], num_outputs)
        return output, output
class NestedSpacesTest(unittest.TestCase):
    """End-to-end tests for nested (Dict/Tuple) observation space support
    across gym, vectorized, serving, async, multi-agent, and torch paths."""

    def testInvalidModel(self):
        """A model returning non-tensors raises ValueError on trainer init."""
        ModelCatalog.register_custom_model("invalid", InvalidModel)
        self.assertRaises(ValueError, lambda: PGTrainer(
            env="CartPole-v0", config={
                "model": {
                    "custom_model": "invalid",
                },
            }))

    def testInvalidModel2(self):
        """A model returning wrongly-shaped tensors raises ValueError."""
        ModelCatalog.register_custom_model("invalid2", InvalidModel2)
        self.assertRaisesRegexp(
            ValueError, "Expected output.*",
            lambda: PGTrainer(
                env="CartPole-v0", config={
                    "model": {
                        "custom_model": "invalid2",
                    },
                }))

    def doTestNestedDict(self, make_env, test_lstm=False):
        """Train one PG iteration on `make_env` and verify DictSpyModel saw
        correctly reconstructed nested Dict observations."""
        ModelCatalog.register_custom_model("composite", DictSpyModel)
        register_env("nested", make_env)
        pg = PGTrainer(
            env="nested",
            config={
                "num_workers": 0,
                "sample_batch_size": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite",
                    "use_lstm": test_lstm,
                },
            })
        pg.train()

        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "d_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

    def doTestNestedTuple(self, make_env):
        """Same as doTestNestedDict, but for Tuple observation spaces."""
        ModelCatalog.register_custom_model("composite2", TupleSpyModel)
        register_env("nested2", make_env)
        pg = PGTrainer(
            env="nested2",
            config={
                "num_workers": 0,
                "sample_batch_size": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite2",
                },
            })
        pg.train()

        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "t_spy_in_{}".format(i)))
            pos_i = TUPLE_SAMPLES[i][0].tolist()
            cam_i = TUPLE_SAMPLES[i][1][0].tolist()
            task_i = one_hot(TUPLE_SAMPLES[i][2], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

    def testNestedDictGym(self):
        self.doTestNestedDict(lambda _: NestedDictEnv())

    def testNestedDictGymLSTM(self):
        self.doTestNestedDict(lambda _: NestedDictEnv(), test_lstm=True)

    def testNestedDictVector(self):
        self.doTestNestedDict(
            lambda _: VectorEnv.wrap(lambda i: NestedDictEnv()))

    def testNestedDictServing(self):
        self.doTestNestedDict(lambda _: SimpleServing(NestedDictEnv()))

    def testNestedDictAsync(self):
        self.doTestNestedDict(lambda _: BaseEnv.to_base_env(NestedDictEnv()))

    def testNestedTupleGym(self):
        self.doTestNestedTuple(lambda _: NestedTupleEnv())

    def testNestedTupleVector(self):
        self.doTestNestedTuple(
            lambda _: VectorEnv.wrap(lambda i: NestedTupleEnv()))

    def testNestedTupleServing(self):
        self.doTestNestedTuple(lambda _: SimpleServing(NestedTupleEnv()))

    def testNestedTupleAsync(self):
        self.doTestNestedTuple(lambda _: BaseEnv.to_base_env(NestedTupleEnv()))

    def testMultiAgentComplexSpaces(self):
        """Two policies with different nested spaces in one multiagent run."""
        ModelCatalog.register_custom_model("dict_spy", DictSpyModel)
        ModelCatalog.register_custom_model("tuple_spy", TupleSpyModel)
        register_env("nested_ma", lambda _: NestedMultiAgentEnv())
        act_space = spaces.Discrete(2)
        pg = PGTrainer(
            env="nested_ma",
            config={
                "num_workers": 0,
                "sample_batch_size": 5,
                "train_batch_size": 5,
                "multiagent": {
                    "policies": {
                        "tuple_policy": (
                            PGTFPolicy, TUPLE_SPACE, act_space,
                            {"model": {"custom_model": "tuple_spy"}}),
                        "dict_policy": (
                            PGTFPolicy, DICT_SPACE, act_space,
                            {"model": {"custom_model": "dict_spy"}}),
                    },
                    "policy_mapping_fn": lambda a: {
                        "tuple_agent": "tuple_policy",
                        "dict_agent": "dict_policy"}[a],
                },
            })
        pg.train()

        # Verify what the dict policy's model observed.
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "d_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

        # Verify what the tuple policy's model observed.
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "t_spy_in_{}".format(i)))
            pos_i = TUPLE_SAMPLES[i][0].tolist()
            cam_i = TUPLE_SAMPLES[i][1][0].tolist()
            task_i = one_hot(TUPLE_SAMPLES[i][2], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

    def testRolloutDictSpace(self):
        """Save/restore and rollout should work with Dict observations."""
        register_env("nested", lambda _: NestedDictEnv())
        agent = PGTrainer(env="nested")
        agent.train()
        path = agent.save()
        agent.stop()

        # Test train works on restore
        agent2 = PGTrainer(env="nested")
        agent2.restore(path)
        agent2.train()

        # Test rollout works on restore
        rollout(agent2, "nested", 100)

    def testPyTorchModel(self):
        """Nested Dict observations should also work on the torch stack."""
        ModelCatalog.register_custom_model("composite", TorchSpyModel)
        register_env("nested", lambda _: NestedDictEnv())
        a2c = A2CTrainer(
            env="nested",
            config={
                "num_workers": 0,
                "use_pytorch": True,
                "sample_batch_size": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite",
                },
            })
        a2c.train()

        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "torch_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)
if __name__ == "__main__":
    # Start a local Ray instance once for the whole suite.
    ray.init(num_cpus=5)
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_optimizers.py
|
Python
|
import gym
import numpy as np
import time
import unittest
import ray
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from ray.rllib.evaluation import SampleBatch
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.optimizers import AsyncGradientsOptimizer, AsyncSamplesOptimizer
from ray.rllib.optimizers.aso_tree_aggregator import TreeAggregator
from ray.rllib.tests.mock_worker import _MockWorker
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class LRScheduleTest(unittest.TestCase):
    """Checks that an annealing lr_schedule prevents learning progress."""

    def tearDown(self):
        ray.shutdown()

    def testBasic(self):
        ray.init(num_cpus=2)
        # LR anneals from 1e-5 to 0.0 within 1000 timesteps, so PPO should
        # make essentially no learning progress on CartPole.
        ppo = PPOTrainer(
            env="CartPole-v0",
            config={"lr_schedule": [[0, 1e-5], [1000, 0.0]]})
        for _ in range(10):
            result = ppo.train()
        assert result["episode_reward_mean"] < 100, "should not have learned"
class AsyncOptimizerTest(unittest.TestCase):
    """Smoke test for AsyncGradientsOptimizer using mock workers."""

    def tearDown(self):
        ray.shutdown()

    def testBasic(self):
        ray.init(num_cpus=4, object_store_memory=1000 * 1024 * 1024)
        local = _MockWorker()
        remotes = ray.remote(_MockWorker)
        remote_workers = [remotes.remote() for i in range(5)]
        workers = WorkerSet._from_existing(local, remote_workers)
        test_optimizer = AsyncGradientsOptimizer(workers, grads_per_step=10)
        test_optimizer.step()
        # After one optimizer step, all mock weights are expected to be 0.
        self.assertTrue(all(local.get_weights() == 0))
class PPOCollectTest(unittest.TestCase):
    """Verifies how many env steps PPO samples for various batch configs."""

    def tearDown(self):
        ray.shutdown()

    def testPPOSampleWaste(self):
        ray.init(num_cpus=4, object_store_memory=1000 * 1024 * 1024)

        # Check we at least collect the initial wave of samples
        ppo = PPOTrainer(
            env="CartPole-v0",
            config={
                "sample_batch_size": 200,
                "train_batch_size": 128,
                "num_workers": 3,
            })
        ppo.train()
        self.assertEqual(ppo.optimizer.num_steps_sampled, 600)
        ppo.stop()

        # Check we collect at least the specified amount of samples
        ppo = PPOTrainer(
            env="CartPole-v0",
            config={
                "sample_batch_size": 200,
                "train_batch_size": 900,
                "num_workers": 3,
            })
        ppo.train()
        self.assertEqual(ppo.optimizer.num_steps_sampled, 1000)
        ppo.stop()

        # Check in vectorized mode
        ppo = PPOTrainer(
            env="CartPole-v0",
            config={
                "sample_batch_size": 200,
                "num_envs_per_worker": 2,
                "train_batch_size": 900,
                "num_workers": 3,
            })
        ppo.train()
        self.assertEqual(ppo.optimizer.num_steps_sampled, 1200)
        ppo.stop()
class SampleBatchTest(unittest.TestCase):
    """Covers pairwise and bulk concatenation of SampleBatch objects."""

    def testConcat(self):
        first = SampleBatch({
            "a": np.array([1, 2, 3]),
            "b": np.array([4, 5, 6]),
        })
        second = SampleBatch({"a": np.array([1]), "b": np.array([4])})
        third = SampleBatch({"a": np.array([1]), "b": np.array([5])})

        # Pairwise concat appends the second batch's rows after the first's.
        pair = first.concat(second)
        self.assertEqual(pair["a"].tolist(), [1, 2, 3, 1])
        self.assertEqual(pair["b"].tolist(), [4, 5, 6, 4])

        # Bulk concat joins all batches in list order.
        merged = SampleBatch.concat_samples([first, second, third])
        self.assertEqual(merged["a"].tolist(), [1, 2, 3, 1, 1])
        self.assertEqual(merged["b"].tolist(), [4, 5, 6, 4, 5])
class AsyncSamplesOptimizerTest(unittest.TestCase):
    """Coverage for the various AsyncSamplesOptimizer (IMPALA) configs."""

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    @classmethod
    def setUpClass(cls):
        ray.init(num_cpus=8, object_store_memory=1000 * 1024 * 1024)

    def testSimple(self):
        """Default optimizer config reaches the step targets."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(workers)
        self._wait_for(optimizer, 1000, 1000)

    def testMultiGPU(self):
        """Fake-GPU mode exercises the GPU learner path on CPU."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(workers, num_gpus=1, _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)

    def testMultiGPUParallelLoad(self):
        """Fake-GPU mode with a parallel data-loader buffer."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers, num_gpus=1, num_data_loader_buffers=1, _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)

    def testMultiplePasses(self):
        """With num_sgd_iter=10, far more steps are trained than sampled."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers,
            minibatch_buffer_size=10,
            num_sgd_iter=10,
            sample_batch_size=10,
            train_batch_size=50)
        self._wait_for(optimizer, 1000, 10000)
        self.assertLess(optimizer.stats()["num_steps_sampled"], 5000)
        self.assertGreater(optimizer.stats()["num_steps_trained"], 8000)

    def testReplay(self):
        """Replayed steps should dominate at replay_proportion=10."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers,
            replay_buffer_num_slots=100,
            replay_proportion=10,
            sample_batch_size=10,
            train_batch_size=10,
        )
        self._wait_for(optimizer, 1000, 1000)
        stats = optimizer.stats()
        self.assertLess(stats["num_steps_sampled"], 5000)
        replay_ratio = stats["num_steps_replayed"] / stats["num_steps_sampled"]
        self.assertGreater(replay_ratio, 0.7)
        self.assertLess(stats["num_steps_trained"], stats["num_steps_sampled"])

    def testReplayAndMultiplePasses(self):
        """Replay combined with multiple SGD passes over each batch."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers,
            minibatch_buffer_size=10,
            num_sgd_iter=10,
            replay_buffer_num_slots=100,
            replay_proportion=10,
            sample_batch_size=10,
            train_batch_size=10)
        self._wait_for(optimizer, 1000, 1000)

        stats = optimizer.stats()
        print(stats)
        self.assertLess(stats["num_steps_sampled"], 5000)
        replay_ratio = stats["num_steps_replayed"] / stats["num_steps_sampled"]
        self.assertGreater(replay_ratio, 0.7)

    def testMultiTierAggregationBadConf(self):
        """An invalid tree-aggregation setup raises ValueError on init.

        NOTE(review): only one remote worker exists but 4 aggregation
        workers are requested — presumably the cause of the error; confirm
        against TreeAggregator's validation.
        """
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        aggregators = TreeAggregator.precreate_aggregators(4)
        optimizer = AsyncSamplesOptimizer(workers, num_aggregation_workers=4)
        self.assertRaises(ValueError,
                          lambda: optimizer.aggregator.init(aggregators))

    def testMultiTierAggregation(self):
        """Single-aggregator tree aggregation reaches the step targets."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        aggregators = TreeAggregator.precreate_aggregators(1)
        optimizer = AsyncSamplesOptimizer(workers, num_aggregation_workers=1)
        optimizer.aggregator.init(aggregators)
        self._wait_for(optimizer, 1000, 1000)

    def testRejectBadConfigs(self):
        """Invalid option combos raise; odd batch sizes still work."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        # num_data_loader_buffers=2 with a minibatch buffer is rejected.
        self.assertRaises(
            ValueError, lambda: AsyncSamplesOptimizer(
                local, remotes,
                num_data_loader_buffers=2, minibatch_buffer_size=4))
        optimizer = AsyncSamplesOptimizer(
            workers,
            num_gpus=1,
            train_batch_size=100,
            sample_batch_size=50,
            _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)
        optimizer = AsyncSamplesOptimizer(
            workers,
            num_gpus=1,
            train_batch_size=100,
            sample_batch_size=25,
            _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)
        # sample_batch_size that does not divide train_batch_size evenly.
        optimizer = AsyncSamplesOptimizer(
            workers,
            num_gpus=1,
            train_batch_size=100,
            sample_batch_size=74,
            _fake_gpus=True)
        self._wait_for(optimizer, 1000, 1000)

    def testLearnerQueueTimeout(self):
        """A 1s learner queue timeout surfaces as an AssertionError."""
        local, remotes = self._make_envs()
        workers = WorkerSet._from_existing(local, remotes)
        optimizer = AsyncSamplesOptimizer(
            workers,
            sample_batch_size=1000,
            train_batch_size=1000,
            learner_queue_timeout=1)
        self.assertRaises(AssertionError,
                          lambda: self._wait_for(optimizer, 1000, 1000))

    def _make_envs(self):
        """Create one local and one remote CartPole PPO rollout worker."""
        def make_sess():
            return tf.Session(config=tf.ConfigProto(device_count={"CPU": 2}))

        local = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"),
            policy=PPOTFPolicy,
            tf_session_creator=make_sess)
        remotes = [
            RolloutWorker.as_remote().remote(
                env_creator=lambda _: gym.make("CartPole-v0"),
                policy=PPOTFPolicy,
                tf_session_creator=make_sess)
        ]
        return local, remotes

    def _wait_for(self, optimizer, num_steps_sampled, num_steps_trained):
        """Step the optimizer until both counters exceed the targets,
        raising AssertionError after 30s of wall-clock time."""
        start = time.time()
        while time.time() - start < 30:
            optimizer.step()
            if optimizer.num_steps_sampled > num_steps_sampled and \
                    optimizer.num_steps_trained > num_steps_trained:
                print("OK", optimizer.stats())
                return
        raise AssertionError("TIMED OUT", optimizer.stats())
if __name__ == "__main__":
    # Each test class manages its own ray.init()/ray.shutdown().
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_perf.py
|
Python
|
import gym
import time
import unittest
import ray
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.tests.test_rollout_worker import MockPolicy
class TestPerf(unittest.TestCase):
    """Rough sampling-throughput benchmark for RolloutWorker."""

    # Tested on Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz
    # 11/23/18: Samples per second 8501.125113727468
    # 03/01/19: Samples per second 8610.164353268685
    def testBaselinePerformance(self):
        """Print samples/sec for 20 one-second sampling runs."""
        for _ in range(20):
            ev = RolloutWorker(
                env_creator=lambda _: gym.make("CartPole-v0"),
                policy=MockPolicy,
                batch_steps=100)
            start = time.time()
            count = 0
            # Sample for roughly one wall-clock second.
            while time.time() - start < 1:
                count += ev.sample().count
            print()
            print("Samples per second {}".format(
                count / (time.time() - start)))
            print()
if __name__ == "__main__":
    # Start a local Ray instance once for the whole suite.
    ray.init(num_cpus=5)
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_reproducibility.py
|
Python
|
import unittest
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.tune.registry import register_env
import numpy as np
import gym
class TestReproducibility(unittest.TestCase):
    """Checks that the `seed` config makes DQN training deterministic."""

    def testReproducingTrajectory(self):
        class PickLargest(gym.Env):
            """One-step env: reward is the chosen component of a random
            4-vector observation."""

            def __init__(self):
                self.observation_space = gym.spaces.Box(
                    low=float("-inf"), high=float("inf"), shape=(4, ))
                self.action_space = gym.spaces.Discrete(4)

            def reset(self, **kwargs):
                self.obs = np.random.randn(4)
                return self.obs

            def step(self, action):
                reward = self.obs[action]
                # Episode ends after a single step.
                return self.obs, reward, True, {}

        def env_creator(env_config):
            return PickLargest()

        trajs = list()
        for trial in range(3):
            ray.init()
            register_env("PickLargest", env_creator)
            # Trials 0 and 1 share a seed; trial 2 uses a different one.
            agent = DQNTrainer(
                env="PickLargest",
                config={"seed": 666 if trial in [0, 1] else 999})
            trajectory = list()
            for _ in range(8):
                r = agent.train()
                trajectory.append(r["episode_reward_max"])
                trajectory.append(r["episode_reward_min"])
            trajs.append(trajectory)
            ray.shutdown()

        # trial0 and trial1 use same seed and thus
        # expect identical trajectories.
        all_same = True
        for v0, v1 in zip(trajs[0], trajs[1]):
            if v0 != v1:
                all_same = False
        self.assertTrue(all_same)

        # trial1 and trial2 use different seeds and thus
        # most rewards tend to be different.
        diff_cnt = 0
        for v1, v2 in zip(trajs[1], trajs[2]):
            if v1 != v2:
                diff_cnt += 1
        self.assertTrue(diff_cnt > 8)
if __name__ == "__main__":
    # ray.init()/shutdown() happen inside the test itself (per trial).
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_rollout.sh
|
Shell
|
#!/bin/bash -e

# Smoke test: train IMPALA for one iteration with checkpointing, then
# replay the checkpoint via rollout.py both by step count and by episode.

# Resolve script locations (Docker-image layout first, repo layout second).
TRAIN=/ray/rllib/train.py
if [ ! -e "$TRAIN" ]; then
    TRAIN=../train.py
fi
ROLLOUT=/ray/rllib/rollout.py
if [ ! -e "$ROLLOUT" ]; then
    ROLLOUT=../rollout.py
fi

TMP=`mktemp -d`
echo "Saving results to $TMP"

# One training iteration with checkpoint-freq=1 so checkpoint_1 exists.
$TRAIN --local-dir=$TMP --run=IMPALA --checkpoint-freq=1 \
    --config='{"num_workers": 1, "num_gpus": 0}' --env=Pong-ram-v4 \
    --stop='{"training_iteration": 1}'

find $TMP

CHECKPOINT_PATH=`ls $TMP/default/*/checkpoint_1/checkpoint-1`
echo "Checkpoint path $CHECKPOINT_PATH"
test -e "$CHECKPOINT_PATH"

# Roll out for a fixed number of steps, then for one full episode.
$ROLLOUT --run=IMPALA "$CHECKPOINT_PATH" --steps=100 \
    --out="$TMP/rollouts_100steps.pkl" --no-render
test -e "$TMP/rollouts_100steps.pkl"
$ROLLOUT --run=IMPALA "$CHECKPOINT_PATH" --episodes=1 \
    --out="$TMP/rollouts_1episode.pkl" --no-render
test -e "$TMP/rollouts_1episode.pkl"

rm -rf "$TMP"
echo "OK"
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_rollout_worker.py
|
Python
|
import gym
import numpy as np
import random
import time
import unittest
from collections import Counter
import ray
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.policy.policy import Policy
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
from ray.rllib.env.vector_env import VectorEnv
from ray.tune.registry import register_env
class MockPolicy(Policy):
    """Minimal test policy.

    compute_actions draws one coin flip and replicates it across the whole
    batch; postprocess_trajectory applies non-GAE advantage computation.
    """

    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        episodes=None,
                        **kwargs):
        # A single random draw, shared by every observation in the batch.
        shared_action = random.choice([0, 1])
        batch_actions = [shared_action] * len(obs_batch)
        return batch_actions, [], {}

    def postprocess_trajectory(self,
                               batch,
                               other_agent_batches=None,
                               episode=None):
        # The sampler must always hand us the episode object.
        assert episode is not None
        processed = compute_advantages(batch, 100.0, 0.9, use_gae=False)
        return processed
class BadPolicy(MockPolicy):
    """Policy whose action computation always fails; used to verify that
    errors raised inside workers propagate instead of being swallowed."""

    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        episodes=None,
                        **kwargs):
        raise Exception("intentional error")
class FailOnStepEnv(gym.Env):
    """Env whose reset() and step() always raise; used to check that env
    errors surface out of rollout workers."""

    def __init__(self):
        self.observation_space = gym.spaces.Discrete(1)
        self.action_space = gym.spaces.Discrete(2)

    def reset(self):
        raise ValueError("kaboom")

    def step(self, action):
        raise ValueError("kaboom")
class MockEnv(gym.Env):
    """Env that always observes 0, pays reward 1 per step, and terminates
    after a fixed number of steps. The optional `config` is stored so tests
    can inspect it (e.g. worker/vector indices)."""

    def __init__(self, episode_length, config=None):
        self.episode_length = episode_length
        self.config = config
        self.observation_space = gym.spaces.Discrete(1)
        self.action_space = gym.spaces.Discrete(2)
        # Step counter within the current episode.
        self.i = 0

    def reset(self):
        self.i = 0
        return self.i

    def step(self, action):
        self.i += 1
        episode_over = self.i >= self.episode_length
        return 0, 1, episode_over, {}
class MockEnv2(gym.Env):
    """Like MockEnv, but the observation equals the step count and each
    step pays a reward of 100 (useful for reward-clipping tests)."""

    def __init__(self, episode_length):
        self.episode_length = episode_length
        self.observation_space = gym.spaces.Discrete(100)
        self.action_space = gym.spaces.Discrete(2)
        # Step counter; doubles as the observation.
        self.i = 0

    def reset(self):
        self.i = 0
        return self.i

    def step(self, action):
        self.i += 1
        episode_over = self.i >= self.episode_length
        return self.i, 100, episode_over, {}
class MockVectorEnv(VectorEnv):
    """Vectorized wrapper around `num_envs` independent MockEnv instances.

    Used to exercise RolloutWorker's explicit VectorEnv code path (as
    opposed to auto-vectorization of a single gym.Env).
    """

    def __init__(self, episode_length, num_envs):
        self.envs = [MockEnv(episode_length) for _ in range(num_envs)]
        self.observation_space = gym.spaces.Discrete(1)
        self.action_space = gym.spaces.Discrete(2)
        self.num_envs = num_envs

    def vector_reset(self):
        """Reset all sub-envs; returns the list of initial observations."""
        return [e.reset() for e in self.envs]

    def reset_at(self, index):
        """Reset only the sub-env at `index`; returns its observation."""
        return self.envs[index].reset()

    def vector_step(self, actions):
        """Step every sub-env with its corresponding action.

        Returns:
            Four parallel lists: observations, rewards, dones, infos.
        """
        obs_batch, rew_batch, done_batch, info_batch = [], [], [], []
        # Pair each env with its action directly instead of indexing by i.
        for env, action in zip(self.envs, actions):
            obs, rew, done, info = env.step(action)
            obs_batch.append(obs)
            rew_batch.append(rew)
            done_batch.append(done)
            info_batch.append(info)
        return obs_batch, rew_batch, done_batch, info_batch

    def get_unwrapped(self):
        """Return the underlying per-env instances."""
        return self.envs
class TestRolloutWorker(unittest.TestCase):
    """Behavioral tests for RolloutWorker: sampling, batching, metrics,
    horizons, vectorization, and observation-filter synchronization."""

    def test_basic(self):
        """Sampled batches contain the expected columns and prev-fields."""
        ev = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"), policy=MockPolicy)
        batch = ev.sample()
        for key in [
                "obs", "actions", "rewards", "dones", "advantages",
                "prev_rewards", "prev_actions"
        ]:
            self.assertIn(key, batch)
            self.assertGreater(np.abs(np.mean(batch[key])), 0)

        def to_prev(vec):
            # Shift values right by one step, resetting at episode ends.
            out = np.zeros_like(vec)
            for i, v in enumerate(vec):
                if i + 1 < len(out) and not batch["dones"][i]:
                    out[i + 1] = v
            return out.tolist()

        self.assertEqual(batch["prev_rewards"].tolist(),
                         to_prev(batch["rewards"]))
        self.assertEqual(batch["prev_actions"].tolist(),
                         to_prev(batch["actions"]))
        self.assertGreater(batch["advantages"][0], 1)

    def test_batch_ids(self):
        """Each sample() call carries a single, distinct unroll_id."""
        ev = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"), policy=MockPolicy)
        batch1 = ev.sample()
        batch2 = ev.sample()
        self.assertEqual(len(set(batch1["unroll_id"])), 1)
        self.assertEqual(len(set(batch2["unroll_id"])), 1)
        self.assertEqual(
            len(set(SampleBatch.concat(batch1, batch2)["unroll_id"])), 2)

    def test_global_vars_update(self):
        """lr_schedule anneals cur_lr across training calls."""
        agent = A2CTrainer(
            env="CartPole-v0",
            config={
                "lr_schedule": [[0, 0.1], [400, 0.000001]],
            })
        result = agent.train()
        self.assertGreater(result["info"]["learner"]["cur_lr"], 0.01)
        result2 = agent.train()
        self.assertLess(result2["info"]["learner"]["cur_lr"], 0.0001)

    def test_no_step_on_init(self):
        """Env errors during startup must propagate out of train()."""
        register_env("fail", lambda _: FailOnStepEnv())
        pg = PGTrainer(env="fail", config={"num_workers": 1})
        self.assertRaises(Exception, lambda: pg.train())

    def test_callbacks(self):
        """Episode/sample callbacks fire the expected number of times."""
        counts = Counter()
        pg = PGTrainer(
            env="CartPole-v0", config={
                "num_workers": 0,
                "sample_batch_size": 50,
                "train_batch_size": 50,
                "callbacks": {
                    "on_episode_start": lambda x: counts.update({"start": 1}),
                    "on_episode_step": lambda x: counts.update({"step": 1}),
                    "on_episode_end": lambda x: counts.update({"end": 1}),
                    "on_sample_end": lambda x: counts.update({"sample": 1}),
                },
            })
        pg.train()
        pg.train()
        pg.train()
        pg.train()
        # 4 train() calls with batch size 50 -> 4 sample ends, 200-400 steps.
        self.assertEqual(counts["sample"], 4)
        self.assertGreater(counts["start"], 0)
        self.assertGreater(counts["end"], 0)
        self.assertGreater(counts["step"], 200)
        self.assertLess(counts["step"], 400)

    def test_query_evaluators(self):
        """foreach_worker variants visit local + remote workers in order."""
        register_env("test", lambda _: gym.make("CartPole-v0"))
        pg = PGTrainer(
            env="test",
            config={
                "num_workers": 2,
                "sample_batch_size": 5,
                "num_envs_per_worker": 2,
            })
        results = pg.workers.foreach_worker(lambda ev: ev.sample_batch_size)
        results2 = pg.workers.foreach_worker_with_index(
            lambda ev, i: (i, ev.sample_batch_size))
        results3 = pg.workers.foreach_worker(
            lambda ev: ev.foreach_env(lambda env: 1))
        # NOTE(review): effective sample_batch_size appears to be the
        # configured 5 scaled by num_envs_per_worker=2 — confirm.
        self.assertEqual(results, [10, 10, 10])
        self.assertEqual(results2, [(0, 10), (1, 10), (2, 10)])
        self.assertEqual(results3, [[1, 1], [1, 1], [1, 1]])

    def test_reward_clipping(self):
        """clip_rewards caps per-step rewards but not episode totals."""
        # clipping on
        ev = RolloutWorker(
            env_creator=lambda _: MockEnv2(episode_length=10),
            policy=MockPolicy,
            clip_rewards=True,
            batch_mode="complete_episodes")
        self.assertEqual(max(ev.sample()["rewards"]), 1)
        result = collect_metrics(ev, [])
        self.assertEqual(result["episode_reward_mean"], 1000)

        # clipping off
        ev2 = RolloutWorker(
            env_creator=lambda _: MockEnv2(episode_length=10),
            policy=MockPolicy,
            clip_rewards=False,
            batch_mode="complete_episodes")
        self.assertEqual(max(ev2.sample()["rewards"]), 100)
        result2 = collect_metrics(ev2, [])
        self.assertEqual(result2["episode_reward_mean"], 1000)

    def test_hard_horizon(self):
        """A hard horizon emits a done flag at every horizon boundary."""
        ev = RolloutWorker(
            env_creator=lambda _: MockEnv(episode_length=10),
            policy=MockPolicy,
            batch_mode="complete_episodes",
            batch_steps=10,
            episode_horizon=4,
            soft_horizon=False)
        samples = ev.sample()
        # three logical episodes
        self.assertEqual(len(set(samples["eps_id"])), 3)
        # 3 done values
        self.assertEqual(sum(samples["dones"]), 3)

    def test_soft_horizon(self):
        """A soft horizon splits episodes without emitting extra dones."""
        ev = RolloutWorker(
            env_creator=lambda _: MockEnv(episode_length=10),
            policy=MockPolicy,
            batch_mode="complete_episodes",
            batch_steps=10,
            episode_horizon=4,
            soft_horizon=True)
        samples = ev.sample()
        # three logical episodes
        self.assertEqual(len(set(samples["eps_id"])), 3)
        # only 1 hard done value
        self.assertEqual(sum(samples["dones"]), 1)

    def test_metrics(self):
        """collect_metrics merges local and remote worker episodes."""
        ev = RolloutWorker(
            env_creator=lambda _: MockEnv(episode_length=10),
            policy=MockPolicy,
            batch_mode="complete_episodes")
        remote_ev = RolloutWorker.as_remote().remote(
            env_creator=lambda _: MockEnv(episode_length=10),
            policy=MockPolicy,
            batch_mode="complete_episodes")
        ev.sample()
        ray.get(remote_ev.sample.remote())
        result = collect_metrics(ev, [remote_ev])
        self.assertEqual(result["episodes_this_iter"], 20)
        self.assertEqual(result["episode_reward_mean"], 10)

    def test_async(self):
        """sample_async=True still yields complete, postprocessed batches."""
        ev = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"),
            sample_async=True,
            policy=MockPolicy)
        batch = ev.sample()
        for key in ["obs", "actions", "rewards", "dones", "advantages"]:
            self.assertIn(key, batch)
        self.assertGreater(batch["advantages"][0], 1)

    def test_auto_vectorization(self):
        """num_envs > 1 auto-vectorizes a plain gym env."""
        ev = RolloutWorker(
            env_creator=lambda cfg: MockEnv(episode_length=20, config=cfg),
            policy=MockPolicy,
            batch_mode="truncate_episodes",
            batch_steps=2,
            num_envs=8)
        for _ in range(8):
            batch = ev.sample()
            self.assertEqual(batch.count, 16)
        result = collect_metrics(ev, [])
        self.assertEqual(result["episodes_this_iter"], 0)
        for _ in range(8):
            batch = ev.sample()
            self.assertEqual(batch.count, 16)
        result = collect_metrics(ev, [])
        self.assertEqual(result["episodes_this_iter"], 8)
        # Each sub-env should receive its own vector_index in its config.
        indices = []
        for env in ev.async_env.vector_env.envs:
            self.assertEqual(env.unwrapped.config.worker_index, 0)
            indices.append(env.unwrapped.config.vector_index)
        self.assertEqual(indices, [0, 1, 2, 3, 4, 5, 6, 7])

    def test_batches_larger_when_vectorized(self):
        """Batch count scales with the number of vectorized envs."""
        ev = RolloutWorker(
            env_creator=lambda _: MockEnv(episode_length=8),
            policy=MockPolicy,
            batch_mode="truncate_episodes",
            batch_steps=4,
            num_envs=4)
        batch = ev.sample()
        self.assertEqual(batch.count, 16)
        result = collect_metrics(ev, [])
        self.assertEqual(result["episodes_this_iter"], 0)
        batch = ev.sample()
        result = collect_metrics(ev, [])
        self.assertEqual(result["episodes_this_iter"], 4)

    def test_vector_env_support(self):
        """A user-provided VectorEnv is sampled directly."""
        ev = RolloutWorker(
            env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),
            policy=MockPolicy,
            batch_mode="truncate_episodes",
            batch_steps=10)
        for _ in range(8):
            batch = ev.sample()
            self.assertEqual(batch.count, 10)
        result = collect_metrics(ev, [])
        self.assertEqual(result["episodes_this_iter"], 0)
        for _ in range(8):
            batch = ev.sample()
            self.assertEqual(batch.count, 10)
        result = collect_metrics(ev, [])
        self.assertEqual(result["episodes_this_iter"], 8)

    def test_truncate_episodes(self):
        """truncate_episodes returns exactly batch_steps steps."""
        ev = RolloutWorker(
            env_creator=lambda _: MockEnv(10),
            policy=MockPolicy,
            batch_steps=15,
            batch_mode="truncate_episodes")
        batch = ev.sample()
        self.assertEqual(batch.count, 15)

    def test_complete_episodes(self):
        """complete_episodes rounds the batch up to an episode boundary."""
        ev = RolloutWorker(
            env_creator=lambda _: MockEnv(10),
            policy=MockPolicy,
            batch_steps=5,
            batch_mode="complete_episodes")
        batch = ev.sample()
        self.assertEqual(batch.count, 10)

    def test_complete_episodes_packing(self):
        """complete_episodes packs multiple whole episodes in one batch."""
        ev = RolloutWorker(
            env_creator=lambda _: MockEnv(10),
            policy=MockPolicy,
            batch_steps=15,
            batch_mode="complete_episodes")
        batch = ev.sample()
        self.assertEqual(batch.count, 20)
        self.assertEqual(
            batch["t"].tolist(),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    def test_filter_sync(self):
        """Async sampling still accumulates observation-filter stats."""
        ev = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"),
            policy=MockPolicy,
            sample_async=True,
            observation_filter="ConcurrentMeanStdFilter")
        time.sleep(2)
        ev.sample()
        filters = ev.get_filters(flush_after=True)
        obs_f = filters[DEFAULT_POLICY_ID]
        self.assertNotEqual(obs_f.rs.n, 0)
        self.assertNotEqual(obs_f.buffer.n, 0)

    def test_get_filters(self):
        """get_filters without flushing keeps accumulating counts."""
        ev = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"),
            policy=MockPolicy,
            sample_async=True,
            observation_filter="ConcurrentMeanStdFilter")
        self.sample_and_flush(ev)
        filters = ev.get_filters(flush_after=False)
        time.sleep(2)
        filters2 = ev.get_filters(flush_after=False)
        obs_f = filters[DEFAULT_POLICY_ID]
        obs_f2 = filters2[DEFAULT_POLICY_ID]
        self.assertGreaterEqual(obs_f2.rs.n, obs_f.rs.n)
        self.assertGreaterEqual(obs_f2.buffer.n, obs_f.buffer.n)

    def test_sync_filter(self):
        """sync_filters pushes external filter state into the worker."""
        ev = RolloutWorker(
            env_creator=lambda _: gym.make("CartPole-v0"),
            policy=MockPolicy,
            sample_async=True,
            observation_filter="ConcurrentMeanStdFilter")
        obs_f = self.sample_and_flush(ev)

        # Current State
        filters = ev.get_filters(flush_after=False)
        obs_f = filters[DEFAULT_POLICY_ID]
        self.assertLessEqual(obs_f.buffer.n, 20)

        new_obsf = obs_f.copy()
        new_obsf.rs._n = 100
        ev.sync_filters({DEFAULT_POLICY_ID: new_obsf})
        filters = ev.get_filters(flush_after=False)
        obs_f = filters[DEFAULT_POLICY_ID]
        self.assertGreaterEqual(obs_f.rs.n, 100)
        self.assertLessEqual(obs_f.buffer.n, 20)

    def sample_and_flush(self, ev):
        """Sample once, flush the filters, and return the obs filter."""
        time.sleep(2)
        ev.sample()
        filters = ev.get_filters(flush_after=True)
        obs_f = filters[DEFAULT_POLICY_ID]
        self.assertNotEqual(obs_f.rs.n, 0)
        self.assertNotEqual(obs_f.buffer.n, 0)
        return obs_f
if __name__ == "__main__":
    # Start a local Ray instance once for the whole suite.
    ray.init(num_cpus=5)
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/tests/test_supported_spaces.py
|
Python
|
import unittest
import traceback
import gym
from gym.spaces import Box, Discrete, Tuple, Dict, MultiDiscrete
from gym.envs.registration import EnvSpec
import numpy as np
import sys
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork as FCNetV2
from ray.rllib.models.tf.visionnet_v2 import VisionNetwork as VisionNetV2
from ray.rllib.tests.test_multi_agent_env import (MultiCartpole,
MultiMountainCar)
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.tune.registry import register_env
# Grid of action spaces crossed with the observation spaces below in
# check_support().
ACTION_SPACES_TO_TEST = {
    "discrete": Discrete(5),
    "vector": Box(-1.0, 1.0, (5, ), dtype=np.float32),
    "vector2": Box(-1.0, 1.0, (
        5,
        5,
    ), dtype=np.float32),
    "multidiscrete": MultiDiscrete([1, 2, 3, 4]),
    "tuple": Tuple(
        [Discrete(2),
         Discrete(3),
         Box(-1.0, 1.0, (5, ), dtype=np.float32)]),
}
# Observation spaces crossed with the action spaces above; includes image-
# and Atari-sized boxes plus nested Tuple/Dict spaces.
OBSERVATION_SPACES_TO_TEST = {
    "discrete": Discrete(5),
    "vector": Box(-1.0, 1.0, (5, ), dtype=np.float32),
    "vector2": Box(-1.0, 1.0, (5, 5), dtype=np.float32),
    "image": Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32),
    "atari": Box(-1.0, 1.0, (210, 160, 3), dtype=np.float32),
    "tuple": Tuple([Discrete(10),
                    Box(-1.0, 1.0, (5, ), dtype=np.float32)]),
    "dict": Dict({
        "task": Discrete(10),
        "position": Box(-1.0, 1.0, (5, ), dtype=np.float32),
    }),
}
def make_stub_env(action_space, obs_space, check_action_bounds):
    """Build a gym.Env class with the given spaces.

    The returned env emits random observations, gives reward 1, and
    terminates after a single step. If `check_action_bounds` is True,
    step() raises ValueError for actions outside the action space.
    """

    class StubEnv(gym.Env):
        def __init__(self):
            self.action_space = action_space
            self.observation_space = obs_space
            self.spec = EnvSpec("StubEnv-v0")

        def reset(self):
            sample = self.observation_space.sample()
            return sample

        def step(self, action):
            # Optionally validate the action against the declared space.
            if check_action_bounds and not self.action_space.contains(action):
                raise ValueError("Illegal action for {}: {}".format(
                    self.action_space, action))
            # Tuple actions must match the number of sub-spaces regardless.
            if (isinstance(self.action_space, Tuple)
                    and len(action) != len(self.action_space.spaces)):
                raise ValueError("Illegal action for {}: {}".format(
                    self.action_space, action))
            return self.observation_space.sample(), 1, True, {}

    return StubEnv
def check_support(alg, config, stats, check_bounds=False, name=None):
    """Run `alg` over every (action space, obs space) pair in the grids.

    For each pair a stub env is registered, a trainer is built and trained
    for one iteration, and the outcome string ("ok", "skip", "unsupported"
    or "ERROR") is recorded in `stats` keyed by
    (name or alg, action-space label, obs-space label).

    Arguments:
        alg (str): Registered trainer name (e.g. "PPO").
        config (dict): Trainer config; "log_level" is forced to "ERROR".
        stats (dict): Output dict collecting per-combination results.
        check_bounds (bool): If True, the stub env raises on actions
            outside the action space.
        name (str): Optional key to record results under instead of `alg`.
    """
    covered_a = set()
    covered_o = set()
    config["log_level"] = "ERROR"
    for a_name, action_space in ACTION_SPACES_TO_TEST.items():
        for o_name, obs_space in OBSERVATION_SPACES_TO_TEST.items():
            print("=== Testing", alg, action_space, obs_space, "===")
            stub_env = make_stub_env(action_space, obs_space, check_bounds)
            register_env("stub_env", lambda c: stub_env())
            stat = "ok"
            a = None
            try:
                # Once a space appeared in some passing combination, skip
                # re-testing it against every other space.
                if a_name in covered_a and o_name in covered_o:
                    stat = "skip"  # speed up tests by avoiding full grid
                else:
                    a = get_agent_class(alg)(config=config, env="stub_env")
                    if alg not in ["DDPG", "ES", "ARS"]:
                        # Sanity-check that the default catalog picked the
                        # expected model class for this obs space.
                        if o_name in ["atari", "image"]:
                            assert isinstance(a.get_policy().model,
                                              VisionNetV2)
                        elif o_name in ["vector", "vector2"]:
                            assert isinstance(a.get_policy().model, FCNetV2)
                    a.train()
                    covered_a.add(a_name)
                    covered_o.add(o_name)
            except UnsupportedSpaceException:
                stat = "unsupported"
            except Exception as e:
                stat = "ERROR"
                print(e)
                print(traceback.format_exc())
            finally:
                # Always tear the trainer down so the next combination
                # starts from a clean slate.
                if a:
                    try:
                        a.stop()
                    except Exception as e:
                        print("Ignoring error stopping agent", e)
                        pass
            print(stat)
            print()
            stats[name or alg, a_name, o_name] = stat
def check_support_multiagent(alg, config):
    """Smoke-test `alg` on a small multi-agent env for one train() call."""
    register_env("multi_mountaincar", lambda _: MultiMountainCar(2))
    register_env("multi_cartpole", lambda _: MultiCartpole(2))
    config["log_level"] = "ERROR"
    # DDPG-family algorithms need the continuous-action env.
    env_name = "multi_mountaincar" if "DDPG" in alg else "multi_cartpole"
    trainer = get_agent_class(alg)(config=config, env=env_name)
    try:
        trainer.train()
    finally:
        trainer.stop()
class ModelSupportedSpaces(unittest.TestCase):
    """Exhaustively checks which algorithms support which gym spaces."""

    def setUp(self):
        # Fresh 4-CPU local Ray cluster per test.
        ray.init(num_cpus=4)

    def tearDown(self):
        ray.shutdown()

    def testAll(self):
        """Runs every single-agent algorithm over the space grids."""
        stats = {}
        check_support("IMPALA", {"num_gpus": 0}, stats)
        check_support("APPO", {"num_gpus": 0, "vtrace": False}, stats)
        check_support(
            "APPO", {
                "num_gpus": 0,
                "vtrace": True
            }, stats, name="APPO-vt")
        check_support(
            "DDPG", {
                "exploration_ou_noise_scale": 100.0,
                "timesteps_per_iteration": 1,
                "use_state_preprocessor": True,
            },
            stats,
            check_bounds=True)
        check_support("DQN", {"timesteps_per_iteration": 1}, stats)
        check_support(
            "A3C", {
                "num_workers": 1,
                "optimizer": {
                    "grads_per_step": 1
                }
            },
            stats,
            check_bounds=True)
        check_support(
            "PPO", {
                "num_workers": 1,
                "num_sgd_iter": 1,
                "train_batch_size": 10,
                "sample_batch_size": 10,
                "sgd_minibatch_size": 1,
            },
            stats,
            check_bounds=True)
        check_support(
            "ES", {
                "num_workers": 1,
                "noise_size": 10000000,
                "episodes_per_batch": 1,
                "train_batch_size": 1
            }, stats)
        check_support(
            "ARS", {
                "num_workers": 1,
                "noise_size": 10000000,
                "num_rollouts": 1,
                "rollouts_used": 1
            }, stats)
        check_support(
            "PG", {
                "num_workers": 1,
                "optimizer": {}
            },
            stats,
            check_bounds=True)
        # "ok", "unsupported" and "skip" are all acceptable outcomes;
        # anything else counts as an unexpected failure.
        num_unexpected_errors = 0
        for (alg, a_name, o_name), stat in sorted(stats.items()):
            if stat not in ["ok", "unsupported", "skip"]:
                num_unexpected_errors += 1
            print(alg, "action_space", a_name, "obs_space", o_name, "result",
                  stat)
        self.assertEqual(num_unexpected_errors, 0)

    def testMultiAgent(self):
        """Smoke-tests the multi-agent-capable algorithms."""
        check_support_multiagent(
            "APEX", {
                "num_workers": 2,
                "timesteps_per_iteration": 1000,
                "num_gpus": 0,
                "min_iter_time_s": 1,
                "learning_starts": 1000,
                "target_network_update_freq": 100,
            })
        check_support_multiagent(
            "APEX_DDPG", {
                "num_workers": 2,
                "timesteps_per_iteration": 1000,
                "num_gpus": 0,
                "min_iter_time_s": 1,
                "learning_starts": 1000,
                "target_network_update_freq": 100,
                "use_state_preprocessor": True,
            })
        check_support_multiagent("IMPALA", {"num_gpus": 0})
        check_support_multiagent("DQN", {"timesteps_per_iteration": 1})
        check_support_multiagent("A3C", {
            "num_workers": 1,
            "optimizer": {
                "grads_per_step": 1
            }
        })
        check_support_multiagent(
            "PPO", {
                "num_workers": 1,
                "num_sgd_iter": 1,
                "train_batch_size": 10,
                "sample_batch_size": 10,
                "sgd_minibatch_size": 1,
            })
        check_support_multiagent("PG", {"num_workers": 1, "optimizer": {}})
        check_support_multiagent("DDPG", {
            "timesteps_per_iteration": 1,
            "use_state_preprocessor": True,
        })
if __name__ == "__main__":
    # With --smoke, shrink the space grids so the full matrix runs quickly.
    if len(sys.argv) > 1 and sys.argv[1] == "--smoke":
        ACTION_SPACES_TO_TEST = {
            "discrete": Discrete(5),
        }
        OBSERVATION_SPACES_TO_TEST = {
            "vector": Box(0.0, 1.0, (5, ), dtype=np.float32),
            "atari": Box(0.0, 1.0, (210, 160, 3), dtype=np.float32),
        }
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/train.py
|
Python
|
#!/usr/bin/env python
import argparse
import yaml
import ray
from ray.cluster_utils import Cluster
from ray.tune.config_parser import make_parser
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.resources import resources_to_json
from ray.tune.tune import _make_scheduler, run_experiments
from ray.rllib.utils.framework import try_import_tf, try_import_torch
# Try to import both backends for flag checking/warnings.
tf = try_import_tf()
torch, _ = try_import_torch()
# Epilog text appended to `rllib train --help` output.
EXAMPLE_USAGE = """
Training example via RLlib CLI:
rllib train --run DQN --env CartPole-v0
Grid search example via RLlib CLI:
rllib train -f tuned_examples/cartpole-grid-search-example.yaml
Grid search example via executable:
./train.py -f tuned_examples/cartpole-grid-search-example.yaml
Note that -f overrides all other trial-specific command-line options.
"""
def create_parser(parser_creator=None):
    """Build the argument parser for the `rllib train` command.

    Extends the shared Tune parser (checkpointing, stopping criteria,
    resources, etc.) with Ray-cluster and RLlib-specific flags.

    Arguments:
        parser_creator: Optional factory passed through to make_parser().

    Returns:
        argparse.ArgumentParser: The configured parser.
    """
    parser = make_parser(
        parser_creator=parser_creator,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Train a reinforcement learning agent.",
        epilog=EXAMPLE_USAGE)

    # See also the base parser definition in ray/tune/config_parser.py

    # --- Ray cluster options -------------------------------------------
    parser.add_argument(
        "--ray-address",
        default=None,
        type=str,
        help="Connect to an existing Ray cluster at this address instead "
        "of starting a new one.")
    parser.add_argument(
        "--ray-num-cpus",
        default=None,
        type=int,
        help="--num-cpus to use if starting a new cluster.")
    parser.add_argument(
        "--ray-num-gpus",
        default=None,
        type=int,
        help="--num-gpus to use if starting a new cluster.")
    parser.add_argument(
        "--ray-num-nodes",
        default=None,
        type=int,
        help="Emulate multiple cluster nodes for debugging.")
    parser.add_argument(
        "--ray-redis-max-memory",
        default=None,
        type=int,
        help="--redis-max-memory to use if starting a new cluster.")
    parser.add_argument(
        "--ray-memory",
        default=None,
        type=int,
        help="--memory to use if starting a new cluster.")
    parser.add_argument(
        "--ray-object-store-memory",
        default=None,
        type=int,
        help="--object-store-memory to use if starting a new cluster.")

    # --- Experiment / results options ----------------------------------
    parser.add_argument(
        "--experiment-name",
        default="default",
        type=str,
        help="Name of the subdirectory under `local_dir` to put results in.")
    parser.add_argument(
        "--local-dir",
        default=DEFAULT_RESULTS_DIR,
        type=str,
        help="Local dir to save training results to. Defaults to '{}'.".format(
            DEFAULT_RESULTS_DIR))
    parser.add_argument(
        "--upload-dir",
        default="",
        type=str,
        help="Optional URI to sync training results to (e.g. s3://bucket).")
    parser.add_argument(
        "-v", action="store_true", help="Whether to use INFO level logging.")
    parser.add_argument(
        "-vv", action="store_true", help="Whether to use DEBUG level logging.")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="Whether to attempt to resume previous Tune experiments.")

    # --- Framework / execution options ---------------------------------
    parser.add_argument(
        "--torch",
        action="store_true",
        help="Whether to use PyTorch (instead of tf) as the DL framework.")
    parser.add_argument(
        "--eager",
        action="store_true",
        help="Whether to attempt to enable TF eager execution.")
    parser.add_argument(
        "--trace",
        action="store_true",
        help="Whether to attempt to enable tracing for eager mode.")
    parser.add_argument(
        "--env", default=None, type=str, help="The gym environment to use.")
    parser.add_argument(
        "--queue-trials",
        action="store_true",
        help=(
            "Whether to queue trials when the cluster does not currently have "
            "enough resources to launch one. This should be set to True when "
            "running on an autoscaling cluster to enable automatic scale-up."))
    parser.add_argument(
        "-f",
        "--config-file",
        default=None,
        type=str,
        help="If specified, use config options from this file. Note that this "
        "overrides any trial-specific options set via flags above.")
    return parser
def run(args, parser):
    """Launch training from parsed CLI args.

    Builds the experiment spec (from --config-file, or from the individual
    CLI flags), starts or connects to Ray, and hands the experiments to
    Tune's run_experiments().

    Arguments:
        args (Namespace): Parsed arguments from create_parser().
        parser (ArgumentParser): Used to report usage errors.
    """
    if args.config_file:
        with open(args.config_file) as f:
            experiments = yaml.safe_load(f)
    else:
        # Note: keep this in sync with tune/config_parser.py
        experiments = {
            args.experiment_name: {  # i.e. log to ~/ray_results/default
                "run": args.run,
                "checkpoint_freq": args.checkpoint_freq,
                "keep_checkpoints_num": args.keep_checkpoints_num,
                "checkpoint_score_attr": args.checkpoint_score_attr,
                "local_dir": args.local_dir,
                "resources_per_trial": (
                    args.resources_per_trial and
                    resources_to_json(args.resources_per_trial)),
                "stop": args.stop,
                "config": dict(args.config, env=args.env),
                "restore": args.restore,
                "num_samples": args.num_samples,
                "upload_dir": args.upload_dir,
            }
        }

    verbose = 1
    for exp in experiments.values():
        if not exp.get("run"):
            parser.error("the following arguments are required: --run")
        if not exp.get("env") and not exp.get("config", {}).get("env"):
            parser.error("the following arguments are required: --env")
        # Bug fix: experiments loaded from a YAML file may omit "config"
        # entirely; ensure it exists before the flag overrides below
        # index into it (previously a KeyError).
        exp.setdefault("config", {})
        if args.eager:
            exp["config"]["eager"] = True
        if args.torch:
            exp["config"]["use_pytorch"] = True
        if args.v:
            exp["config"]["log_level"] = "INFO"
            verbose = 2
        if args.vv:
            exp["config"]["log_level"] = "DEBUG"
            verbose = 3
        if args.trace:
            if not exp["config"].get("eager"):
                raise ValueError("Must enable --eager to enable tracing.")
            exp["config"]["eager_tracing"] = True

    if args.ray_num_nodes:
        # Emulate a multi-node cluster inside a single process (debugging).
        cluster = Cluster()
        for _ in range(args.ray_num_nodes):
            cluster.add_node(
                num_cpus=args.ray_num_cpus or 1,
                num_gpus=args.ray_num_gpus or 0,
                object_store_memory=args.ray_object_store_memory,
                memory=args.ray_memory,
                redis_max_memory=args.ray_redis_max_memory)
        ray.init(address=cluster.address)
    else:
        ray.init(
            address=args.ray_address,
            object_store_memory=args.ray_object_store_memory,
            memory=args.ray_memory,
            redis_max_memory=args.ray_redis_max_memory,
            num_cpus=args.ray_num_cpus,
            num_gpus=args.ray_num_gpus)
    run_experiments(
        experiments,
        scheduler=_make_scheduler(args),
        queue_trials=args.queue_trials,
        resume=args.resume,
        verbose=verbose,
        concurrent=True)
if __name__ == "__main__":
    # CLI entry point: parse args, then launch training.
    parser = create_parser()
    args = parser.parse_args()
    run(args, parser)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/__init__.py
|
Python
|
from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI
from ray.rllib.utils.framework import try_import_tf, try_import_tfp, \
try_import_torch
from ray.rllib.utils.deprecation import deprecation_warning, renamed_agent, \
renamed_class, renamed_function
from ray.rllib.utils.filter_manager import FilterManager
from ray.rllib.utils.filter import Filter
from ray.rllib.utils.numpy import sigmoid, softmax, relu, one_hot, fc, lstm, \
SMALL_NUMBER, LARGE_INTEGER
from ray.rllib.utils.policy_client import PolicyClient
from ray.rllib.utils.policy_server import PolicyServer
from ray.rllib.utils.test_utils import check
from ray.tune.utils import merge_dicts, deep_update
def add_mixins(base, mixins):
    """Return a new class layering ``mixins`` (in priority order) on ``base``.

    The first mixin in the list ends up earliest in the MRO. If ``mixins``
    is empty or None, ``base`` is returned unchanged.
    """
    for mixin in reversed(list(mixins or [])):

        class new_base(mixin, base):
            pass

        base = new_base
    return base
# Names re-exported at the `ray.rllib.utils` package level.
__all__ = [
    "add_mixins",
    "check",
    "deprecation_warning",
    "fc",
    "lstm",
    "one_hot",
    "relu",
    "sigmoid",
    "softmax",
    "deep_update",
    "merge_dicts",
    "override",
    "renamed_function",
    "renamed_agent",
    "renamed_class",
    "try_import_tf",
    "try_import_tfp",
    "try_import_torch",
    "DeveloperAPI",
    "Filter",
    "FilterManager",
    "LARGE_INTEGER",
    "PolicyClient",
    "PolicyServer",
    "PublicAPI",
    "SMALL_NUMBER",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/actors.py
|
Python
|
import logging
import os
import ray
logger = logging.getLogger(__name__)
class TaskPool:
    """Helper class for tracking the status of many in-flight actor tasks."""

    def __init__(self):
        self._tasks = {}  # obj_id -> worker that produced it
        self._objects = {}  # obj_id -> original (possibly list of) obj ids
        self._fetching = []  # (worker, obj_id) pairs pending local fetch

    def add(self, worker, all_obj_ids):
        # When a task returns several objects, track it by the first one.
        if isinstance(all_obj_ids, list):
            key = all_obj_ids[0]
        else:
            key = all_obj_ids
        self._tasks[key] = worker
        self._objects[key] = all_obj_ids

    def completed(self, blocking_wait=False):
        """Yield (worker, obj_ids) pairs for tasks that have finished."""
        pending = list(self._tasks)
        if not pending:
            return
        ready, _ = ray.wait(pending, num_returns=len(pending), timeout=0)
        if not ready and blocking_wait:
            # Nothing done yet: optionally block for up to 10s on one task.
            ready, _ = ray.wait(pending, num_returns=1, timeout=10.0)
        for obj_id in ready:
            yield (self._tasks.pop(obj_id), self._objects.pop(obj_id))

    def completed_prefetch(self, blocking_wait=False, max_yield=999):
        """Similar to completed but only returns once the object is local.

        Assumes obj_id only is one id."""
        self._fetching.extend(self.completed(blocking_wait=blocking_wait))
        deferred = []
        yielded = 0
        for pair in self._fetching:
            if yielded < max_yield:
                yielded += 1
                yield pair
            else:
                # Over the per-call yield budget: keep for the next call.
                deferred.append(pair)
        self._fetching = deferred

    def reset_workers(self, workers):
        """Notify that some workers may be removed."""
        for obj_id, worker in list(self._tasks.items()):
            if worker not in workers:
                del self._tasks[obj_id]
                del self._objects[obj_id]
        self._fetching = [
            pair for pair in self._fetching if pair[0] in workers
        ]

    @property
    def count(self):
        return len(self._tasks)
def drop_colocated(actors):
    """Terminate every actor on this host and return the off-host ones."""
    local, remote = split_colocated(actors)
    for actor in local:
        actor.__ray_terminate__.remote()
    return remote
def split_colocated(actors):
    """Partition `actors` into (on this host, on other hosts)."""
    this_host = os.uname()[1]
    actor_hosts = ray.get([actor.get_host.remote() for actor in actors])
    colocated = []
    remote = []
    for host, actor in zip(actor_hosts, actors):
        (colocated if host == this_host else remote).append(actor)
    return colocated, remote
def try_create_colocated(cls, args, count):
    """Create `count` actors, keep those on this host, kill the rest."""
    candidates = [cls.remote(*args) for _ in range(count)]
    colocated, others = split_colocated(candidates)
    logger.info("Got {} colocated actors of {}".format(len(colocated), count))
    for actor in others:
        actor.__ray_terminate__.remote()
    return colocated
def create_colocated(cls, args, count):
    """Create exactly `count` actors colocated on this host.

    Over-provisions by an increasing factor for up to 9 rounds, kills any
    surplus actors, and raises if still short of `count`.
    """
    logger.info("Trying to create {} colocated actors".format(count))
    collected = []
    attempt = 1
    while len(collected) < count and attempt < 10:
        collected.extend(try_create_colocated(cls, args, count * attempt))
        attempt += 1
    if len(collected) < count:
        raise Exception("Unable to create enough colocated actors, abort.")
    for surplus in collected[count:]:
        surplus.__ray_terminate__.remote()
    return collected[:count]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/annotations.py
|
Python
|
def override(cls):
    """Decorator asserting the wrapped method overrides one on ``cls``.

    Arguments:
        cls (type): The superclass expected to provide the overridden
            method; a NameError is raised at decoration time if it does
            not actually have the method.
    """

    def check_override(method):
        if method.__name__ in dir(cls):
            return method
        raise NameError("{} does not override any method of {}".format(
            method, cls))

    return check_override
def PublicAPI(obj):
    """Decorator marking classes/methods as part of RLlib's public API.

    Public APIs are exposed to end users of RLlib and can be expected to
    remain stable across RLlib releases. Subclasses of a ``@PublicAPI``
    base class (e.g. all trainer classes, via Trainer) are part of the
    public API as well, as are all trainer configurations.

    This is a documentation-only marker: the object is returned unchanged.
    """
    return obj
def DeveloperAPI(obj):
    """Decorator marking classes/methods as RLlib developer API.

    Developer APIs are explicitly exposed for building custom algorithms
    or advanced training strategies on top of RLlib internals. They are
    generally stable sans minor changes (but less stable than public
    APIs). Subclasses of a ``@DeveloperAPI`` base class (e.g. all policy
    optimizers, via PolicyOptimizer) are developer API as well.

    This is a documentation-only marker: the object is returned unchanged.
    """
    return obj
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/compression.py
|
Python
|
from ray.rllib.utils.annotations import DeveloperAPI
import logging
import time
import base64
import numpy as np
import pyarrow
from six import string_types
logger = logging.getLogger(__name__)
# Optional dependency: lz4 provides the fast compression used by pack()/
# unpack(); without it those functions degrade to pass-throughs.
try:
    import lz4.frame
    LZ4_ENABLED = True
except ImportError:
    logger.warning("lz4 not available, disabling sample compression. "
                   "This will significantly impact RLlib performance. "
                   "To install lz4, run `pip install lz4`.")
    LZ4_ENABLED = False
@DeveloperAPI
def compression_supported():
    # True iff the optional lz4 dependency was importable at module load.
    return LZ4_ENABLED
@DeveloperAPI
def pack(data):
    """Serialize and (when lz4 is available) compress ``data``.

    Returns a base64-encoded ascii string when compression is enabled;
    otherwise returns ``data`` unchanged.

    NOTE(review): pyarrow.serialize() is deprecated/removed in newer
    pyarrow releases — confirm the pinned pyarrow version still provides
    it before upgrading.
    """
    if LZ4_ENABLED:
        data = pyarrow.serialize(data).to_buffer().to_pybytes()
        data = lz4.frame.compress(data)
        # TODO(ekl) we shouldn't need to base64 encode this data, but this
        # seems to not survive a transfer through the object store if we don't.
        data = base64.b64encode(data).decode("ascii")
    return data
@DeveloperAPI
def pack_if_needed(data):
    """Pack ``data`` only when it is a numpy array; pass through otherwise."""
    if not isinstance(data, np.ndarray):
        return data
    return pack(data)
@DeveloperAPI
def unpack(data):
    """Inverse of pack(): base64-decode, lz4-decompress, deserialize.

    No-op (returns ``data`` unchanged) when lz4 support is disabled.
    """
    if LZ4_ENABLED:
        data = base64.b64decode(data)
        data = lz4.frame.decompress(data)
        data = pyarrow.deserialize(data)
    return data
@DeveloperAPI
def unpack_if_needed(data):
    """Unpack ``data`` when it looks like a packed payload (str/bytes)."""
    return unpack(data) if is_compressed(data) else data
@DeveloperAPI
def is_compressed(data):
    """Heuristic: packed payloads are bytes or (base64) strings."""
    return isinstance(data, (bytes,) + tuple(string_types))
# Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz
# Compression speed: 753.664 MB/s
# Compression ratio: 87.4839812046
# Decompression speed: 910.9504 MB/s
if __name__ == "__main__":
    # Micro-benchmark: measure pack()/unpack() throughput for ~1 second
    # each on a (32, 80, 80, 4) array.
    size = 32 * 80 * 80 * 4
    data = np.ones(size).reshape((32, 80, 80, 4))
    # Bug fix: np.ones() allocates float64 (8 bytes/element), so the old
    # `size * 4` byte count under-reported throughput and ratio by 2x.
    # Use the array's actual byte size instead.
    nbytes = data.nbytes
    count = 0
    start = time.time()
    while time.time() - start < 1:
        pack(data)
        count += 1
    compressed = pack(data)
    print("Compression speed: {} MB/s".format(count * nbytes / 1e6))
    print("Compression ratio: {}".format(round(nbytes / len(compressed), 2)))
    count = 0
    start = time.time()
    while time.time() - start < 1:
        unpack(compressed)
        count += 1
    print("Decompression speed: {} MB/s".format(count * nbytes / 1e6))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/debug.py
|
Python
|
import numpy as np
import pprint
import time
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
# Module-level state backing log_once().
_logged = set()
_disabled = False
_periodic_log = False
_last_logged = 0.0
_printer = pprint.PrettyPrinter(indent=2, width=60)


def log_once(key):
    """Returns True if this is the "first" call for a given key.

    Various logging settings can adjust the definition of "first".

    Example:
        >>> if log_once("some_key"):
        ...     logger.info("Some verbose logging statement")
    """
    global _last_logged

    if _disabled:
        return False
    if key not in _logged:
        _logged.add(key)
        _last_logged = time.time()
        return True
    # Periodic mode: after 60s of silence, forget all keys so the next
    # call for each key fires again (this call itself returns False).
    if _periodic_log and time.time() - _last_logged > 60.0:
        _logged.clear()
        _last_logged = time.time()
    return False
def disable_log_once_globally():
    """Make log_once() return False in this process.

    Sets the module-level `_disabled` flag that log_once() checks first.
    """
    global _disabled
    _disabled = True
def enable_periodic_logging():
    """Make log_once() periodically return True in this process.

    Sets the module-level `_periodic_log` flag; log_once() then clears its
    seen-key set whenever more than 60s have passed since the last hit.
    """
    global _periodic_log
    _periodic_log = True
def summarize(obj):
    """Return a pretty-formatted string for an object.

    This has special handling for pretty-formatting of commonly used data types
    in RLlib, such as SampleBatch, numpy arrays, etc.
    """
    # _summarize() converts obj into a printable summary tree; _printer is
    # the module-level pprint.PrettyPrinter (indent=2, width=60).
    return _printer.pformat(_summarize(obj))
def _summarize(obj):
if isinstance(obj, dict):
return {k: _summarize(v) for k, v in obj.items()}
elif hasattr(obj, "_asdict"):
return {
"type": obj.__class__.__name__,
"data": _summarize(obj._asdict()),
}
elif isinstance(obj, list):
return [_summarize(x) for x in obj]
elif isinstance(obj, tuple):
return tuple(_summarize(x) for x in obj)
elif isinstance(obj, np.ndarray):
if obj.size == 0:
return _StringValue("np.ndarray({}, dtype={})".format(
obj.shape, obj.dtype))
elif obj.dtype == np.object:
return _StringValue("np.ndarray({}, dtype={}, head={})".format(
obj.shape, obj.dtype, _summarize(obj[0])))
else:
return _StringValue(
"np.ndarray({}, dtype={}, min={}, max={}, mean={})".format(
obj.shape, obj.dtype, round(float(np.min(obj)), 3),
round(float(np.max(obj)), 3), round(
float(np.mean(obj)), 3)))
elif isinstance(obj, MultiAgentBatch):
return {
"type": "MultiAgentBatch",
"policy_batches": _summarize(obj.policy_batches),
"count": obj.count,
}
elif isinstance(obj, SampleBatch):
return {
"type": "SampleBatch",
"data": {k: _summarize(v)
for k, v in obj.items()},
}
else:
return obj
class _StringValue:
def __init__(self, value):
self.value = value
def __repr__(self):
return self.value
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/deprecation.py
|
Python
|
import logging
logger = logging.getLogger(__name__)


def deprecation_warning(old, new=None):
    """Log a deprecation warning for `old`, suggesting `new` if given.

    Arguments:
        old (str): Name of the deprecated API.
        new (str): Optional fully-qualified replacement to point users to.
    """
    logger.warning(
        "DeprecationWarning: `{}` has been deprecated.".format(old) +
        # Bug fix: the replacement hint was missing .format(new), so the
        # literal "{}" placeholder was logged instead of the new name.
        (" Use `{}` instead.".format(new) if new else "") +
        " This will raise an error in the future!")
def renamed_class(cls, old_name):
    """Helper class for renaming classes with a warning.

    Returns a subclass of `cls` that logs a deprecation warning (pointing
    from `old_name` to the new module path) whenever it is instantiated,
    then delegates to the real __init__.
    """

    class DeprecationWrapper(cls):
        # note: **kw not supported for ray.remote classes
        def __init__(self, *args, **kw):
            new_name = cls.__module__ + "." + cls.__name__
            deprecation_warning(old_name, new_name)
            cls.__init__(self, *args, **kw)

    # Make the wrapper indistinguishable from the real class by name.
    DeprecationWrapper.__name__ = cls.__name__

    return DeprecationWrapper
def renamed_agent(cls):
    """Helper class for renaming Agent => Trainer with a warning.

    Wraps a Trainer class so that constructing it under its legacy
    "*Agent" name logs a deprecation warning pointing at the new
    "*Trainer" module path, then delegates to the real __init__.
    """

    class DeprecationWrapper(cls):
        def __init__(self, config=None, env=None, logger_creator=None):
            # Derive the legacy name ("FooAgent") from the new one.
            old_name = cls.__name__.replace("Trainer", "Agent")
            new_name = cls.__module__ + "." + cls.__name__
            deprecation_warning(old_name, new_name)
            cls.__init__(self, config, env, logger_creator)

    # Make the wrapper indistinguishable from the real class by name.
    DeprecationWrapper.__name__ = cls.__name__

    return DeprecationWrapper
def renamed_function(func, old_name):
    """Helper function for renaming a function.

    Returns a wrapper that logs a deprecation warning (from `old_name` to
    the function's fully-qualified new name) on every call, then delegates.
    """

    def deprecation_wrapper(*args, **kwargs):
        fully_qualified = func.__module__ + "." + func.__name__
        deprecation_warning(old_name, fully_qualified)
        return func(*args, **kwargs)

    # Keep the wrapper indistinguishable from the real function by name.
    deprecation_wrapper.__name__ = func.__name__
    return deprecation_wrapper
def moved_function(func):
    """Warn that `func` has moved to a new module, then return it unchanged.

    NOTE(review): the warning fires when moved_function() itself runs
    (i.e. at import/decoration time in the old location), not when the
    returned function is later called — confirm this is the intent.
    """
    new_location = func.__module__
    deprecation_warning("import {}".format(func.__name__), "import {}".
                        format(new_location))
    return func
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/error.py
|
Python
|
from ray.rllib.utils.annotations import PublicAPI
@PublicAPI
class UnsupportedSpaceException(Exception):
"""Error for an unsupported action or observation space."""
pass
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/explained_variance.py
|
Python
|
from ray.rllib.utils import try_import_tf, try_import_torch
tf = try_import_tf()
torch, nn = try_import_torch()
def explained_variance(y, pred, framework="tf"):
    """Compute 1 - Var[y - pred] / Var[y], clamped below at -1.0.

    A value of 1 means `pred` explains `y` perfectly; 0 means it does no
    better than a constant; values below -1 are clipped to -1.

    Arguments:
        y: Target tensor (variance is taken along axis/dim 0).
        pred: Prediction tensor, same shape as `y`.
        framework (str): "tf" to use TensorFlow ops; anything else uses
            PyTorch.
    """
    if framework == "tf":
        _, y_var = tf.nn.moments(y, axes=[0])
        _, diff_var = tf.nn.moments(y - pred, axes=[0])
        return tf.maximum(-1.0, 1 - (diff_var / y_var))
    else:
        y_var = torch.var(y, dim=[0])
        diff_var = torch.var(y - pred, dim=[0])
        # Bug fix: mirror tf.maximum with an elementwise clamp. The builtin
        # max() raised "bool of multi-element tensor is ambiguous" for
        # non-scalar variances and mixed float/tensor return types.
        return torch.clamp(1 - (diff_var / y_var), min=-1.0)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/filter.py
|
Python
|
import logging
import numpy as np
import threading
logger = logging.getLogger(__name__)
class Filter:
    """Processes input, possibly statefully.

    Abstract interface; see NoFilter and MeanStdFilter for implementations.
    """

    def apply_changes(self, other, *args, **kwargs):
        """Updates self with "new state" from other filter."""
        raise NotImplementedError

    def copy(self):
        """Creates a new object with same state as self.

        Returns:
            A copy of self.
        """
        raise NotImplementedError

    def sync(self, other):
        """Copies all state from other filter to self."""
        raise NotImplementedError

    def clear_buffer(self):
        """Creates copy of current state and clears accumulated state"""
        raise NotImplementedError

    def as_serializable(self):
        """Returns a variant of self suitable for shipping to workers."""
        raise NotImplementedError
class NoFilter(Filter):
    """Pass-through filter: converts input to an ndarray, keeps no state."""
    is_concurrent = True

    def __init__(self, *args):
        pass

    def __call__(self, x, update=True):
        try:
            arr = np.asarray(x)
        except Exception:
            raise ValueError("Failed to convert to array", x)
        return arr

    def apply_changes(self, other, *args, **kwargs):
        # Stateless: nothing to apply.
        pass

    def copy(self):
        # Stateless, so self is its own copy.
        return self

    def sync(self, other):
        # Stateless: nothing to sync.
        pass

    def clear_buffer(self):
        pass

    def as_serializable(self):
        return self
class RunningStat:
    """Online mean/variance tracker.

    Uses Welford's single-sample update plus the parallel-merge formula;
    see http://www.johndcook.com/blog/standard_deviation/
    """

    def __init__(self, shape=None):
        self._n = 0
        self._M = np.zeros(shape)
        self._S = np.zeros(shape)

    def copy(self):
        clone = RunningStat()
        clone._n = self._n
        clone._M = np.copy(self._M)
        clone._S = np.copy(self._S)
        return clone

    def push(self, x):
        """Fold a single observation into the running statistics."""
        x = np.asarray(x)
        if x.shape != self._M.shape:
            raise ValueError(
                "Unexpected input shape {}, expected {}, value = {}".format(
                    x.shape, self._M.shape, x))
        prev_n = self._n
        self._n += 1
        if self._n == 1:
            self._M[...] = x
            return
        delta = x - self._M
        self._M[...] += delta / self._n
        self._S[...] += delta * delta * prev_n / self._n

    def update(self, other):
        """Merge another RunningStat into this one (parallel combine)."""
        n_total = self._n + other._n
        if n_total == 0:
            # Avoid divide by zero, which creates nans.
            return
        delta_sq = np.square(self._M - other._M)
        merged_mean = (self._n * self._M + other._n * other._M) / n_total
        merged_S = (self._S + other._S +
                    delta_sq * self._n * other._n / n_total)
        self._n = n_total
        self._M = merged_mean
        self._S = merged_S

    def __repr__(self):
        return "(n={}, mean_mean={}, mean_std={})".format(
            self.n, np.mean(self.mean), np.mean(self.std))

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # Sample variance; with a single sample, fall back to mean**2.
        return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
class MeanStdFilter(Filter):
    """Keeps track of a running mean for seen states.

    Normalizes inputs by the running mean/std (with optional clipping)
    while accumulating observed values into `rs` and, separately, into
    `buffer` (the delta since the last clear_buffer()).
    """
    is_concurrent = False

    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        # shape: shape of a single (unbatched) observation.
        # demean/destd: whether to subtract mean / divide by std.
        # clip: symmetric clip bound applied after normalization (or falsy
        # to disable clipping).
        self.shape = shape
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)
        # In distributed rollouts, each worker sees different states.
        # The buffer is used to keep track of deltas amongst all the
        # observation filters.
        self.buffer = RunningStat(shape)

    def clear_buffer(self):
        self.buffer = RunningStat(self.shape)

    def apply_changes(self, other, with_buffer=False):
        """Applies updates from the buffer of another filter.

        Params:
            other (MeanStdFilter): Other filter to apply info from
            with_buffer (bool): Flag for specifying if the buffer should be
                copied from other.

        Examples:
            >>> a = MeanStdFilter(())
            >>> a(1)
            >>> a(2)
            >>> print([a.rs.n, a.rs.mean, a.buffer.n])
            [2, 1.5, 2]
            >>> b = MeanStdFilter(())
            >>> b(10)
            >>> a.apply_changes(b, with_buffer=False)
            >>> print([a.rs.n, a.rs.mean, a.buffer.n])
            [3, 4.333333333333333, 2]
            >>> a.apply_changes(b, with_buffer=True)
            >>> print([a.rs.n, a.rs.mean, a.buffer.n])
            [4, 5.75, 1]
        """
        self.rs.update(other.buffer)
        if with_buffer:
            self.buffer = other.buffer.copy()

    def copy(self):
        """Returns a copy of Filter."""
        other = MeanStdFilter(self.shape)
        other.sync(self)
        return other

    def as_serializable(self):
        return self.copy()

    def sync(self, other):
        """Syncs all fields together from other filter.

        Examples:
            >>> a = MeanStdFilter(())
            >>> a(1)
            >>> a(2)
            >>> print([a.rs.n, a.rs.mean, a.buffer.n])
            [2, array(1.5), 2]
            >>> b = MeanStdFilter(())
            >>> b(10)
            >>> print([b.rs.n, b.rs.mean, b.buffer.n])
            [1, array(10.0), 1]
            >>> a.sync(b)
            >>> print([a.rs.n, a.rs.mean, a.buffer.n])
            [1, array(10.0), 1]
        """
        assert other.shape == self.shape, "Shapes don't match!"
        self.demean = other.demean
        self.destd = other.destd
        self.clip = other.clip
        self.rs = other.rs.copy()
        self.buffer = other.buffer.copy()

    def __call__(self, x, update=True):
        # Normalize x; when update=True, also fold it into rs and buffer
        # *before* normalizing.
        x = np.asarray(x)
        if update:
            if len(x.shape) == len(self.rs.shape) + 1:
                # The vectorized case.
                for i in range(x.shape[0]):
                    self.rs.push(x[i])
                    self.buffer.push(x[i])
            else:
                # The unvectorized case.
                self.rs.push(x)
                self.buffer.push(x)
        if self.demean:
            x = x - self.rs.mean
        if self.destd:
            # Epsilon guards against division by zero for constant inputs.
            x = x / (self.rs.std + 1e-8)
        if self.clip:
            x = np.clip(x, -self.clip, self.clip)
        return x

    def __repr__(self):
        return "MeanStdFilter({}, {}, {}, {}, {}, {})".format(
            self.shape, self.demean, self.destd, self.clip, self.rs,
            self.buffer)
class ConcurrentMeanStdFilter(MeanStdFilter):
    """MeanStdFilter variant intended to be safe for concurrent use."""
    is_concurrent = True

    def __init__(self, *args, **kwargs):
        super(ConcurrentMeanStdFilter, self).__init__(*args, **kwargs)
        self._lock = threading.RLock()

        def lock_wrap(func):
            def wrapper(*args, **kwargs):
                with self._lock:
                    return func(*args, **kwargs)

            return wrapper

        # NOTE(review): assigning __getattribute__ on the *instance* has no
        # effect -- Python resolves dunder methods on the type, so this
        # lock wrapping is effectively a no-op and the filter is not
        # actually synchronized. Confirm whether locking is required here.
        self.__getattribute__ = lock_wrap(self.__getattribute__)

    def as_serializable(self):
        """Returns non-concurrent version of current class"""
        other = MeanStdFilter(self.shape)
        other.sync(self)
        return other

    def copy(self):
        """Returns a copy of Filter."""
        other = ConcurrentMeanStdFilter(self.shape)
        other.sync(self)
        return other

    def __repr__(self):
        return "ConcurrentMeanStdFilter({}, {}, {}, {}, {}, {})".format(
            self.shape, self.demean, self.destd, self.clip, self.rs,
            self.buffer)
def get_filter(filter_config, shape):
    """Instantiate the observation filter named by `filter_config`."""
    # TODO(rliaw): move this into filter manager
    if filter_config == "NoFilter":
        return NoFilter()
    if filter_config == "MeanStdFilter":
        return MeanStdFilter(shape, clip=None)
    if filter_config == "ConcurrentMeanStdFilter":
        return ConcurrentMeanStdFilter(shape, clip=None)
    raise Exception("Unknown observation_filter: " + str(filter_config))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/filter_manager.py
|
Python
|
import ray
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.memory import ray_get_and_free
@DeveloperAPI
class FilterManager:
    """Manages filters and coordination across remote evaluators that expose
    `get_filters` and `sync_filters`.
    """

    @staticmethod
    @DeveloperAPI
    def synchronize(local_filters, remotes, update_remote=True):
        """Aggregates all filters from remote evaluators.

        Local copy is updated and then broadcasted to all remote evaluators.

        Args:
            local_filters (dict): Filters to be synchronized.
            remotes (list): Remote evaluators with filters.
            update_remote (bool): Whether to push updates to remote filters.
        """
        # Pull (and flush) each remote's accumulated filter deltas, then
        # fold them into the local filters.
        futures = [r.get_filters.remote(flush_after=True) for r in remotes]
        for remote_filters in ray_get_and_free(futures):
            for key in local_filters:
                local_filters[key].apply_changes(
                    remote_filters[key], with_buffer=False)
        if update_remote:
            serializable = {
                key: f.as_serializable()
                for key, f in local_filters.items()
            }
            # Broadcast one shared copy of the merged filters to everyone.
            broadcast = ray.put(serializable)
            for r in remotes:
                r.sync_filters.remote(broadcast)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/framework.py
|
Python
|
import logging
import os
logger = logging.getLogger(__name__)
def try_import_tf():
    """Import TensorFlow, preferring the tf2 compat.v1 module.

    Returns:
        The tf module (tf2's compat.v1 with v2 behavior disabled, or a
        plain tf1.x import), or None if TensorFlow is unavailable or
        disabled via RLLIB_TEST_NO_TF_IMPORT.
    """
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow for test purposes")
        return None

    if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Preferred path: TF2 installed -- use the v1 compatibility module.
    try:
        import tensorflow.compat.v1 as tf
        tf.logging.set_verbosity(tf.logging.ERROR)
        tf.disable_v2_behavior()
        return tf
    except ImportError:
        pass

    # Fallback: a plain TF1.x installation.
    try:
        import tensorflow as tf
        return tf
    except ImportError:
        return None
def try_import_tfp():
    """Import tensorflow_probability.

    Returns:
        The tfp module, or None if it is unavailable or disabled via
        RLLIB_TEST_NO_TF_IMPORT.
    """
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow Probability for test "
                       "purposes.")
        return None

    try:
        import tensorflow_probability as tfp
    except ImportError:
        return None
    return tfp
def try_import_torch():
    """Import torch and torch.nn.

    Returns:
        tuple: (torch, torch.nn), or (None, None) if torch is unavailable
        or disabled via RLLIB_TEST_NO_TORCH_IMPORT.
    """
    if "RLLIB_TEST_NO_TORCH_IMPORT" in os.environ:
        logger.warning("Not importing Torch for test purposes.")
        return None, None

    try:
        import torch
        import torch.nn as nn
    except ImportError:
        return None, None
    return torch, nn
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/memory.py
|
Python
|
import numpy as np
import time
import ray
# Maximum age of the free queue before it is flushed via ray.internal.free.
FREE_DELAY_S = 10.0
# Flush the free queue early once it holds more than this many object ids.
MAX_FREE_QUEUE_SIZE = 100
# Module-level state for batching frees across ray_get_and_free() calls.
_last_free_time = 0.0
_to_free = []
def ray_get_and_free(object_ids):
    """Call ray.get and then queue the object ids for deletion.

    This function should be used whenever possible in RLlib, to optimize
    memory usage. The only exception is when an object_id is shared among
    multiple readers.

    Args:
        object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.

    Returns:
        The result of ray.get(object_ids).
    """
    global _last_free_time
    global _to_free

    result = ray.get(object_ids)
    # Normalize to a list so the free-queue handling below is uniform.
    # (isinstance instead of a `type(...) is` check so list subclasses are
    # treated as lists rather than being queued as a single opaque item.)
    if not isinstance(object_ids, list):
        object_ids = [object_ids]
    _to_free.extend(object_ids)

    # Batch calls to ray.internal.free() to reduce per-call overhead:
    # flush when the queue grows large OR enough wall-clock time passed.
    now = time.time()
    if (len(_to_free) > MAX_FREE_QUEUE_SIZE
            or now - _last_free_time > FREE_DELAY_S):
        ray.internal.free(_to_free)
        _to_free = []
        _last_free_time = now

    return result
def aligned_array(size, dtype, align=64):
    """Returns an array of a given size that is 64-byte aligned.

    The returned array can be efficiently copied into GPU memory by TensorFlow.
    """
    n_bytes = size * dtype.itemsize
    # Over-allocate raw bytes so we can shift to an aligned start address.
    raw = np.empty(n_bytes + (align - 1), dtype=np.uint8)
    misalignment = raw.ctypes.data % align
    start = (align - misalignment) if misalignment else 0
    if n_bytes:
        out = raw[start:start + n_bytes].view(dtype)
    else:
        # Keep a zero-length view into `raw` so numpy does not optimize
        # away the reference to the underlying (aligned) buffer.
        out = raw[start:start + 1][0:0].view(dtype)
    assert len(out) == size, len(out)
    assert out.ctypes.data % align == 0, out.ctypes.data
    return out


def concat_aligned(items):
    """Concatenate arrays, ensuring the output is 64-byte aligned.

    We only align float arrays; other arrays are concatenated as normal.

    This should be used instead of np.concatenate() to improve performance
    when the output array is likely to be fed into TensorFlow.
    """
    if not items:
        return []
    if len(items) == 1:
        # we assume the input is aligned. In any case, it doesn't help
        # performance to force align it since that incurs a needless copy.
        return items[0]
    first = items[0]
    if isinstance(first, np.ndarray) and first.dtype in [
            np.float32, np.float64, np.uint8
    ]:
        # Pre-allocate an aligned flat buffer, then concatenate into it.
        total_elems = sum(arr.size for arr in items)
        dest = aligned_array(total_elems, first.dtype)
        rows = sum(arr.shape[0] for arr in items)
        dest = dest.reshape((rows, ) + first.shape[1:])
        assert dest.ctypes.data % 64 == 0, dest.ctypes.data
        np.concatenate(items, out=dest)
        return dest
    return np.concatenate(items)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/numpy.py
|
Python
|
import numpy as np
SMALL_NUMBER = 1e-6
# Some large int number. May be increased here, if needed.
LARGE_INTEGER = 100000000
# Min and Max outputs (clipped) from an NN-output layer interpreted as the
# log(x) of some x (e.g. a stddev of a normal
# distribution).
MIN_LOG_NN_OUTPUT = -20
MAX_LOG_NN_OUTPUT = 2
def sigmoid(x, derivative=False):
    """
    Returns the sigmoid function applied to x.
    Alternatively, can return the derivative or the sigmoid function.

    Args:
        x (np.ndarray): The input to the sigmoid function.
        derivative (bool): Whether to return the derivative or not.
            Default: False.

    Returns:
        np.ndarray: The sigmoid function (or its derivative) applied to x.
    """
    # NOTE: the derivative form s*(1-s) expects `x` to already be a
    # sigmoid *output*, not a raw pre-activation value.
    if derivative:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
def softmax(x, axis=-1, epsilon=1e-6):
    """
    Returns the softmax values for x as:
    S(xi) = e^xi / SUMj(e^xj), where j goes over all elements in x.

    Args:
        x (np.ndarray): The input to the softmax function.
        axis (int): The axis along which to softmax.
        epsilon (float): Lower-bound clip applied to the output so that
            downstream log() calls never see exactly 0.0. Default: 1e-6
            (same value as the module's SMALL_NUMBER).

    Returns:
        np.ndarray: The softmax over x.
    """
    x = np.asarray(x)
    # Subtract the per-axis max before exponentiating: softmax is shift
    # invariant, and this prevents overflow (inf/nan) for large inputs.
    x_shifted = x - np.max(x, axis=axis, keepdims=True)
    x_exp = np.exp(x_shifted)
    return np.maximum(
        x_exp / np.sum(x_exp, axis=axis, keepdims=True), epsilon)
def relu(x, alpha=0.0):
    """
    Implementation of the leaky ReLU function:
    y = x * alpha if x < 0 else x

    Args:
        x (np.ndarray): The input values.
        alpha (float): A scaling ("leak") factor to use for negative x.

    Returns:
        np.ndarray: The leaky ReLU output for x.
    """
    # BUG FIX: the previous version passed `x` as np.maximum's third
    # positional (`out`) argument, silently overwriting the caller's input
    # array in place (and raising for scalar inputs). Return a new array.
    return np.maximum(x, x * alpha)
def one_hot(x, depth=0, on_value=1, off_value=0):
    """
    One-hot utility function for numpy.
    Thanks to qianyizhang:
    https://gist.github.com/qianyizhang/07ee1c15cad08afb03f5de69349efc30.

    Args:
        x (np.ndarray): The input to be one-hot encoded.
        depth (int): The max. number to be one-hot encoded (size of last rank).
        on_value (float): The value to use for on. Default: 1.0.
        off_value (float): The value to use for off. Default: 0.0.

    Returns:
        np.ndarray: The one-hot encoded equivalent of the input array.
    """
    # Handle bool arrays correctly.
    if x.dtype == np.bool_:
        # BUG FIX: `np.int` was removed in NumPy 1.24; use a concrete dtype.
        x = x.astype(np.int64)
        depth = 2

    # If depth is not given, infer it from the values present in `x`.
    if depth == 0:
        depth = np.max(x) + 1
    assert np.max(x) < depth, \
        "ERROR: The max. index of `x` ({}) is larger than depth ({})!".\
        format(np.max(x), depth)
    shape = x.shape

    # BUG FIX: `shape` is a tuple, so the old `shape[:]` copy was still a
    # tuple and the following .append() raised AttributeError for every
    # input. Convert to a list before appending the one-hot rank.
    shape_list = list(shape)
    shape_list.append(depth)
    out = np.ones(shape_list) * off_value

    # Build an advanced-indexing tuple that addresses, for every position
    # in `x`, the (..., x) element of `out`, then set those to `on_value`.
    indices = []
    for i in range(x.ndim):
        tiles = [1] * x.ndim
        s = [1] * x.ndim
        s[i] = -1
        r = np.arange(shape[i]).reshape(s)
        if i > 0:
            tiles[i - 1] = shape[i - 1]
            r = np.tile(r, tiles)
        indices.append(r)
    indices.append(x)
    out[tuple(indices)] = on_value
    return out
def fc(x, weights, biases=None):
    """
    Calculates the outputs of a fully-connected (dense) layer given
    weights/biases and an input.

    Args:
        x (np.ndarray): The input to the dense layer.
        weights (np.ndarray): The weights matrix.
        biases (Optional[np.ndarray]): The biases vector. All 0s if None.

    Returns:
        The dense layer's output.
    """
    bias_term = 0.0 if biases is None else biases
    return np.matmul(x, weights) + bias_term
def lstm(x, weights, biases=None, initial_internal_states=None,
         time_major=False, forget_bias=1.0):
    """
    Calculates the outputs of an LSTM layer given weights/biases,
    internal_states, and input.

    Args:
        x (np.ndarray): The inputs to the LSTM layer including time-rank
            (0th if time-major, else 1st) and the batch-rank
            (1st if time-major, else 0th).
        weights (np.ndarray): The weights matrix.
        biases (Optional[np.ndarray]): The biases vector. All 0s if None.
        initial_internal_states (Optional[np.ndarray]): The initial internal
            states to pass into the layer. All 0s if None.
        time_major (bool): Whether to use time-major or not. Default: False.
        forget_bias (float): Gets added to first sigmoid (forget gate) output.
            Default: 1.0.

    Returns:
        Tuple:
            - The LSTM layer's output.
            - Tuple: Last (c-state, h-state).
    """
    sequence_length = x.shape[0 if time_major else 1]
    batch_size = x.shape[1 if time_major else 0]
    units = weights.shape[1] // 4  # 4 internal layers (3x sigmoid, 1x tanh)

    # BUG FIX: the docstring promises "All 0s if None", but passing
    # biases=None used to crash in the matmul-plus-bias below.
    if biases is None:
        biases = np.zeros(weights.shape[1])

    if initial_internal_states is None:
        c_states = np.zeros(shape=(batch_size, units))
        h_states = np.zeros(shape=(batch_size, units))
    else:
        c_states = initial_internal_states[0]
        h_states = initial_internal_states[1]

    # Create a placeholder for all n-time step outputs.
    if time_major:
        unrolled_outputs = np.zeros(shape=(sequence_length, batch_size, units))
    else:
        unrolled_outputs = np.zeros(shape=(batch_size, sequence_length, units))

    # Push the batch through the LSTM cell once per time step and capture
    # the outputs plus the final h- and c-states.
    for t in range(sequence_length):
        input_matrix = x[t, :, :] if time_major else x[:, t, :]
        input_matrix = np.concatenate((input_matrix, h_states), axis=1)
        input_matmul_matrix = np.matmul(input_matrix, weights) + biases
        # Forget gate (3rd slot in tf output matrix). Add static forget bias.
        sigmoid_1 = sigmoid(input_matmul_matrix[:, units*2:units*3] +
                            forget_bias)
        c_states = np.multiply(c_states, sigmoid_1)
        # Add gate (1st and 2nd slots in tf output matrix).
        sigmoid_2 = sigmoid(input_matmul_matrix[:, 0:units])
        tanh_3 = np.tanh(input_matmul_matrix[:, units:units*2])
        c_states = np.add(c_states, np.multiply(sigmoid_2, tanh_3))
        # Output gate (last slot in tf output matrix).
        sigmoid_4 = sigmoid(input_matmul_matrix[:, units*3:units*4])
        h_states = np.multiply(sigmoid_4, np.tanh(c_states))
        # Store this output time-slice.
        if time_major:
            unrolled_outputs[t, :, :] = h_states
        else:
            unrolled_outputs[:, t, :] = h_states

    return unrolled_outputs, (c_states, h_states)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/policy_client.py
|
Python
|
import logging
import pickle
from ray.rllib.utils.annotations import PublicAPI
logger = logging.getLogger(__name__)
try:
import requests # `requests` is not part of stdlib.
except ImportError:
requests = None
logger.warning(
"Couldn't import `requests` library. Be sure to install it on"
" the client side.")
@PublicAPI
class PolicyClient:
    """REST client to interact with a RLlib policy server."""

    # Command names understood by the corresponding PolicyServer.
    START_EPISODE = "START_EPISODE"
    GET_ACTION = "GET_ACTION"
    LOG_ACTION = "LOG_ACTION"
    LOG_RETURNS = "LOG_RETURNS"
    END_EPISODE = "END_EPISODE"

    @PublicAPI
    def __init__(self, address):
        self._address = address

    @PublicAPI
    def start_episode(self, episode_id=None, training_enabled=True):
        """Record the start of an episode.

        Arguments:
            episode_id (str): Unique string id for the episode or None for
                it to be auto-assigned.
            training_enabled (bool): Whether to use experiences for this
                episode to improve the policy.

        Returns:
            episode_id (str): Unique string id for the episode.
        """
        reply = self._send({
            "episode_id": episode_id,
            "command": PolicyClient.START_EPISODE,
            "training_enabled": training_enabled,
        })
        return reply["episode_id"]

    @PublicAPI
    def get_action(self, episode_id, observation):
        """Record an observation and get the on-policy action.

        Arguments:
            episode_id (str): Episode id returned from start_episode().
            observation (obj): Current environment observation.

        Returns:
            action (obj): Action from the env action space.
        """
        reply = self._send({
            "command": PolicyClient.GET_ACTION,
            "observation": observation,
            "episode_id": episode_id,
        })
        return reply["action"]

    @PublicAPI
    def log_action(self, episode_id, observation, action):
        """Record an observation and (off-policy) action taken.

        Arguments:
            episode_id (str): Episode id returned from start_episode().
            observation (obj): Current environment observation.
            action (obj): Action for the observation.
        """
        self._send({
            "command": PolicyClient.LOG_ACTION,
            "observation": observation,
            "action": action,
            "episode_id": episode_id,
        })

    @PublicAPI
    def log_returns(self, episode_id, reward, info=None):
        """Record returns from the environment.

        The reward will be attributed to the previous action taken by the
        episode. Rewards accumulate until the next action. If no reward is
        logged before the next action, a reward of 0.0 is assumed.

        Arguments:
            episode_id (str): Episode id returned from start_episode().
            reward (float): Reward from the environment.
        """
        self._send({
            "command": PolicyClient.LOG_RETURNS,
            "reward": reward,
            "info": info,
            "episode_id": episode_id,
        })

    @PublicAPI
    def end_episode(self, episode_id, observation):
        """Record the end of an episode.

        Arguments:
            episode_id (str): Episode id returned from start_episode().
            observation (obj): Current environment observation.
        """
        self._send({
            "command": PolicyClient.END_EPISODE,
            "observation": observation,
            "episode_id": episode_id,
        })

    def _send(self, data):
        # Requests and replies travel as pickled dicts; the server is
        # assumed trusted (pickle is unsafe against a malicious peer).
        payload = pickle.dumps(data)
        response = requests.post(self._address, data=payload)
        if response.status_code != 200:
            logger.error("Request failed {}: {}".format(response.text, data))
        response.raise_for_status()
        return pickle.loads(response.content)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/policy_server.py
|
Python
|
import pickle
import traceback
from http.server import SimpleHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils.policy_client import PolicyClient
@PublicAPI
class PolicyServer(ThreadingMixIn, HTTPServer):
    """REST server than can be launched from a ExternalEnv.

    This launches a multi-threaded server that listens on the specified host
    and port to serve policy requests and forward experiences to RLlib.

    Examples:
        >>> class CartpoleServing(ExternalEnv):
               def __init__(self):
                   ExternalEnv.__init__(
                       self, spaces.Discrete(2),
                       spaces.Box(
                           low=-10,
                           high=10,
                           shape=(4,),
                           dtype=np.float32))
               def run(self):
                   server = PolicyServer(self, "localhost", 8900)
                   server.serve_forever()
        >>> register_env("srv", lambda _: CartpoleServing())
        >>> pg = PGTrainer(env="srv", config={"num_workers": 0})
        >>> while True:
                pg.train()

        >>> client = PolicyClient("localhost:8900")
        >>> eps_id = client.start_episode()
        >>> action = client.get_action(eps_id, obs)
        >>> ...
        >>> client.log_returns(eps_id, reward)
        >>> ...
        >>> client.log_returns(eps_id, reward)
    """

    @PublicAPI
    def __init__(self, external_env, address, port):
        # Each incoming request runs on its own thread (ThreadingMixIn),
        # handled by a request-handler class bound to the external env.
        request_handler = _make_handler(external_env)
        HTTPServer.__init__(self, (address, port), request_handler)
def _make_handler(external_env):
    """Build a request-handler class whose commands are routed to
    `external_env` (closure-bound)."""

    class Handler(SimpleHTTPRequestHandler):
        def do_POST(self):
            # BUG FIX: `0` was previously passed as the *base* argument of
            # int() instead of as the .get() default, which raised
            # TypeError whenever the Content-Length header was missing.
            content_len = int(self.headers.get("Content-Length", 0))
            raw_body = self.rfile.read(content_len)
            parsed_input = pickle.loads(raw_body)
            try:
                response = self.execute_command(parsed_input)
                self.send_response(200)
                self.end_headers()
                self.wfile.write(pickle.dumps(response))
            except Exception:
                # Ship the traceback back to the client for debugging.
                self.send_error(500, traceback.format_exc())

        def execute_command(self, args):
            """Dispatch a single PolicyClient command to the external env."""
            command = args["command"]
            response = {}
            if command == PolicyClient.START_EPISODE:
                response["episode_id"] = external_env.start_episode(
                    args["episode_id"], args["training_enabled"])
            elif command == PolicyClient.GET_ACTION:
                response["action"] = external_env.get_action(
                    args["episode_id"], args["observation"])
            elif command == PolicyClient.LOG_ACTION:
                external_env.log_action(args["episode_id"],
                                        args["observation"], args["action"])
            elif command == PolicyClient.LOG_RETURNS:
                external_env.log_returns(args["episode_id"], args["reward"],
                                         args["info"])
            elif command == PolicyClient.END_EPISODE:
                external_env.end_episode(args["episode_id"],
                                         args["observation"])
            else:
                raise Exception("Unknown command: {}".format(command))
            return response

    return Handler
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/schedules.py
|
Python
|
"""This file is used for specifying various schedules that evolve over
time throughout the execution of the algorithm, such as:
- learning rate for the optimizer
- exploration epsilon for the epsilon greedy exploration strategy
- beta parameter for beta parameter in prioritized replay
Each schedule has a function `value(t)` which returns the current value
of the parameter given the timestep t of the optimization procedure.
"""
class Schedule:
    """Abstract base for parameter schedules that evolve over timesteps."""

    def value(self, t):
        """Value of the schedule at time t"""
        raise NotImplementedError()
class ConstantSchedule:
    """A schedule whose output never changes."""

    def __init__(self, value):
        """Value remains constant over time.

        Parameters
        ----------
        value: float
            Constant value of the schedule
        """
        self._v = value

    def value(self, t):
        """See Schedule.value"""
        # `t` is intentionally ignored.
        return self._v
def linear_interpolation(l, r, alpha):
    """Interpolate from `l` (alpha=0) toward `r` (alpha=1)."""
    delta = r - l
    return l + alpha * delta
class PiecewiseSchedule:
    def __init__(self,
                 endpoints,
                 interpolation=linear_interpolation,
                 outside_value=None):
        """Piecewise schedule.

        endpoints: [(int, int)]
            list of pairs `(time, value)` meanining that schedule should output
            `value` when `t==time`. All the values for time must be sorted in
            an increasing order. When t is between two times, e.g.
            `(time_a, value_a)`
            and `(time_b, value_b)`, such that `time_a <= t < time_b` then value
            outputs `interpolation(value_a, value_b, alpha)` where alpha is a
            fraction of time passed between `time_a` and `time_b` for time `t`.
        interpolation: lambda float, float, float: float
            a function that takes value to the left and to the right of t
            according to the `endpoints`. Alpha is the fraction of distance from
            left endpoint to right endpoint that t has covered. See
            linear_interpolation for example.
        outside_value: float
            if the value is requested outside of all the intervals sepecified in
            `endpoints` this value is returned. If None then AssertionError is
            raised when outside value is requested.
        """
        times = [point[0] for point in endpoints]
        assert times == sorted(times)
        self._interpolation = interpolation
        self._outside_value = outside_value
        self._endpoints = endpoints

    def value(self, t):
        """See Schedule.value"""
        # Walk consecutive endpoint pairs; zip truncates at the shorter arg,
        # which drops the final (unpaired) endpoint automatically.
        for (l_t, l), (r_t, r) in zip(self._endpoints, self._endpoints[1:]):
            if l_t <= t < r_t:
                alpha = float(t - l_t) / (r_t - l_t)
                return self._interpolation(l, r, alpha)
        # t does not belong to any of the pieces, so doom.
        assert self._outside_value is not None
        return self._outside_value
class LinearSchedule:
    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        """Linear interpolation between initial_p and final_p over
        schedule_timesteps. After this many timesteps pass final_p is
        returned.

        Parameters
        ----------
        schedule_timesteps: int
            Number of timesteps for which to linearly anneal initial_p
            to final_p
        initial_p: float
            initial output value
        final_p: float
            final output value
        """
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        """See Schedule.value"""
        # Fraction of annealing completed, capped at 1.0 once past the end.
        progress = float(t) / max(1, self.schedule_timesteps)
        fraction = min(progress, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/seed.py
|
Python
|
import numpy as np
import random
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def seed(np_seed=0, random_seed=0, tf_seed=0):
    """Seed numpy, python `random`, and (if available) TensorFlow RNGs.

    Args:
        np_seed (int): Seed for np.random.
        random_seed (int): Seed for python's `random` module.
        tf_seed (int): Graph-level seed for TensorFlow.
    """
    np.random.seed(np_seed)
    random.seed(random_seed)
    # BUG FIX: try_import_tf() returns None when TensorFlow is not
    # installed; unguarded tf.set_random_seed crashed with AttributeError.
    if tf is not None:
        tf.set_random_seed(tf_seed)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/test_utils.py
|
Python
|
import numpy as np
from ray.rllib.utils.framework import try_import_tf
tf = try_import_tf()
def check(x, y, decimals=5, atol=None, rtol=None, false=False):
    """
    Checks two structures (dict, tuple, list,
    np.array, float, int, etc..) for (almost) numeric identity.
    All numbers in the two structures have to match up to `decimal` digits
    after the floating point. Uses assertions.

    Args:
        x (any): The first value to be compared (to `y`).
        y (any): The second value to be compared (to `x`).
        decimals (int): The number of digits after the floating point up to
            which all numeric values have to match.
        atol (float): Absolute tolerance of the difference between x and y
            (overrides `decimals` if given).
        rtol (float): Relative tolerance of the difference between x and y
            (overrides `decimals` if given).
        false (bool): Whether to check that x and y are NOT the same.
    """
    # A dict type.
    if isinstance(x, dict):
        assert isinstance(y, dict), \
            "ERROR: If x is dict, y needs to be a dict as well!"
        y_keys = set(x.keys())
        for key, value in x.items():
            assert key in y, \
                "ERROR: y does not have x's key='{}'! y={}".format(key, y)
            check(value, y[key], decimals=decimals, atol=atol, rtol=rtol,
                  false=false)
            y_keys.remove(key)
        assert not y_keys, \
            "ERROR: y contains keys ({}) that are not in x! y={}".\
            format(list(y_keys), y)
    # A tuple type.
    elif isinstance(x, (tuple, list)):
        assert isinstance(y, (tuple, list)),\
            "ERROR: If x is tuple, y needs to be a tuple as well!"
        assert len(y) == len(x),\
            "ERROR: y does not have the same length as x ({} vs {})!".\
            format(len(y), len(x))
        for i, value in enumerate(x):
            check(value, y[i], decimals=decimals, atol=atol, rtol=rtol,
                  false=false)
    # Boolean comparison.
    elif isinstance(x, (np.bool_, bool)):
        if false is True:
            assert bool(x) is not bool(y), \
                "ERROR: x ({}) is y ({})!".format(x, y)
        else:
            assert bool(x) is bool(y), \
                "ERROR: x ({}) is not y ({})!".format(x, y)
    # Nones.
    elif x is None or y is None:
        if false is True:
            assert x != y, "ERROR: x ({}) is the same as y ({})!".format(x, y)
        else:
            assert x == y, \
                "ERROR: x ({}) is not the same as y ({})!".format(x, y)
    # Object-dtype (e.g. string) array comparison.
    # BUG FIX: `np.object` was removed in NumPy 1.24; use `np.object_`.
    elif hasattr(x, "dtype") and x.dtype == np.object_:
        matched = True
        try:
            np.testing.assert_array_equal(x, y)
        except AssertionError as e:
            matched = False
            if false is False:
                raise e
        # BUG FIX: the old code raised the "is the same" AssertionError
        # *inside* the try block, where its own except clause swallowed it,
        # so `false=True` with equal inputs never actually failed.
        if false is True and matched:
            raise AssertionError(
                "ERROR: x ({}) is the same as y ({})!".format(x, y))
    # Everything else (assume numeric).
    else:
        # Numpyize tensors if necessary.
        if tf is not None and isinstance(x, tf.Tensor):
            x = x.numpy()
        if tf is not None and isinstance(y, tf.Tensor):
            y = y.numpy()
        # Using decimals.
        if atol is None and rtol is None:
            matched = True
            try:
                np.testing.assert_almost_equal(x, y, decimal=decimals)
            except AssertionError as e:
                matched = False
                if false is False:
                    raise e
            # Same swallowed-assert fix as in the object-array branch above.
            if false is True and matched:
                raise AssertionError(
                    "ERROR: x ({}) is the same as y ({})!".format(x, y))
        # Using atol/rtol.
        else:
            # Provide defaults for either one of atol/rtol.
            if atol is None:
                atol = 0
            if rtol is None:
                rtol = 1e-7
            matched = True
            try:
                np.testing.assert_allclose(x, y, atol=atol, rtol=rtol)
            except AssertionError as e:
                matched = False
                if false is False:
                    raise e
            if false is True and matched:
                raise AssertionError(
                    "ERROR: x ({}) is the same as y ({})!".format(x, y))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/tf_ops.py
|
Python
|
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def huber_loss(x, delta=1.0):
    """Reference: https://en.wikipedia.org/wiki/Huber_loss"""
    abs_x = tf.abs(x)
    quadratic = 0.5 * tf.square(x)
    linear = delta * (abs_x - 0.5 * delta)
    # Quadratic near zero, linear in the tails.
    return tf.where(abs_x < delta, quadratic, linear)
def reduce_mean_ignore_inf(x, axis):
    """Same as tf.reduce_mean() but ignores -inf values."""
    # Treat float32.min as the "-inf" sentinel and exclude it from both the
    # numerator and the denominator of the mean.
    mask = tf.not_equal(x, tf.float32.min)
    x_zeroed = tf.where(mask, x, tf.zeros_like(x))
    total = tf.reduce_sum(x_zeroed, axis)
    valid_count = tf.reduce_sum(tf.cast(mask, tf.float32), axis)
    return total / valid_count
def minimize_and_clip(optimizer, objective, var_list, clip_val=10):
    """Minimized `objective` using `optimizer` w.r.t. variables in
    `var_list` while ensure the norm of the gradients for each
    variable is clipped to `clip_val`
    """
    grads_and_vars = optimizer.compute_gradients(objective, var_list=var_list)
    for idx, (grad, var) in enumerate(grads_and_vars):
        if grad is None:
            # Variables not reachable from `objective` have no gradient.
            continue
        grads_and_vars[idx] = (tf.clip_by_norm(grad, clip_val), var)
    return grads_and_vars
def make_tf_callable(session_or_none, dynamic_shape=False):
    """Returns a function that can be executed in either graph or eager mode.

    The function must take only positional args.

    If eager is enabled, this will act as just a function. Otherwise, it
    will build a function that executes a session run with placeholders
    internally.

    Arguments:
        session_or_none (tf.Session): tf.Session if in graph mode, else None.
        dynamic_shape (bool): True if the placeholders should have a dynamic
            batch dimension. Otherwise they will be fixed shape.

    Returns:
        a Python function that can be called in either mode.
    """
    # Sanity-check that the session argument matches the current mode.
    if tf.executing_eagerly():
        assert session_or_none is None
    else:
        assert session_or_none is not None

    def make_wrapper(fn):
        if session_or_none:
            # Graph mode: placeholders and the symbolic output are built
            # lazily on the first call, then reused for every later call.
            placeholders = []
            # One-element list so the closure can rebind the cached graph
            # output (Python 2 compatible alternative to `nonlocal`).
            symbolic_out = [None]

            def call(*args):
                # Flatten one level of lists so each tensor argument gets
                # its own placeholder below.
                args_flat = []
                for a in args:
                    if type(a) is list:
                        args_flat.extend(a)
                    else:
                        args_flat.append(a)
                args = args_flat
                if symbolic_out[0] is None:
                    with session_or_none.graph.as_default():
                        for i, v in enumerate(args):
                            if dynamic_shape:
                                # Leave the batch (0th) dimension unknown so
                                # later calls may use different batch sizes.
                                if len(v.shape) > 0:
                                    shape = (None, ) + v.shape[1:]
                                else:
                                    shape = ()
                            else:
                                shape = v.shape
                            placeholders.append(
                                tf.placeholder(
                                    dtype=v.dtype,
                                    shape=shape,
                                    name="arg_{}".format(i)))
                        symbolic_out[0] = fn(*placeholders)
                # NOTE(review): args are matched to placeholders by
                # position, so every call must pass the same number of
                # (flattened) arguments as the first call — confirm callers
                # honor this.
                feed_dict = dict(zip(placeholders, args))
                return session_or_none.run(symbolic_out[0], feed_dict)

            return call
        else:
            # Eager mode: nothing to wrap; the function runs directly.
            return fn

    return make_wrapper
def scope_vars(scope, trainable_only=False):
    """
    Get variables inside a scope
    The scope can be specified as a string

    Parameters
    ----------
    scope: str or VariableScope
        scope in which the variables reside.
    trainable_only: bool
        whether or not to return only the variables that were marked as
        trainable.

    Returns
    -------
    vars: [tf.Variable]
        list of variables in `scope`.
    """
    scope_name = scope if isinstance(scope, str) else scope.name
    collection = (tf.GraphKeys.TRAINABLE_VARIABLES
                  if trainable_only else tf.GraphKeys.VARIABLES)
    return tf.get_collection(collection, scope=scope_name)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/tf_run_builder.py
|
Python
|
import logging
import os
import time
from ray.rllib.utils.debug import log_once
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
class TFRunBuilder:
    """Used to incrementally build up a TensorFlow run.

    This is particularly useful for batching ops from multiple different
    policies in the multi-agent setting.
    """

    def __init__(self, session, debug_name):
        self.session = session
        self.debug_name = debug_name
        self.feed_dict = {}
        self.fetches = []
        # Cached results of the (single) session run; None until executed.
        self._executed = None

    def add_feed_dict(self, feed_dict):
        """Merge `feed_dict` into the pending feed; keys must be new."""
        assert not self._executed
        for key in feed_dict:
            if key in self.feed_dict:
                raise ValueError("Key added twice: {}".format(key))
        self.feed_dict.update(feed_dict)

    def add_fetches(self, fetches):
        """Append `fetches` and return their indices into the fetch list."""
        assert not self._executed
        start = len(self.fetches)
        self.fetches.extend(fetches)
        return list(range(start, len(self.fetches)))

    def get(self, to_fetch):
        """Execute the pending run (once) and return the fetched value(s)."""
        if self._executed is None:
            try:
                self._executed = run_timeline(
                    self.session, self.fetches, self.debug_name,
                    self.feed_dict, os.environ.get("TF_TIMELINE_DIR"))
            except Exception:
                logger.exception("Error fetching: {}, feed_dict={}".format(
                    self.fetches, self.feed_dict))
                raise ValueError("Error fetching: {}, feed_dict={}".format(
                    self.fetches, self.feed_dict))
        if isinstance(to_fetch, int):
            return self._executed[to_fetch]
        if isinstance(to_fetch, list):
            return [self.get(i) for i in to_fetch]
        if isinstance(to_fetch, tuple):
            return tuple(self.get(i) for i in to_fetch)
        raise ValueError("Unsupported fetch type: {}".format(to_fetch))
_count = 0
def run_timeline(sess, ops, debug_name, feed_dict=None, timeline_dir=None):
    """Run `ops` in `sess`, optionally dumping a Chrome trace of the run.

    Args:
        sess: The tf.Session to run in.
        ops: Fetches to evaluate.
        debug_name (str): Tag used in the timeline output filename.
        feed_dict (Optional[dict]): Feed values for the run.
        timeline_dir (Optional[str]): If set, write a Chrome-trace JSON of
            this run into that directory.

    Returns:
        The fetched values from sess.run().
    """
    # Avoid the shared mutable-default-argument pitfall.
    if feed_dict is None:
        feed_dict = {}
    if timeline_dir:
        from tensorflow.python.client import timeline

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        start = time.time()
        fetches = sess.run(
            ops,
            options=run_options,
            run_metadata=run_metadata,
            feed_dict=feed_dict)
        trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        global _count
        outf = os.path.join(
            timeline_dir, "timeline-{}-{}-{}.json".format(
                debug_name, os.getpid(), _count % 10))
        _count += 1
        # BUG FIX: the trace file was previously opened but never closed
        # (and the "Wrote ..." log line ran before the write happened);
        # use a context manager so the trace is flushed and closed.
        with open(outf, "w") as trace_file:
            trace_file.write(trace.generate_chrome_trace_format())
        logger.info("Wrote tf timeline ({} s) to {}".format(
            time.time() - start, os.path.abspath(outf)))
    else:
        if log_once("tf_timeline"):
            logger.info(
                "Executing TF run without tracing. To dump TF timeline traces "
                "to disk, set the TF_TIMELINE_DIR environment variable.")
        fetches = sess.run(ops, feed_dict=feed_dict)
    return fetches
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/timer.py
|
Python
|
import numpy as np
import time
class TimerStat:
    """A running stat for conveniently logging the duration of a code block.

    Example:
        wait_timer = TimerStat()
        with wait_timer:
            ray.wait(...)

    Note that this class is *not* thread-safe.
    """

    def __init__(self, window_size=10):
        self._window_size = window_size
        # Sliding windows of the most recent duration / unit samples.
        self._samples = []
        self._units_processed = []
        self._start_time = None
        self._total_time = 0.0
        self.count = 0

    def __enter__(self):
        assert self._start_time is None, "concurrent updates not supported"
        self._start_time = time.time()

    def __exit__(self, exc_type, exc_value, tb):
        assert self._start_time is not None
        self.push(time.time() - self._start_time)
        self._start_time = None

    def push(self, time_delta):
        """Record one duration sample (keeps only the last window_size)."""
        self._samples.append(time_delta)
        while len(self._samples) > self._window_size:
            self._samples.pop(0)
        self.count += 1
        self._total_time += time_delta

    def push_units_processed(self, n):
        """Record how many units the last timed block processed."""
        self._units_processed.append(n)
        while len(self._units_processed) > self._window_size:
            self._units_processed.pop(0)

    @property
    def mean(self):
        """Mean duration over the sliding window."""
        return np.mean(self._samples)

    @property
    def mean_units_processed(self):
        """Mean units-processed over the sliding window."""
        return float(np.mean(self._units_processed))

    @property
    def mean_throughput(self):
        """Units processed per second over the sliding window."""
        time_total = sum(self._samples)
        if not time_total:
            return 0.0
        return sum(self._units_processed) / time_total
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/torch_ops.py
|
Python
|
from ray.rllib.utils.framework import try_import_torch
torch, _ = try_import_torch()
def sequence_mask(lengths, maxlen=None, dtype=torch.bool):
    """
    Exact same behavior as tf.sequence_mask.

    Returns a (len(lengths), maxlen) mask where row i has lengths[i]
    leading True entries.

    Thanks to Dimitris Papatheodorou
    (https://discuss.pytorch.org/t/pytorch-equivalent-for-tf-sequence-mask/39036).

    Args:
        lengths (Tensor): 1-D int tensor of sequence lengths.
        maxlen (Optional[int]): Width of the mask; defaults to max(lengths)
            (previously this fallback existed but maxlen had to be passed).
        dtype: Output dtype. Default: torch.bool.

    Returns:
        Tensor: The mask, cast to `dtype`.
    """
    if maxlen is None:
        maxlen = lengths.max()
    mask = ~(torch.ones((len(lengths), maxlen)).cumsum(dim=1).t() >
             lengths).t()
    # BUG FIX: Tensor.type() is not in-place; the old code discarded its
    # result, so the `dtype` argument was silently ignored.
    return mask.type(dtype)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/tracking_dict.py
|
Python
|
class UsageTrackingDict(dict):
    """Dict that tracks which keys have been accessed.

    It can also intercept gets and allow an arbitrary callback to be applied
    (i.e., to lazily convert numpy arrays to Tensors).

    We make the simplifying assumption only __getitem__ is used to access
    values.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Keys read via __getitem__ so far.
        self.accessed_keys = set()
        # Cache of interceptor-transformed values (one entry per key).
        self.intercepted_values = {}
        self.get_interceptor = None

    def set_get_interceptor(self, fn):
        """Install `fn` to transform values on first access."""
        self.get_interceptor = fn

    def __getitem__(self, key):
        self.accessed_keys.add(key)
        value = dict.__getitem__(self, key)
        if self.get_interceptor is None:
            return value
        if key not in self.intercepted_values:
            self.intercepted_values[key] = self.get_interceptor(value)
        return self.intercepted_values[key]

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        # Keep the intercepted cache in sync with raw overwrites.
        if key in self.intercepted_values:
            self.intercepted_values[key] = value
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/utils/window_stat.py
|
Python
|
import numpy as np
class WindowStat:
    """Fixed-size circular buffer of samples with summary statistics."""

    def __init__(self, name, n):
        self.name = name
        # Circular buffer; only the first min(count, n) slots hold data.
        self.items = [None] * n
        self.idx = 0
        self.count = 0

    def push(self, obj):
        """Add one sample, overwriting the oldest once the buffer is full."""
        self.items[self.idx] = obj
        self.idx = (self.idx + 1) % len(self.items)
        self.count += 1

    def stats(self):
        """Return count/mean/std/quantiles over the retained samples."""
        valid = self.items[:self.count]
        if self.count:
            quantiles = np.percentile(valid, [0, 10, 50, 90, 100]).tolist()
        else:
            quantiles = []
        return {
            self.name + "_count": int(self.count),
            self.name + "_mean": float(np.mean(valid)),
            self.name + "_std": float(np.std(valid)),
            self.name + "_quantiles": quantiles,
        }
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
setup_hooks.sh
|
Shell
|
#!/bin/bash
# Symlink the repo's pre-push script into git's hooks directory so it runs
# automatically on every `git push`. Run once from the repository root.
# NOTE(review): `ln -s` fails if the hook already exists — presumably this
# is intended as a one-time setup; confirm or add -f for idempotence.
ln -s $PWD/scripts/pre-push $PWD/.git/hooks/pre-push
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/buffer.h
|
C/C++ Header
|
#ifndef RAY_COMMON_BUFFER_H
#define RAY_COMMON_BUFFER_H
#include <cstdint>
#include <cstdio>
#include "plasma/client.h"
#include "ray/common/status.h"
namespace arrow {
class Buffer;
}
namespace ray {
/// The interface that represents a buffer of bytes.
class Buffer {
 public:
  /// Pointer to the data.
  virtual uint8_t *Data() const = 0;

  /// Size of this buffer.
  virtual size_t Size() const = 0;

  /// Whether this buffer owns the data.
  virtual bool OwnsData() const = 0;

  /// Whether this buffer is backed by a plasma object (see PlasmaBuffer).
  virtual bool IsPlasmaBuffer() const = 0;

  virtual ~Buffer(){};

  /// Buffers compare equal when their byte contents are identical; two
  /// empty buffers are equal regardless of their data pointers.
  bool operator==(const Buffer &rhs) const {
    if (this->Size() != rhs.Size()) {
      return false;
    }

    return this->Size() == 0 || memcmp(Data(), rhs.Data(), Size()) == 0;
  }
};
/// Represents a byte buffer in local memory.
class LocalMemoryBuffer : public Buffer {
 public:
  /// Constructor.
  ///
  /// By default when initializing a LocalMemoryBuffer with a data pointer and a length,
  /// it just assigns the pointer and length without coping the data content. This is
  /// for performance reasons. In this case the buffer cannot ensure data validity. It
  /// instead relies on the lifetime passed in data pointer.
  ///
  /// \param data The data pointer to the passed-in buffer.
  /// \param size The size of the passed in buffer.
  /// \param copy_data If true, data will be copied and owned by this buffer,
  ///                  otherwise the buffer only points to the given address.
  LocalMemoryBuffer(uint8_t *data, size_t size, bool copy_data = false)
      : has_data_copy_(copy_data) {
    if (copy_data) {
      RAY_CHECK(data != nullptr);
      buffer_.resize(size);
      std::copy(data, data + size, buffer_.begin());
      data_ = buffer_.data();
      size_ = buffer_.size();
    } else {
      // Zero-copy mode: the caller keeps ownership and the pointed-to
      // memory must outlive this buffer.
      data_ = data;
      size_ = size;
    }
  }

  /// Construct a LocalMemoryBuffer of all zeros of the given size.
  LocalMemoryBuffer(size_t size) : has_data_copy_(true) {
    buffer_.resize(size, 0);
    data_ = buffer_.data();
    size_ = buffer_.size();
  }

  uint8_t *Data() const override { return data_; }

  size_t Size() const override { return size_; }

  bool OwnsData() const override { return has_data_copy_; }

  bool IsPlasmaBuffer() const override { return false; }

  // NOTE(review): only size_ is zeroed here; data_ keeps dangling in
  // zero-copy mode — presumably harmless since the object is gone, but
  // confirm nothing reads Data() during teardown.
  ~LocalMemoryBuffer() { size_ = 0; }

 private:
  /// Disable copy constructor and assignment, as default copy will
  /// cause invalid data_.
  LocalMemoryBuffer &operator=(const LocalMemoryBuffer &) = delete;
  LocalMemoryBuffer(const LocalMemoryBuffer &) = delete;

  /// Pointer to the data.
  uint8_t *data_;
  /// Size of the buffer.
  size_t size_;
  /// Whether this buffer holds a copy of data.
  bool has_data_copy_;
  /// This is only valid when `should_copy` is true.
  std::vector<uint8_t> buffer_;
};
/// Represents a byte buffer for plasma object. This can be used to hold the
/// reference to a plasma object (via the underlying plasma::PlasmaBuffer).
class PlasmaBuffer : public Buffer {
public:
PlasmaBuffer(std::shared_ptr<arrow::Buffer> buffer) : buffer_(buffer) {}
uint8_t *Data() const override { return const_cast<uint8_t *>(buffer_->data()); }
size_t Size() const override { return buffer_->size(); }
bool OwnsData() const override { return true; }
bool IsPlasmaBuffer() const override { return true; }
private:
/// shared_ptr to arrow buffer which can potentially hold a reference
/// for the object (when it's a plasma::PlasmaBuffer).
std::shared_ptr<arrow::Buffer> buffer_;
};
} // namespace ray
#endif // RAY_COMMON_BUFFER_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/client_connection.cc
|
C++
|
#include "client_connection.h"
#include <stdio.h>
#include <boost/bind.hpp>
#include <sstream>
#include "ray/common/ray_config.h"
#include "ray/util/util.h"
namespace ray {
ray::Status TcpConnect(boost::asio::ip::tcp::socket &socket,
const std::string &ip_address_string, int port) {
// Disable Nagle's algorithm, which caused transfer delays of 10s of ms in
// certain cases.
socket.open(boost::asio::ip::tcp::v4());
boost::asio::ip::tcp::no_delay option(true);
socket.set_option(option);
boost::asio::ip::address ip_address =
boost::asio::ip::address::from_string(ip_address_string);
boost::asio::ip::tcp::endpoint endpoint(ip_address, port);
boost::system::error_code error;
socket.connect(endpoint, error);
const auto status = boost_to_ray_status(error);
if (!status.ok()) {
// Close the socket if the connect failed.
boost::system::error_code close_error;
socket.close(close_error);
}
return status;
}
template <class T>
std::shared_ptr<ServerConnection<T>> ServerConnection<T>::Create(
boost::asio::basic_stream_socket<T> &&socket) {
std::shared_ptr<ServerConnection<T>> self(new ServerConnection(std::move(socket)));
return self;
}
template <class T>
ServerConnection<T>::ServerConnection(boost::asio::basic_stream_socket<T> &&socket)
: socket_(std::move(socket)),
async_write_max_messages_(1),
async_write_queue_(),
async_write_in_flight_(false),
async_write_broken_pipe_(false) {}
template <class T>
ServerConnection<T>::~ServerConnection() {
// If there are any pending messages, invoke their callbacks with an IOError status.
for (const auto &write_buffer : async_write_queue_) {
write_buffer->handler(Status::IOError("Connection closed."));
}
}
template <class T>
Status ServerConnection<T>::WriteBuffer(
const std::vector<boost::asio::const_buffer> &buffer) {
boost::system::error_code error;
// Loop until all bytes are written while handling interrupts.
// When profiling with pprof, unhandled interrupts were being sent by the profiler to
// the raylet process, which was causing synchronous reads and writes to fail.
for (const auto &b : buffer) {
uint64_t bytes_remaining = boost::asio::buffer_size(b);
uint64_t position = 0;
while (bytes_remaining != 0) {
size_t bytes_written =
socket_.write_some(boost::asio::buffer(b + position, bytes_remaining), error);
position += bytes_written;
bytes_remaining -= bytes_written;
if (error.value() == EINTR) {
continue;
} else if (error.value() != boost::system::errc::errc_t::success) {
return boost_to_ray_status(error);
}
}
}
return ray::Status::OK();
}
template <class T>
Status ServerConnection<T>::ReadBuffer(
const std::vector<boost::asio::mutable_buffer> &buffer) {
boost::system::error_code error;
// Loop until all bytes are read while handling interrupts.
for (const auto &b : buffer) {
uint64_t bytes_remaining = boost::asio::buffer_size(b);
uint64_t position = 0;
while (bytes_remaining != 0) {
size_t bytes_read =
socket_.read_some(boost::asio::buffer(b + position, bytes_remaining), error);
position += bytes_read;
bytes_remaining -= bytes_read;
if (error.value() == EINTR) {
continue;
} else if (error.value() != boost::system::errc::errc_t::success) {
return boost_to_ray_status(error);
}
}
}
return Status::OK();
}
template <class T>
ray::Status ServerConnection<T>::WriteMessage(int64_t type, int64_t length,
const uint8_t *message) {
sync_writes_ += 1;
bytes_written_ += length;
std::vector<boost::asio::const_buffer> message_buffers;
auto write_cookie = RayConfig::instance().ray_cookie();
message_buffers.push_back(boost::asio::buffer(&write_cookie, sizeof(write_cookie)));
message_buffers.push_back(boost::asio::buffer(&type, sizeof(type)));
message_buffers.push_back(boost::asio::buffer(&length, sizeof(length)));
message_buffers.push_back(boost::asio::buffer(message, length));
return WriteBuffer(message_buffers);
}
template <class T>
void ServerConnection<T>::WriteMessageAsync(
int64_t type, int64_t length, const uint8_t *message,
const std::function<void(const ray::Status &)> &handler) {
async_writes_ += 1;
bytes_written_ += length;
auto write_buffer = std::unique_ptr<AsyncWriteBuffer>(new AsyncWriteBuffer());
write_buffer->write_cookie = RayConfig::instance().ray_cookie();
write_buffer->write_type = type;
write_buffer->write_length = length;
write_buffer->write_message.resize(length);
write_buffer->write_message.assign(message, message + length);
write_buffer->handler = handler;
auto size = async_write_queue_.size();
auto size_is_power_of_two = (size & (size - 1)) == 0;
if (size > 1000 && size_is_power_of_two) {
RAY_LOG(WARNING) << "ServerConnection has " << size << " buffered async writes";
}
async_write_queue_.push_back(std::move(write_buffer));
if (!async_write_in_flight_) {
DoAsyncWrites();
}
}
template <class T>
void ServerConnection<T>::DoAsyncWrites() {
// Make sure we were not writing to the socket.
RAY_CHECK(!async_write_in_flight_);
async_write_in_flight_ = true;
// Do an async write of everything currently in the queue to the socket.
std::vector<boost::asio::const_buffer> message_buffers;
int num_messages = 0;
for (const auto &write_buffer : async_write_queue_) {
message_buffers.push_back(boost::asio::buffer(&write_buffer->write_cookie,
sizeof(write_buffer->write_cookie)));
message_buffers.push_back(
boost::asio::buffer(&write_buffer->write_type, sizeof(write_buffer->write_type)));
message_buffers.push_back(boost::asio::buffer(&write_buffer->write_length,
sizeof(write_buffer->write_length)));
message_buffers.push_back(boost::asio::buffer(write_buffer->write_message));
num_messages++;
if (num_messages >= async_write_max_messages_) {
break;
}
}
// Helper function to call all handlers with the input status.
auto call_handlers = [this](const ray::Status &status, int num_messages) {
for (int i = 0; i < num_messages; i++) {
auto write_buffer = std::move(async_write_queue_.front());
write_buffer->handler(status);
async_write_queue_.pop_front();
}
// We finished writing, so mark that we're no longer doing an async write.
async_write_in_flight_ = false;
// If there is more to write, try to write the rest.
if (!async_write_queue_.empty()) {
DoAsyncWrites();
}
};
if (async_write_broken_pipe_) {
// Call the handlers directly. Because writing messages to a connection
// with broken-pipe status will result in the callbacks never being called.
call_handlers(ray::Status::IOError("Broken pipe"), num_messages);
return;
}
auto this_ptr = this->shared_from_this();
boost::asio::async_write(
ServerConnection<T>::socket_, message_buffers,
[this, this_ptr, num_messages, call_handlers](
const boost::system::error_code &error, size_t bytes_transferred) {
ray::Status status = boost_to_ray_status(error);
if (error.value() == boost::system::errc::errc_t::broken_pipe) {
RAY_LOG(ERROR) << "Broken Pipe happened during calling "
<< "ServerConnection<T>::DoAsyncWrites.";
// From now on, calling DoAsyncWrites will directly call the handler
// with this broken-pipe status.
async_write_broken_pipe_ = true;
} else if (!status.ok()) {
RAY_LOG(ERROR) << "Error encountered during calling "
<< "ServerConnection<T>::DoAsyncWrites, message: "
<< status.message()
<< ", error code: " << static_cast<int>(error.value());
}
call_handlers(status, num_messages);
});
}
template <class T>
std::shared_ptr<ClientConnection<T>> ClientConnection<T>::Create(
ClientHandler<T> &client_handler, MessageHandler<T> &message_handler,
boost::asio::basic_stream_socket<T> &&socket, const std::string &debug_label,
const std::vector<std::string> &message_type_enum_names, int64_t error_message_type) {
std::shared_ptr<ClientConnection<T>> self(
new ClientConnection(message_handler, std::move(socket), debug_label,
message_type_enum_names, error_message_type));
// Let our manager process our new connection.
client_handler(*self);
return self;
}
template <class T>
ClientConnection<T>::ClientConnection(
MessageHandler<T> &message_handler, boost::asio::basic_stream_socket<T> &&socket,
const std::string &debug_label,
const std::vector<std::string> &message_type_enum_names, int64_t error_message_type)
: ServerConnection<T>(std::move(socket)),
registered_(false),
message_handler_(message_handler),
debug_label_(debug_label),
message_type_enum_names_(message_type_enum_names),
error_message_type_(error_message_type) {}
template <class T>
void ClientConnection<T>::Register() {
RAY_CHECK(!registered_);
registered_ = true;
}
template <class T>
void ClientConnection<T>::ProcessMessages() {
// Wait for a message header from the client. The message header includes the
// protocol version, the message type, and the length of the message.
std::vector<boost::asio::mutable_buffer> header;
header.push_back(boost::asio::buffer(&read_cookie_, sizeof(read_cookie_)));
header.push_back(boost::asio::buffer(&read_type_, sizeof(read_type_)));
header.push_back(boost::asio::buffer(&read_length_, sizeof(read_length_)));
boost::asio::async_read(
ServerConnection<T>::socket_, header,
boost::bind(&ClientConnection<T>::ProcessMessageHeader,
shared_ClientConnection_from_this(), boost::asio::placeholders::error));
}
template <class T>
void ClientConnection<T>::ProcessMessageHeader(const boost::system::error_code &error) {
if (error) {
// If there was an error, disconnect the client.
read_type_ = error_message_type_;
read_length_ = 0;
ProcessMessage(error);
return;
}
// If there was no error, make sure the ray cookie matches.
if (!CheckRayCookie()) {
ServerConnection<T>::Close();
return;
}
// Resize the message buffer to match the received length.
read_message_.resize(read_length_);
ServerConnection<T>::bytes_read_ += read_length_;
// Wait for the message to be read.
boost::asio::async_read(
ServerConnection<T>::socket_, boost::asio::buffer(read_message_),
boost::bind(&ClientConnection<T>::ProcessMessage,
shared_ClientConnection_from_this(), boost::asio::placeholders::error));
}
template <class T>
bool ClientConnection<T>::CheckRayCookie() {
if (read_cookie_ == RayConfig::instance().ray_cookie()) {
return true;
}
// Cookie is not matched.
// Only assert if the message is coming from a known remote endpoint,
// which is indicated by a non-nil client ID. This is to protect raylet
// against miscellaneous connections. We did see cases where bad data
// is received from local unknown program which crashes raylet.
std::ostringstream ss;
ss << " ray cookie mismatch for received message. "
<< "received cookie: " << read_cookie_ << ", debug label: " << debug_label_;
auto remote_endpoint_info = RemoteEndpointInfo();
if (!remote_endpoint_info.empty()) {
ss << ", remote endpoint info: " << remote_endpoint_info;
}
if (registered_) {
// This is from a known client, which indicates a bug.
RAY_LOG(FATAL) << ss.str();
} else {
// It's not from a known client, log this message, and stop processing the connection.
RAY_LOG(WARNING) << ss.str();
}
return false;
}
template <class T>
std::string ClientConnection<T>::RemoteEndpointInfo() {
return std::string();
}
template <>
std::string ClientConnection<remote_stream_protocol>::RemoteEndpointInfo() {
const auto &remote_endpoint =
ServerConnection<remote_stream_protocol>::socket_.remote_endpoint();
return remote_endpoint.address().to_string() + ":" +
std::to_string(remote_endpoint.port());
}
template <class T>
void ClientConnection<T>::ProcessMessage(const boost::system::error_code &error) {
if (error) {
read_type_ = error_message_type_;
}
int64_t start_ms = current_time_ms();
message_handler_(shared_ClientConnection_from_this(), read_type_, read_message_.data());
int64_t interval = current_time_ms() - start_ms;
if (interval > RayConfig::instance().handler_warning_timeout_ms()) {
std::string message_type;
if (message_type_enum_names_.empty()) {
message_type = std::to_string(read_type_);
} else {
message_type = message_type_enum_names_[read_type_];
}
RAY_LOG(WARNING) << "[" << debug_label_ << "]ProcessMessage with type "
<< message_type << " took " << interval << " ms.";
}
}
template <class T>
std::string ServerConnection<T>::DebugString() const {
std::stringstream result;
result << "\n- bytes read: " << bytes_read_;
result << "\n- bytes written: " << bytes_written_;
result << "\n- num async writes: " << async_writes_;
result << "\n- num sync writes: " << sync_writes_;
result << "\n- writing: " << async_write_in_flight_;
int64_t num_bytes = 0;
for (auto &buffer : async_write_queue_) {
num_bytes += buffer->write_length;
}
result << "\n- pending async bytes: " << num_bytes;
return result.str();
}
template class ServerConnection<local_stream_protocol>;
template class ServerConnection<remote_stream_protocol>;
template class ClientConnection<local_stream_protocol>;
template class ClientConnection<remote_stream_protocol>;
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/client_connection.h
|
C/C++ Header
|
#ifndef RAY_COMMON_CLIENT_CONNECTION_H
#define RAY_COMMON_CLIENT_CONNECTION_H
#include <deque>
#include <memory>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/enable_shared_from_this.hpp>
#include "ray/common/id.h"
#include "ray/common/status.h"
namespace ray {
/// Connect a TCP socket.
///
/// \param socket The socket to connect.
/// \param ip_address The IP address to connect to.
/// \param port The port to connect to.
/// \return Status.
ray::Status TcpConnect(boost::asio::ip::tcp::socket &socket,
const std::string &ip_address, int port);
/// \typename ServerConnection
///
/// A generic type representing a client connection to a server. This typename
/// can be used to write messages synchronously to the server.
template <typename T>
class ServerConnection : public std::enable_shared_from_this<ServerConnection<T>> {
public:
/// ServerConnection destructor.
virtual ~ServerConnection();
/// Allocate a new server connection.
///
/// \param socket A reference to the server socket.
/// \return std::shared_ptr<ServerConnection>.
static std::shared_ptr<ServerConnection<T>> Create(
boost::asio::basic_stream_socket<T> &&socket);
/// Write a message to the client.
///
/// \param type The message type (e.g., a flatbuffer enum).
/// \param length The size in bytes of the message.
/// \param message A pointer to the message buffer.
/// \return Status.
ray::Status WriteMessage(int64_t type, int64_t length, const uint8_t *message);
/// Write a message to the client asynchronously.
///
/// \param type The message type (e.g., a flatbuffer enum).
/// \param length The size in bytes of the message.
/// \param message A pointer to the message buffer.
/// \param handler A callback to run on write completion.
void WriteMessageAsync(int64_t type, int64_t length, const uint8_t *message,
const std::function<void(const ray::Status &)> &handler);
/// Write a buffer to this connection.
///
/// \param buffer The buffer.
/// \return Status.
Status WriteBuffer(const std::vector<boost::asio::const_buffer> &buffer);
/// Read a buffer from this connection.
///
/// \param buffer The buffer.
/// \return Status.
Status ReadBuffer(const std::vector<boost::asio::mutable_buffer> &buffer);
/// Shuts down socket for this connection.
void Close() {
boost::system::error_code ec;
socket_.close(ec);
}
std::string DebugString() const;
protected:
/// A private constructor for a server connection.
ServerConnection(boost::asio::basic_stream_socket<T> &&socket);
/// A message that is queued for writing asynchronously.
struct AsyncWriteBuffer {
int64_t write_cookie;
int64_t write_type;
uint64_t write_length;
std::vector<uint8_t> write_message;
std::function<void(const ray::Status &)> handler;
};
/// The socket connection to the server.
boost::asio::basic_stream_socket<T> socket_;
/// Max number of messages to write out at once.
const int async_write_max_messages_;
/// List of pending messages to write.
std::deque<std::unique_ptr<AsyncWriteBuffer>> async_write_queue_;
/// Whether we are in the middle of an async write.
bool async_write_in_flight_;
/// Whether we've met a broken-pipe error during writing.
bool async_write_broken_pipe_;
/// Count of async messages sent total.
int64_t async_writes_ = 0;
/// Count of sync messages sent total.
int64_t sync_writes_ = 0;
/// Count of bytes sent total.
int64_t bytes_written_ = 0;
/// Count of bytes read total.
int64_t bytes_read_ = 0;
private:
/// Asynchronously flushes the write queue. While async writes are running, the flag
/// async_write_in_flight_ will be set. This should only be called when no async writes
/// are currently in flight.
void DoAsyncWrites();
};
template <typename T>
class ClientConnection;
template <typename T>
using ClientHandler = std::function<void(ClientConnection<T> &)>;
template <typename T>
using MessageHandler =
std::function<void(std::shared_ptr<ClientConnection<T>>, int64_t, const uint8_t *)>;
/// \typename ClientConnection
///
/// A generic type representing a client connection on a server. In addition to
/// writing messages to the client, like in ServerConnection, this typename can
/// also be used to process messages asynchronously from client.
template <typename T>
class ClientConnection : public ServerConnection<T> {
public:
using std::enable_shared_from_this<ServerConnection<T>>::shared_from_this;
/// Allocate a new node client connection.
///
/// \param new_client_handler A reference to the client handler.
/// \param message_handler A reference to the message handler.
/// \param socket The client socket.
/// \param debug_label Label that is printed in debug messages, to identify
/// the type of client.
/// \param message_type_enum_names A table of printable enum names for the
/// message types received from this client, used for debug messages.
/// \return std::shared_ptr<ClientConnection>.
static std::shared_ptr<ClientConnection<T>> Create(
ClientHandler<T> &new_client_handler, MessageHandler<T> &message_handler,
boost::asio::basic_stream_socket<T> &&socket, const std::string &debug_label,
const std::vector<std::string> &message_type_enum_names,
int64_t error_message_type);
std::shared_ptr<ClientConnection<T>> shared_ClientConnection_from_this() {
return std::static_pointer_cast<ClientConnection<T>>(shared_from_this());
}
/// Register the client.
void Register();
/// Listen for and process messages from the client connection. Once a
/// message has been fully received, the client manager's
/// ProcessClientMessage handler will be called.
void ProcessMessages();
private:
/// A private constructor for a node client connection.
ClientConnection(MessageHandler<T> &message_handler,
boost::asio::basic_stream_socket<T> &&socket,
const std::string &debug_label,
const std::vector<std::string> &message_type_enum_names,
int64_t error_message_type);
/// Process an error from the last operation, then process the message
/// header from the client.
void ProcessMessageHeader(const boost::system::error_code &error);
/// Process an error from reading the message header, then process the
/// message from the client.
void ProcessMessage(const boost::system::error_code &error);
/// Check if the ray cookie in a received message is correct. Note, if the cookie
/// is wrong and the remote endpoint is known, raylet process will crash. If the remote
/// endpoint is unknown, this method will only print a warning.
///
/// \return If the cookie is correct.
bool CheckRayCookie();
/// Return information about IP and port for the remote endpoint. For local connection
/// this returns an empty string.
///
/// \return Information of remote endpoint.
std::string RemoteEndpointInfo();
/// Whether the client has sent us a registration message yet.
bool registered_;
/// The handler for a message from the client.
MessageHandler<T> message_handler_;
/// A label used for debug messages.
const std::string debug_label_;
/// A table of printable enum names for the message types, used for debug
/// messages.
const std::vector<std::string> message_type_enum_names_;
/// The value for disconnect client message.
int64_t error_message_type_;
/// Buffers for the current message being read from the client.
int64_t read_cookie_;
int64_t read_type_;
uint64_t read_length_;
std::vector<uint8_t> read_message_;
};
typedef
#if defined(BOOST_ASIO_HAS_LOCAL_SOCKETS)
boost::asio::local::stream_protocol
#else
boost::asio::generic::stream_protocol
#endif
local_stream_protocol;
typedef boost::asio::ip::tcp remote_stream_protocol;
using LocalServerConnection = ServerConnection<local_stream_protocol>;
using TcpServerConnection = ServerConnection<remote_stream_protocol>;
using LocalClientConnection = ClientConnection<local_stream_protocol>;
using TcpClientConnection = ClientConnection<remote_stream_protocol>;
} // namespace ray
#endif // RAY_COMMON_CLIENT_CONNECTION_H
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/common_protocol.cc
|
C++
|
#include "common_protocol.h"
#include "ray/util/logging.h"
std::string string_from_flatbuf(const flatbuffers::String &string) {
return std::string(string.data(), string.size());
}
std::vector<std::string> string_vec_from_flatbuf(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> &flatbuf_vec) {
std::vector<std::string> string_vector;
string_vector.reserve(flatbuf_vec.size());
for (int64_t i = 0; i < flatbuf_vec.size(); i++) {
const auto flatbuf_str = flatbuf_vec.Get(i);
string_vector.push_back(string_from_flatbuf(*flatbuf_str));
}
return string_vector;
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
string_vec_to_flatbuf(flatbuffers::FlatBufferBuilder &fbb,
const std::vector<std::string> &string_vector) {
std::vector<flatbuffers::Offset<flatbuffers::String>> flatbuf_str_vec;
flatbuf_str_vec.reserve(flatbuf_str_vec.size());
for (auto const &str : string_vector) {
flatbuf_str_vec.push_back(fbb.CreateString(str));
}
return fbb.CreateVector(flatbuf_str_vec);
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/common_protocol.h
|
C/C++ Header
|
#ifndef COMMON_PROTOCOL_H
#define COMMON_PROTOCOL_H
#include <flatbuffers/flatbuffers.h>
#include <unordered_set>
#include "ray/common/id.h"
#include "ray/util/logging.h"
/// Convert an unique ID to a flatbuffer string.
///
/// @param fbb Reference to the flatbuffer builder.
/// @param id The ID to be converted.
/// @return The flatbuffer string containing the ID.
template <typename ID>
flatbuffers::Offset<flatbuffers::String> to_flatbuf(flatbuffers::FlatBufferBuilder &fbb,
ID id);
/// Convert a flatbuffer string to an unique ID.
///
/// @param string The flatbuffer string.
/// @return The ID.
template <typename ID>
ID from_flatbuf(const flatbuffers::String &string);
/// Convert a flatbuffer vector of strings to a vector of unique IDs.
///
/// @param vector The flatbuffer vector.
/// @return The vector of IDs.
template <typename ID>
const std::vector<ID> from_flatbuf(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> &vector);
/// Convert a flatbuffer vector of strings to an unordered_set of unique IDs.
///
/// @param vector The flatbuffer vector.
/// @return The unordered set of IDs.
template <typename ID>
const std::unordered_set<ID> unordered_set_from_flatbuf(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> &vector);
/// Convert a flatbuffer of string that concatenated
/// unique IDs to a vector of unique IDs.
///
/// @param vector The flatbuffer vector.
/// @return The vector of IDs.
template <typename ID>
const std::vector<ID> ids_from_flatbuf(const flatbuffers::String &string);
/// Convert a vector of unique IDs to a flatbuffer string.
/// The IDs are concatenated to a string with binary.
///
/// @param fbb Reference to the flatbuffer builder.
/// @param ids The vector of IDs.
/// @return Flatbuffer string of concatenated IDs.
template <typename ID>
flatbuffers::Offset<flatbuffers::String> ids_to_flatbuf(
flatbuffers::FlatBufferBuilder &fbb, const std::vector<ID> &ids);
/// Convert an array of unique IDs to a flatbuffer vector of strings.
///
/// @param fbb Reference to the flatbuffer builder.
/// @param ids Array of unique IDs.
/// @param num_ids Number of elements in the array.
/// @return Flatbuffer vector of strings.
template <typename ID>
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
to_flatbuf(flatbuffers::FlatBufferBuilder &fbb, ID ids[], int64_t num_ids);
/// Convert a vector of unique IDs to a flatbuffer vector of strings.
///
/// @param fbb Reference to the flatbuffer builder.
/// @param ids Vector of IDs.
/// @return Flatbuffer vector of strings.
template <typename ID>
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
to_flatbuf(flatbuffers::FlatBufferBuilder &fbb, const std::vector<ID> &ids);
/// Convert an unordered_set of unique IDs to a flatbuffer vector of strings.
///
/// @param fbb Reference to the flatbuffer builder.
/// @param ids Unordered set of IDs.
/// @return Flatbuffer vector of strings.
template <typename ID>
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
to_flatbuf(flatbuffers::FlatBufferBuilder &fbb, const std::unordered_set<ID> &ids);
/// Convert a flatbuffer string to a std::string.
///
/// @param fbb Reference to the flatbuffer builder.
/// @param string A flatbuffers string.
/// @return The std::string version of the flatbuffer string.
std::string string_from_flatbuf(const flatbuffers::String &string);
std::vector<std::string> string_vec_from_flatbuf(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> &flatbuf_vec);
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
string_vec_to_flatbuf(flatbuffers::FlatBufferBuilder &fbb,
const std::vector<std::string> &string_vector);
template <typename ID>
flatbuffers::Offset<flatbuffers::String> to_flatbuf(flatbuffers::FlatBufferBuilder &fbb,
ID id) {
return fbb.CreateString(reinterpret_cast<const char *>(id.Data()), id.Size());
}
template <typename ID>
ID from_flatbuf(const flatbuffers::String &string) {
return ID::FromBinary(string.str());
}
template <typename ID>
const std::vector<ID> from_flatbuf(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> &vector) {
std::vector<ID> ids;
for (int64_t i = 0; i < vector.Length(); i++) {
ids.push_back(from_flatbuf<ID>(*vector.Get(i)));
}
return ids;
}
template <typename ID>
const std::unordered_set<ID> unordered_set_from_flatbuf(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> &vector) {
std::unordered_set<ID> ids;
for (int64_t i = 0; i < vector.Length(); i++) {
ids.insert(from_flatbuf<ID>(*vector.Get(i)));
}
return ids;
}
template <typename ID>
const std::vector<ID> ids_from_flatbuf(const flatbuffers::String &string) {
const auto &ids = string_from_flatbuf(string);
std::vector<ID> ret;
size_t id_size = ID::Size();
RAY_CHECK(ids.size() % id_size == 0);
auto count = ids.size() / id_size;
for (size_t i = 0; i < count; ++i) {
auto pos = static_cast<size_t>(id_size * i);
const auto &id = ids.substr(pos, id_size);
ret.push_back(ID::FromBinary(id));
}
return ret;
}
template <typename ID>
flatbuffers::Offset<flatbuffers::String> ids_to_flatbuf(
flatbuffers::FlatBufferBuilder &fbb, const std::vector<ID> &ids) {
std::string result;
for (const auto &id : ids) {
result += id.Binary();
}
return fbb.CreateString(result);
}
template <typename ID>
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
to_flatbuf(flatbuffers::FlatBufferBuilder &fbb, ID ids[], int64_t num_ids) {
std::vector<flatbuffers::Offset<flatbuffers::String>> results;
for (int64_t i = 0; i < num_ids; i++) {
results.push_back(to_flatbuf(fbb, ids[i]));
}
return fbb.CreateVector(results);
}
template <typename ID>
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
to_flatbuf(flatbuffers::FlatBufferBuilder &fbb, const std::vector<ID> &ids) {
std::vector<flatbuffers::Offset<flatbuffers::String>> results;
for (auto id : ids) {
results.push_back(to_flatbuf(fbb, id));
}
return fbb.CreateVector(results);
}
template <typename ID>
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
to_flatbuf(flatbuffers::FlatBufferBuilder &fbb, const std::unordered_set<ID> &ids) {
std::vector<flatbuffers::Offset<flatbuffers::String>> results;
for (auto id : ids) {
results.push_back(to_flatbuf(fbb, id));
}
return fbb.CreateVector(results);
}
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/constants.h
|
C/C++ Header
|
#ifndef RAY_CONSTANTS_H_
#define RAY_CONSTANTS_H_
#include <limits.h>
#include <stdint.h>
/// Length of Ray full-length IDs in bytes.
constexpr size_t kUniqueIDSize = 20;
/// Length of plasma ID in bytes.
constexpr size_t kPlasmaIdSize = 20;
/// An ObjectID's bytes are split into the task ID itself and the index of the
/// object's creation. This is the maximum width of the object index in bits.
constexpr int kObjectIdIndexSize = 32;
static_assert(kObjectIdIndexSize % CHAR_BIT == 0,
"ObjectID prefix not a multiple of bytes");
/// Raylet exit code on plasma store socket error.
constexpr int kRayletStoreErrorExitCode = 100;
/// Prefix for the object table keys in redis.
constexpr char kObjectTablePrefix[] = "ObjectTable";
/// Prefix for the task table keys in redis.
constexpr char kTaskTablePrefix[] = "TaskTable";
constexpr char kWorkerDynamicOptionPlaceholderPrefix[] =
"RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_";
constexpr char kWorkerNumWorkersPlaceholder[] = "RAY_WORKER_NUM_WORKERS_PLACEHOLDER";
#endif // RAY_CONSTANTS_H_
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/grpc_util.h
|
C/C++ Header
|
#ifndef RAY_COMMON_GRPC_UTIL_H
#define RAY_COMMON_GRPC_UTIL_H
#include <google/protobuf/map.h>
#include <google/protobuf/repeated_field.h>
#include <grpcpp/grpcpp.h>
#include <sstream>
#include "status.h"
namespace ray {
/// Wrap a protobuf message.
///
/// Holds the message behind a shared_ptr so wrappers can be copied cheaply
/// while sharing one underlying message instance.
template <class Message>
class MessageWrapper {
 public:
  /// Construct an empty message wrapper. This should not be used directly.
  MessageWrapper() : message_(std::make_shared<Message>()) {}

  /// Construct from a protobuf message object.
  /// The input message will be **copied** into this object.
  ///
  /// \param message The protobuf message.
  explicit MessageWrapper(const Message message)
      : message_(std::make_shared<Message>(std::move(message))) {}

  /// Construct from a protobuf message shared_ptr.
  ///
  /// \param message The protobuf message.
  explicit MessageWrapper(std::shared_ptr<Message> message) : message_(message) {}

  /// Construct from protobuf-serialized binary.
  ///
  /// \param serialized_binary Protobuf-serialized binary.
  explicit MessageWrapper(const std::string &serialized_binary)
      : message_(std::make_shared<Message>()) {
    // NOTE: parse errors are silently ignored here; ParseFromString's return
    // value is not checked.
    message_->ParseFromString(serialized_binary);
  }

  /// Get const reference of the protobuf message.
  const Message &GetMessage() const { return *message_; }

  /// Get reference of the protobuf message.
  Message &GetMutableMessage() { return *message_; }

  /// Serialize the message to a string.
  const std::string Serialize() const { return message_->SerializeAsString(); }

 protected:
  /// The wrapped message.
  std::shared_ptr<Message> message_;
};
/// Convert a Ray `Status` into the equivalent gRPC status.
///
/// \param ray_status The Ray status to convert.
/// \return `grpc::Status::OK` on success, otherwise an UNKNOWN gRPC status
///         carrying the Ray status message.
inline grpc::Status RayStatusToGrpcStatus(const Status &ray_status) {
  if (!ray_status.ok()) {
    // TODO(hchen): Use more specific error code.
    return grpc::Status(grpc::StatusCode::UNKNOWN, ray_status.message());
  }
  return grpc::Status::OK;
}
/// Convert a gRPC status into the equivalent Ray `Status`.
///
/// \param grpc_status The gRPC status to convert.
/// \return `Status::OK()` on success, otherwise an IOError whose message is
///         "<error_code>: <error_message>".
inline Status GrpcStatusToRayStatus(const grpc::Status &grpc_status) {
  if (!grpc_status.ok()) {
    std::ostringstream msg;
    msg << grpc_status.error_code() << ": " << grpc_status.error_message();
    return Status::IOError(msg.str());
  }
  return Status::OK();
}
/// Converts a Protobuf `RepeatedPtrField` to a vector.
///
/// Elements are copied into the returned vector.
template <class T>
inline std::vector<T> VectorFromProtobuf(
    const ::google::protobuf::RepeatedPtrField<T> &pb_repeated) {
  return std::vector<T>(pb_repeated.begin(), pb_repeated.end());
}
/// Converts a Protobuf `RepeatedField` to a vector.
///
/// Overload for scalar (non-message) repeated fields; elements are copied.
template <class T>
inline std::vector<T> VectorFromProtobuf(
    const ::google::protobuf::RepeatedField<T> &pb_repeated) {
  return std::vector<T>(pb_repeated.begin(), pb_repeated.end());
}
/// Converts a Protobuf `RepeatedPtrField` of serialized ID binaries to a
/// vector of IDs.
///
/// \param pb_repeated Repeated field of binary strings, each the binary form
///        of one ID (as produced by `ID::Binary()`).
/// \return Vector of IDs decoded via `ID::FromBinary`.
template <class ID>
inline std::vector<ID> IdVectorFromProtobuf(
    const ::google::protobuf::RepeatedPtrField<::std::string> &pb_repeated) {
  std::vector<ID> ret;
  // Transform directly from the repeated field; the previous implementation
  // first copied every string into an intermediate std::vector<std::string>,
  // which doubled the string copies for no benefit.
  ret.reserve(pb_repeated.size());
  std::transform(pb_repeated.begin(), pb_repeated.end(), std::back_inserter(ret),
                 &ID::FromBinary);
  return ret;
}
/// Converts a Protobuf map to a `unordered_map`.
template <class K, class V>
inline std::unordered_map<K, V> MapFromProtobuf(::google::protobuf::Map<K, V> pb_map) {
return std::unordered_map<K, V>(pb_map.begin(), pb_map.end());
}
} // namespace ray
#endif
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/id.cc
|
C++
|
#include "ray/common/id.h"
#include <limits.h>
#include <algorithm>
#include <chrono>
#include <mutex>
#include <random>
#include "ray/common/constants.h"
#include "ray/common/status.h"
#include "ray/util/util.h"
extern "C" {
#include "ray/thirdparty/sha256.h"
}
// Definitions for computing hash digests.
#define DIGEST_SIZE SHA256_BLOCK_SIZE
namespace ray {
uint64_t MurmurHash64A(const void *key, int len, unsigned int seed);
/// A helper function to generate the unique bytes by hash.
///
/// SHA-256 hashes (job_id, parent_task_id, parent_task_counter) and returns
/// the first `length` bytes of the digest.
///
/// \param job_id The job the new ID belongs to.
/// \param parent_task_id The task that is creating the new ID.
/// \param parent_task_counter How many tasks the parent submitted before this.
/// \param length Number of digest bytes to return; must be <= DIGEST_SIZE.
std::string GenerateUniqueBytes(const JobID &job_id, const TaskID &parent_task_id,
                                size_t parent_task_counter, size_t length) {
  RAY_CHECK(length <= DIGEST_SIZE);
  SHA256_CTX ctx;
  sha256_init(&ctx);
  sha256_update(&ctx, reinterpret_cast<const BYTE *>(job_id.Data()), job_id.Size());
  sha256_update(&ctx, reinterpret_cast<const BYTE *>(parent_task_id.Data()),
                parent_task_id.Size());
  // NOTE: the counter is hashed as raw native bytes, so digests are only
  // reproducible across machines with the same endianness/word size.
  sha256_update(&ctx, (const BYTE *)&parent_task_counter, sizeof(parent_task_counter));
  BYTE buff[DIGEST_SIZE];
  sha256_final(&ctx, buff);
  return std::string(buff, buff + length);
}
namespace {
// Helpers for packing/unpacking the 16-bit ObjectID flags field.
// Bit layout (high to low): [15]=CreatedByTask, [14]=ObjectType,
// [13:11]=TransportType, remaining bits unused.

/// The bit offset of the flag `CreatedByTask` in a flags bytes.
constexpr uint8_t kCreatedByTaskBitsOffset = 15;

/// The bit offset of the flag `ObjectType` in a flags bytes.
constexpr uint8_t kObjectTypeBitsOffset = 14;

/// The bit offset of the flag `TransportType` in a flags bytes.
constexpr uint8_t kTransportTypeBitsOffset = 11;

/// The mask that is used to mask the flag `CreatedByTask`.
constexpr ObjectIDFlagsType kCreatedByTaskFlagBitMask = 0x1 << kCreatedByTaskBitsOffset;

/// The mask that is used to mask a bit to indicates the type of this object.
/// So it can represent for 2 types.
constexpr ObjectIDFlagsType kObjectTypeFlagBitMask = 0x1 << kObjectTypeBitsOffset;

/// The mask that is used to mask 3 bits to indicate the type of transport.
constexpr ObjectIDFlagsType kTransportTypeFlagBitMask = 0x7 << kTransportTypeBitsOffset;

/// The implementations of helper functions.
// NOTE: this OR-s the bit in without clearing first, so it can only set the
// flag, never clear it (fine while flags start from zero).
inline void SetCreatedByTaskFlag(bool created_by_task, ObjectIDFlagsType *flags) {
  const ObjectIDFlagsType object_type_bits =
      static_cast<ObjectIDFlagsType>(created_by_task) << kCreatedByTaskBitsOffset;
  *flags = (*flags bitor object_type_bits);
}

// Same OR-only caveat as above; see the TODO in SetTransportTypeFlag.
inline void SetObjectTypeFlag(ObjectType object_type, ObjectIDFlagsType *flags) {
  const ObjectIDFlagsType object_type_bits = static_cast<ObjectIDFlagsType>(object_type)
                                             << kObjectTypeBitsOffset;
  *flags = (*flags bitor object_type_bits);
}

inline void SetTransportTypeFlag(uint8_t transport_type, ObjectIDFlagsType *flags) {
  // TODO(ekl) we should be masking for all the SET operations in this file.
  // NOTE(review): this mask clears only bit 11, not the full 3-bit field
  // (bits 13:11) — transport values > 1 would leave stale bits. Confirm only
  // values 0/1 are ever used.
  auto mask = static_cast<ObjectIDFlagsType>(1) << kTransportTypeBitsOffset;
  const ObjectIDFlagsType transport_type_bits =
      static_cast<ObjectIDFlagsType>(transport_type) << kTransportTypeBitsOffset;
  *flags = ((*flags bitand ~mask) bitor transport_type_bits);
}

/// Extract the `CreatedByTask` bit from a flags value.
inline bool CreatedByTask(ObjectIDFlagsType flags) {
  return ((flags bitand kCreatedByTaskFlagBitMask) >> kCreatedByTaskBitsOffset) != 0x0;
}

/// Extract the object type (PUT_OBJECT / RETURN_OBJECT) from a flags value.
inline ObjectType GetObjectType(ObjectIDFlagsType flags) {
  const ObjectIDFlagsType object_type =
      (flags bitand kObjectTypeFlagBitMask) >> kObjectTypeBitsOffset;
  return static_cast<ObjectType>(object_type);
}

/// Extract the 3-bit transport type from a flags value.
inline uint8_t GetTransportType(ObjectIDFlagsType flags) {
  const ObjectIDFlagsType transport_type =
      (flags bitand kTransportTypeFlagBitMask) >> kTransportTypeBitsOffset;
  return static_cast<uint8_t>(transport_type);
}
}  // namespace
/// Fill every element of the given container with 0xFF (the "nil" marker
/// used for nil IDs).
///
/// \param data Pointer to an iterable container of byte-like elements
///             (e.g. std::string or std::vector<uint8_t>); must not be null.
template <typename T>
void FillNil(T *data) {
  RAY_CHECK(data != nullptr);
  // std::fill states the intent directly instead of a manual index loop.
  std::fill(data->begin(), data->end(),
            static_cast<typename T::value_type>(0xFF));
}
/// Derive the driver's WorkerID from its JobID: job-id bytes first, then
/// 0xFF padding up to WorkerID::Size().
WorkerID ComputeDriverIdFromJob(const JobID &job_id) {
  std::vector<uint8_t> data(WorkerID::Size(), 0);
  std::memcpy(data.data(), job_id.Data(), JobID::Size());
  std::fill_n(data.data() + JobID::Size(), WorkerID::Size() - JobID::Size(), 0xFF);
  return WorkerID::FromBinary(
      std::string(reinterpret_cast<const char *>(data.data()), data.size()));
}
/// Build an ObjectID from a plasma id's binary string; the ObjectID is the
/// first kLength bytes of the plasma id.
ObjectID ObjectID::FromPlasmaIdBinary(const std::string &from) {
  RAY_CHECK(from.size() == kPlasmaIdSize);
  return ObjectID::FromBinary(from.substr(0, ObjectID::kLength));
}
/// Convert this ObjectID to a plasma UniqueID: the ObjectID bytes followed by
/// 0xFF padding up to kPlasmaIdSize (inverse of FromPlasmaIdBinary).
plasma::UniqueID ObjectID::ToPlasmaId() const {
  static_assert(ObjectID::kLength <= kPlasmaIdSize,
                "Currently length of ObjectID must be shorter than plasma's.");
  plasma::UniqueID result;
  std::memcpy(result.mutable_data(), Data(), ObjectID::Size());
  std::fill_n(result.mutable_data() + ObjectID::Size(), kPlasmaIdSize - ObjectID::kLength,
              0xFF);
  return result;
}
/// Construct an ObjectID from a plasma UniqueID by copying its leading bytes.
// NOTE(review): the check allows from.size() < ObjectID::Size(), yet the
// memcpy always reads ObjectID::Size() bytes from `from` — confirm plasma ids
// are always exactly kPlasmaIdSize bytes, otherwise this over-reads.
ObjectID::ObjectID(const plasma::UniqueID &from) {
  RAY_CHECK(from.size() <= static_cast<int64_t>(ObjectID::Size())) << "Out of size.";
  std::memcpy(this->MutableData(), from.data(), ObjectID::Size());
}
/// Read the 16-bit flags field stored immediately after the task-id prefix.
ObjectIDFlagsType ObjectID::GetFlags() const {
  ObjectIDFlagsType flags;
  std::memcpy(&flags, id_ + TaskID::kLength, sizeof(flags));
  return flags;
}
/// Whether the CreatedByTask flag bit is set on this id.
bool ObjectID::CreatedByTask() const { return ::ray::CreatedByTask(this->GetFlags()); }
/// Whether the object-type flag marks this id as created by `ray.put`.
bool ObjectID::IsPutObject() const {
  return ::ray::GetObjectType(this->GetFlags()) == ObjectType::PUT_OBJECT;
}
/// Whether the object-type flag marks this id as a task return value.
bool ObjectID::IsReturnObject() const {
  return ::ray::GetObjectType(this->GetFlags()) == ObjectType::RETURN_OBJECT;
}
/// Return a copy of this id with its transport-type flag bits replaced.
ObjectID ObjectID::WithTransportType(TaskTransportType transport_type) const {
  ObjectID copy = ObjectID::FromBinary(Binary());
  ObjectIDFlagsType flags = GetFlags();
  SetTransportTypeFlag(static_cast<uint8_t>(transport_type), &flags);
  // Write the updated flags back into the copy's flags slot.
  std::memcpy(copy.id_ + TaskID::kLength, &flags, sizeof(flags));
  return copy;
}
/// Copy of this id tagged with the plasma (RAYLET) transport type.
ObjectID ObjectID::WithPlasmaTransportType() const {
  return WithTransportType(TaskTransportType::RAYLET);
}
/// Copy of this id tagged with the direct-call transport type.
ObjectID ObjectID::WithDirectTransportType() const {
  return WithTransportType(TaskTransportType::DIRECT);
}
/// Extract the transport-type bits from this id's flags field.
uint8_t ObjectID::GetTransportType() const {
  return ::ray::GetTransportType(this->GetFlags());
}
// This code is from https://sites.google.com/site/murmurhash/
// and is public domain.
// MurmurHash2 (64-bit variant): mixes the key in 8-byte words, then folds in
// the 1-7 trailing bytes via the fall-through switch below.
uint64_t MurmurHash64A(const void *key, int len, unsigned int seed) {
  const uint64_t m = 0xc6a4a7935bd1e995;
  const int r = 47;
  uint64_t h = seed ^ (len * m);
  const uint64_t *data = reinterpret_cast<const uint64_t *>(key);
  const uint64_t *end = data + (len / 8);
  while (data != end) {
    uint64_t k = *data++;
    k *= m;
    k ^= k >> r;
    k *= m;
    h ^= k;
    h *= m;
  }
  const unsigned char *data2 = reinterpret_cast<const unsigned char *>(data);
  // Each case intentionally falls through so all remaining tail bytes are
  // accumulated before the final multiply.
  switch (len & 7) {
  case 7:
    h ^= uint64_t(data2[6]) << 48;
  case 6:
    h ^= uint64_t(data2[5]) << 40;
  case 5:
    h ^= uint64_t(data2[4]) << 32;
  case 4:
    h ^= uint64_t(data2[3]) << 24;
  case 3:
    h ^= uint64_t(data2[2]) << 16;
  case 2:
    h ^= uint64_t(data2[1]) << 8;
  case 1:
    h ^= uint64_t(data2[0]);
    h *= m;
  };
  // Final avalanche.
  h ^= h >> r;
  h *= m;
  h ^= h >> r;
  return h;
}
/// Build an ActorID: hashed unique bytes followed by the owning job's bytes.
ActorID ActorID::Of(const JobID &job_id, const TaskID &parent_task_id,
                    const size_t parent_task_counter) {
  auto data = GenerateUniqueBytes(job_id, parent_task_id, parent_task_counter,
                                  ActorID::kUniqueBytesLength);
  // Append the job id so JobId() can be recovered from the actor id.
  std::copy_n(job_id.Data(), JobID::kLength, std::back_inserter(data));
  RAY_CHECK(data.size() == kLength);
  return ActorID::FromBinary(data);
}
/// Build the "nil actor" id for a job: 0xFF unique bytes + the job id bytes.
ActorID ActorID::NilFromJob(const JobID &job_id) {
  std::string data(kUniqueBytesLength, 0);
  FillNil(&data);
  std::copy_n(job_id.Data(), JobID::kLength, std::back_inserter(data));
  RAY_CHECK(data.size() == kLength);
  return ActorID::FromBinary(data);
}
/// Recover the owning JobID from the trailing bytes of this actor id.
JobID ActorID::JobId() const {
  RAY_CHECK(!IsNil());
  return JobID::FromBinary(std::string(
      reinterpret_cast<const char *>(this->Data() + kUniqueBytesLength), JobID::kLength));
}
/// Build the driver task's id: 0xFF unique bytes + the job's nil-actor bytes.
TaskID TaskID::ForDriverTask(const JobID &job_id) {
  std::string data(kUniqueBytesLength, 0);
  FillNil(&data);
  const auto dummy_actor_id = ActorID::NilFromJob(job_id);
  std::copy_n(dummy_actor_id.Data(), ActorID::kLength, std::back_inserter(data));
  RAY_CHECK(data.size() == TaskID::kLength);
  return TaskID::FromBinary(data);
}
/// Build a fully random task id (all bytes random, no embedded job/actor).
TaskID TaskID::ForFakeTask() {
  std::string data(kLength, 0);
  FillRandom(&data);
  return TaskID::FromBinary(data);
}
/// Build the actor-creation task id: 0xFF unique bytes + the actor id bytes.
TaskID TaskID::ForActorCreationTask(const ActorID &actor_id) {
  std::string data(kUniqueBytesLength, 0);
  FillNil(&data);
  std::copy_n(actor_id.Data(), ActorID::kLength, std::back_inserter(data));
  RAY_CHECK(data.size() == TaskID::kLength);
  return TaskID::FromBinary(data);
}
/// Build an actor task id: hashed unique bytes + the actor id bytes.
TaskID TaskID::ForActorTask(const JobID &job_id, const TaskID &parent_task_id,
                            size_t parent_task_counter, const ActorID &actor_id) {
  std::string data = GenerateUniqueBytes(job_id, parent_task_id, parent_task_counter,
                                         TaskID::kUniqueBytesLength);
  std::copy_n(actor_id.Data(), ActorID::kLength, std::back_inserter(data));
  RAY_CHECK(data.size() == TaskID::kLength);
  return TaskID::FromBinary(data);
}
/// Build a normal (non-actor) task id: hashed unique bytes + the job's
/// nil-actor bytes in the actor-id slot.
TaskID TaskID::ForNormalTask(const JobID &job_id, const TaskID &parent_task_id,
                             size_t parent_task_counter) {
  std::string data = GenerateUniqueBytes(job_id, parent_task_id, parent_task_counter,
                                         TaskID::kUniqueBytesLength);
  const auto dummy_actor_id = ActorID::NilFromJob(job_id);
  std::copy_n(dummy_actor_id.Data(), ActorID::kLength, std::back_inserter(data));
  RAY_CHECK(data.size() == TaskID::kLength);
  return TaskID::FromBinary(data);
}
/// Recover the embedded ActorID from the trailing bytes of this task id.
ActorID TaskID::ActorId() const {
  return ActorID::FromBinary(std::string(
      reinterpret_cast<const char *>(id_ + kUniqueBytesLength), ActorID::Size()));
}
/// The job id is recovered via the embedded actor id.
JobID TaskID::JobId() const { return ActorId().JobId(); }
/// Derive a driver task id by truncating the driver's WorkerID to task size.
TaskID TaskID::ComputeDriverTaskId(const WorkerID &driver_id) {
  std::string driver_id_str = driver_id.Binary();
  driver_id_str.resize(Size());
  return TaskID::FromBinary(driver_id_str);
}
/// Recover the creating TaskID from the leading bytes of this object id.
TaskID ObjectID::TaskId() const {
  return TaskID::FromBinary(
      std::string(reinterpret_cast<const char *>(id_), TaskID::Size()));
}
/// Build the id of the `put_index`-th object put by `task_id`.
/// put_index is 1-based and bounded by kMaxObjectIndex.
ObjectID ObjectID::ForPut(const TaskID &task_id, ObjectIDIndexType put_index,
                          uint8_t transport_type) {
  RAY_CHECK(put_index >= 1 && put_index <= kMaxObjectIndex) << "index=" << put_index;
  ObjectIDFlagsType flags = 0x0000;
  SetCreatedByTaskFlag(true, &flags);
  SetObjectTypeFlag(ObjectType::PUT_OBJECT, &flags);
  SetTransportTypeFlag(transport_type, &flags);
  return GenerateObjectId(task_id.Binary(), flags, put_index);
}
/// Read the creation index stored after the task-id prefix and flags field.
ObjectIDIndexType ObjectID::ObjectIndex() const {
  ObjectIDIndexType index;
  std::memcpy(&index, id_ + TaskID::kLength + kFlagsBytesLength, sizeof(index));
  return index;
}
/// Build the id of the `return_index`-th return value of `task_id`.
/// return_index is 1-based and bounded by kMaxObjectIndex.
ObjectID ObjectID::ForTaskReturn(const TaskID &task_id, ObjectIDIndexType return_index,
                                 uint8_t transport_type) {
  RAY_CHECK(return_index >= 1 && return_index <= kMaxObjectIndex)
      << "index=" << return_index;
  ObjectIDFlagsType flags = 0x0000;
  SetCreatedByTaskFlag(true, &flags);
  SetObjectTypeFlag(ObjectType::RETURN_OBJECT, &flags);
  SetTransportTypeFlag(transport_type, &flags);
  return GenerateObjectId(task_id.Binary(), flags, return_index);
}
/// Build a random object id: random task-id prefix, CreatedByTask=false,
/// index left at zero.
ObjectID ObjectID::FromRandom() {
  ObjectIDFlagsType flags = 0x0000;
  SetCreatedByTaskFlag(false, &flags);
  // No need to set transport type for a random object id.
  // No need to assign put_index/return_index bytes.
  std::vector<uint8_t> task_id_bytes(TaskID::kLength, 0x0);
  FillRandom(&task_id_bytes);
  return GenerateObjectId(
      std::string(reinterpret_cast<const char *>(task_id_bytes.data()),
                  task_id_bytes.size()),
      flags);
}
/// Assemble an ObjectID from its three fields, laid out as:
/// [task id bytes][flags (2 bytes)][object index (4 bytes)].
ObjectID ObjectID::GenerateObjectId(const std::string &task_id_binary,
                                    ObjectIDFlagsType flags,
                                    ObjectIDIndexType object_index) {
  RAY_CHECK(task_id_binary.size() == TaskID::Size());
  ObjectID ret = ObjectID::Nil();
  std::memcpy(ret.id_, task_id_binary.c_str(), TaskID::kLength);
  std::memcpy(ret.id_ + TaskID::kLength, &flags, sizeof(flags));
  std::memcpy(ret.id_ + TaskID::kLength + kFlagsBytesLength, &object_index,
              sizeof(object_index));
  return ret;
}
/// Build a JobID from a 16-bit integer.
// NOTE(review): the integer is copied as native bytes, so the binary form is
// endianness-dependent — confirm ids never cross machines of mixed endianness.
JobID JobID::FromInt(uint16_t value) {
  std::vector<uint8_t> data(JobID::Size(), 0);
  std::memcpy(data.data(), &value, JobID::Size());
  return JobID::FromBinary(
      std::string(reinterpret_cast<const char *>(data.data()), data.size()));
}
// Define operator<< for each ID type: prints "NIL_ID" for nil ids, the hex
// representation otherwise.
#define ID_OSTREAM_OPERATOR(id_type)                              \
  std::ostream &operator<<(std::ostream &os, const id_type &id) { \
    if (id.IsNil()) {                                             \
      os << "NIL_ID";                                             \
    } else {                                                      \
      os << id.Hex();                                             \
    }                                                             \
    return os;                                                    \
  }

ID_OSTREAM_OPERATOR(UniqueID);
ID_OSTREAM_OPERATOR(JobID);
ID_OSTREAM_OPERATOR(ActorID);
ID_OSTREAM_OPERATOR(TaskID);
ID_OSTREAM_OPERATOR(ObjectID);
} // namespace ray
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
src/ray/common/id.h
|
C/C++ Header
|
#ifndef RAY_ID_H_
#define RAY_ID_H_
#include <inttypes.h>
#include <limits.h>
#include <chrono>
#include <cstring>
#include <mutex>
#include <random>
#include <string>
#include "plasma/common.h"
#include "ray/common/constants.h"
#include "ray/util/logging.h"
#include "ray/util/util.h"
#include "ray/util/visibility.h"
namespace ray {
/// How task outputs are transferred: via the raylet/plasma store, or
/// directly between workers.
enum class TaskTransportType { RAYLET, DIRECT };

// Forward declarations.
class TaskID;
class WorkerID;
class UniqueID;
class JobID;

/// TODO(qwang): These 2 helper functions should be removed
/// once we separated the `WorkerID` from `UniqueID`.
///
/// A helper function that get the `DriverID` of the given job.
WorkerID ComputeDriverIdFromJob(const JobID &job_id);
/// The type of this object. `PUT_OBJECT` indicates this object
/// is generated through `ray.put` during the task's execution.
/// And `RETURN_OBJECT` indicates this object is the return value
/// of a task.
enum class ObjectType : uint8_t {
  PUT_OBJECT = 0x0,
  RETURN_OBJECT = 0x1,
};

/// 16-bit field packing an ObjectID's CreatedByTask/ObjectType/TransportType
/// flags (bit layout defined in id.cc).
using ObjectIDFlagsType = uint16_t;
/// 1-based index of an object within the task that created it.
using ObjectIDIndexType = uint32_t;

// Declaration.
uint64_t MurmurHash64A(const void *key, int len, unsigned int seed);

// Change the compiler alignment to 1 byte (default is 8).
#pragma pack(push, 1)
/// The `ID`s of Ray.
///
/// Please refer to the specification of Ray UniqueIDs.
/// https://github.com/ray-project/ray/blob/master/src/ray/design_docs/id_specification.md
///
/// CRTP base: each derived class T supplies its own `id_` byte array and
/// `Size()`; BaseID provides construction, comparison, hashing and encoding
/// on top of that storage.
template <typename T>
class BaseID {
 public:
  BaseID();
  // Warning: this can duplicate IDs after a fork() call. We assume this never happens.
  static T FromRandom();
  static T FromBinary(const std::string &binary);
  static const T &Nil();
  static size_t Size() { return T::Size(); }

  size_t Hash() const;
  bool IsNil() const;
  bool operator==(const BaseID &rhs) const;
  bool operator!=(const BaseID &rhs) const;
  const uint8_t *Data() const;
  std::string Binary() const;
  std::string Hex() const;

 protected:
  BaseID(const std::string &binary) {
    std::memcpy(const_cast<uint8_t *>(this->Data()), binary.data(), T::Size());
  }
  // All IDs are immutable for hash evaluations. MutableData is only allow to use
  // in construction time, so this function is protected.
  uint8_t *MutableData();
  // For lazy evaluation, be careful to have one Id contained in another.
  // This hash code will be duplicated.
  mutable size_t hash_ = 0;
};
/// A generic full-length (kUniqueIDSize bytes) Ray id.
class UniqueID : public BaseID<UniqueID> {
 public:
  static size_t Size() { return kUniqueIDSize; }

  UniqueID() : BaseID() {}

 protected:
  UniqueID(const std::string &binary);

 protected:
  // Raw id bytes; BaseID accesses this storage via Data()/MutableData().
  uint8_t id_[kUniqueIDSize];
};
/// A 2-byte job id, constructed from an integer rather than randomly.
class JobID : public BaseID<JobID> {
 public:
  static constexpr int64_t kLength = 2;

  static JobID FromInt(uint16_t value);

  static size_t Size() { return kLength; }

  // Warning: this can duplicate IDs after a fork() call. We assume this never happens.
  static JobID FromRandom() = delete;

  JobID() : BaseID() {}

 private:
  // Raw id bytes; BaseID accesses this storage via Data()/MutableData().
  uint8_t id_[kLength];
};
/// Actor id layout: [4 hashed unique bytes][2-byte job id].
class ActorID : public BaseID<ActorID> {
 private:
  static constexpr size_t kUniqueBytesLength = 4;

 public:
  /// Length of `ActorID` in bytes.
  static constexpr size_t kLength = kUniqueBytesLength + JobID::kLength;

  /// Size of `ActorID` in bytes.
  ///
  /// \return Size of `ActorID` in bytes.
  static size_t Size() { return kLength; }

  /// Creates an `ActorID` by hashing the given information.
  ///
  /// \param job_id The job id to which this actor belongs.
  /// \param parent_task_id The id of the task which created this actor.
  /// \param parent_task_counter The counter of the parent task.
  ///
  /// \return The random `ActorID`.
  static ActorID Of(const JobID &job_id, const TaskID &parent_task_id,
                    const size_t parent_task_counter);

  /// Creates a nil ActorID with the given job.
  ///
  /// \param job_id The job id to which this actor belongs.
  ///
  /// \return The `ActorID` with unique bytes being nil.
  static ActorID NilFromJob(const JobID &job_id);

  // Warning: this can duplicate IDs after a fork() call. We assume this never happens.
  static ActorID FromRandom() = delete;

  /// Constructor of `ActorID`.
  ActorID() : BaseID() {}

  /// Get the job id to which this actor belongs.
  ///
  /// \return The job id to which this actor belongs.
  JobID JobId() const;

 private:
  // Raw id bytes; BaseID accesses this storage via Data()/MutableData().
  uint8_t id_[kLength];
};
/// Task id layout: [8 hashed unique bytes][6-byte actor id].
class TaskID : public BaseID<TaskID> {
 private:
  static constexpr size_t kUniqueBytesLength = 8;

 public:
  static constexpr size_t kLength = kUniqueBytesLength + ActorID::kLength;

  TaskID() : BaseID() {}

  static size_t Size() { return kLength; }

  static TaskID ComputeDriverTaskId(const WorkerID &driver_id);

  // Warning: this can duplicate IDs after a fork() call. We assume this never happens.
  static TaskID FromRandom() = delete;

  /// The ID generated for driver task.
  static TaskID ForDriverTask(const JobID &job_id);

  /// Generate driver task id for the given job.
  static TaskID ForFakeTask();

  /// Creates a TaskID for an actor creation task.
  ///
  /// \param actor_id The ID of the actor that will be created
  /// by this actor creation task.
  ///
  /// \return The ID of the actor creation task.
  static TaskID ForActorCreationTask(const ActorID &actor_id);

  /// Creates a TaskID for actor task.
  ///
  /// \param job_id The ID of the job to which this task belongs.
  /// \param parent_task_id The ID of the parent task which submitted this task.
  /// \param parent_task_counter A count of the number of tasks submitted by the
  /// parent task before this one.
  /// \param actor_id The ID of the actor to which this task belongs.
  ///
  /// \return The ID of the actor task.
  static TaskID ForActorTask(const JobID &job_id, const TaskID &parent_task_id,
                             size_t parent_task_counter, const ActorID &actor_id);

  /// Creates a TaskID for normal task.
  ///
  /// \param job_id The ID of the job to which this task belongs.
  /// \param parent_task_id The ID of the parent task which submitted this task.
  /// \param parent_task_counter A count of the number of tasks submitted by the
  /// parent task before this one.
  ///
  /// \return The ID of the normal task.
  static TaskID ForNormalTask(const JobID &job_id, const TaskID &parent_task_id,
                              size_t parent_task_counter);

  /// Get the id of the actor to which this task belongs.
  ///
  /// \return The `ActorID` of the actor which creates this task.
  ActorID ActorId() const;

  /// Get the id of the job to which this task belongs.
  ///
  /// \return The `JobID` of the job which creates this task.
  JobID JobId() const;

 private:
  // Raw id bytes; BaseID accesses this storage via Data()/MutableData().
  uint8_t id_[kLength];
};
/// Object id layout: [14-byte task id][2-byte flags][4-byte object index].
class ObjectID : public BaseID<ObjectID> {
 private:
  static constexpr size_t kIndexBytesLength = sizeof(ObjectIDIndexType);

  static constexpr size_t kFlagsBytesLength = sizeof(ObjectIDFlagsType);

 public:
  /// The maximum number of objects that can be returned or put by a task.
  static constexpr int64_t kMaxObjectIndex = ((int64_t)1 << kObjectIdIndexSize) - 1;

  /// The length of ObjectID in bytes.
  static constexpr size_t kLength =
      kIndexBytesLength + kFlagsBytesLength + TaskID::kLength;

  ObjectID() : BaseID() {}

  /// The maximum index of object.
  ///
  /// It also means the max number of objects created (put or return) by one task.
  ///
  /// \return The maximum index of object.
  static uint64_t MaxObjectIndex() { return kMaxObjectIndex; }

  static size_t Size() { return kLength; }

  /// Generate ObjectID by the given binary string of a plasma id.
  ///
  /// \param from The binary string of the given plasma id.
  /// \return The ObjectID converted from a binary string of the plasma id.
  static ObjectID FromPlasmaIdBinary(const std::string &from);

  plasma::ObjectID ToPlasmaId() const;

  ObjectID(const plasma::UniqueID &from);

  /// Get the index of this object in the task that created it.
  ///
  /// \return The index of object creation according to the task that created
  /// this object.
  ObjectIDIndexType ObjectIndex() const;

  /// Compute the task ID of the task that created the object.
  ///
  /// \return The task ID of the task that created this object.
  TaskID TaskId() const;

  /// Whether this object is created by a task.
  ///
  /// \return True if this object is created by a task, otherwise false.
  bool CreatedByTask() const;

  /// Whether this object was created through `ray.put`.
  ///
  /// \return True if this object was created through `ray.put`.
  bool IsPutObject() const;

  /// Whether this object was created as a return object of a task.
  ///
  /// \return True if this object is a return value of a task.
  bool IsReturnObject() const;

  /// Return if this is a direct actor call object.
  ///
  /// \return True if this is a direct actor object return.
  bool IsDirectCallType() const {
    return GetTransportType() == static_cast<uint8_t>(TaskTransportType::DIRECT);
  }

  /// Return this object id with a changed transport type.
  ///
  /// \return Copy of this object id with the specified transport type.
  ObjectID WithTransportType(TaskTransportType transport_type) const;

  /// Return this object id with the plasma transport type.
  ///
  /// \return Copy of this object id with the plasma transport type.
  ObjectID WithPlasmaTransportType() const;

  /// Return this object id with the direct call transport type.
  ///
  /// \return Copy of this object id with the direct call transport type.
  ObjectID WithDirectTransportType() const;

  /// Get the transport type of this object.
  ///
  /// \return The type of the transport which is used to transfer this object.
  uint8_t GetTransportType() const;

  /// Compute the object ID of an object put by the task.
  ///
  /// \param task_id The task ID of the task that created the object.
  /// \param index What index of the object put in the task.
  /// \param transport_type Which type of the transport that is used to
  /// transfer this object.
  ///
  /// \return The computed object ID.
  static ObjectID ForPut(const TaskID &task_id, ObjectIDIndexType put_index,
                         uint8_t transport_type);

  /// Compute the object ID of an object returned by the task.
  ///
  /// \param task_id The task ID of the task that created the object.
  /// \param return_index What index of the object returned by in the task.
  /// \param transport_type Which type of the transport that is used to
  /// transfer this object.
  ///
  /// \return The computed object ID.
  static ObjectID ForTaskReturn(const TaskID &task_id, ObjectIDIndexType return_index,
                                uint8_t transport_type);

  /// Create an object id randomly.
  ///
  /// Warning: this can duplicate IDs after a fork() call. We assume this
  /// never happens.
  ///
  /// \param transport_type Which type of the transport that is used to
  /// transfer this object.
  ///
  /// \return A random object id.
  static ObjectID FromRandom();

 private:
  /// A helper method to generate an ObjectID.
  static ObjectID GenerateObjectId(const std::string &task_id_binary,
                                   ObjectIDFlagsType flags,
                                   ObjectIDIndexType object_index = 0);

  /// Get the flags out of this object id.
  ObjectIDFlagsType GetFlags() const;

 private:
  // Raw id bytes; BaseID accesses this storage via Data()/MutableData().
  uint8_t id_[kLength];
};
// Compile-time layout checks: with #pragma pack(1) above, each ID object is
// exactly its byte payload plus the cached hash (size_t).
static_assert(sizeof(JobID) == JobID::kLength + sizeof(size_t),
              "JobID size is not as expected");
static_assert(sizeof(ActorID) == ActorID::kLength + sizeof(size_t),
              "ActorID size is not as expected");
static_assert(sizeof(TaskID) == TaskID::kLength + sizeof(size_t),
              "TaskID size is not as expected");
static_assert(sizeof(ObjectID) == ObjectID::kLength + sizeof(size_t),
              "ObjectID size is not as expected");

// Stream operators, defined via ID_OSTREAM_OPERATOR in id.cc.
std::ostream &operator<<(std::ostream &os, const UniqueID &id);
std::ostream &operator<<(std::ostream &os, const JobID &id);
std::ostream &operator<<(std::ostream &os, const ActorID &id);
std::ostream &operator<<(std::ostream &os, const TaskID &id);
std::ostream &operator<<(std::ostream &os, const ObjectID &id);
// Stamp out a distinct UniqueID subclass per id kind listed in id_def.h,
// so e.g. WorkerID and ClientID are not interchangeable at compile time.
#define DEFINE_UNIQUE_ID(type)                                                 \
  class RAY_EXPORT type : public UniqueID {                                    \
   public:                                                                     \
    explicit type(const UniqueID &from) {                                      \
      std::memcpy(&id_, from.Data(), kUniqueIDSize);                           \
    }                                                                          \
    type() : UniqueID() {}                                                     \
    static type FromRandom() { return type(UniqueID::FromRandom()); }          \
    static type FromBinary(const std::string &binary) { return type(binary); } \
    static type Nil() { return type(UniqueID::Nil()); }                        \
    static size_t Size() { return kUniqueIDSize; }                             \
                                                                               \
   private:                                                                    \
    explicit type(const std::string &binary) {                                 \
      std::memcpy(&id_, binary.data(), kUniqueIDSize);                         \
    }                                                                          \
  };

#include "id_def.h"

#undef DEFINE_UNIQUE_ID

// Restore the compiler alignment to default (8 bytes).
#pragma pack(pop)
/// Default-construct as the nil id: every payload byte set to 0xFF.
template <typename T>
BaseID<T>::BaseID() {
  // Using const_cast to directly change data is dangerous. The cached
  // hash may not be changed. This is used in construction time.
  std::fill_n(this->MutableData(), T::Size(), 0xff);
}
/// Construct an id with uniformly random payload bytes.
template <typename T>
T BaseID<T>::FromRandom() {
  std::string data(T::Size(), 0);
  FillRandom(&data);
  return T::FromBinary(data);
}
/// Construct an id from its exact binary representation; the input length
/// must equal T::Size().
template <typename T>
T BaseID<T>::FromBinary(const std::string &binary) {
  RAY_CHECK(binary.size() == T::Size())
      << "expected size is " << T::Size() << ", but got " << binary.size();
  T t = T::Nil();
  std::memcpy(t.MutableData(), binary.data(), T::Size());
  return t;
}
/// The singleton nil id (all payload bytes 0xFF, from the default ctor).
template <typename T>
const T &BaseID<T>::Nil() {
  static const T nil_id;
  return nil_id;
}
/// Whether this id equals the nil id.
template <typename T>
bool BaseID<T>::IsNil() const {
  static T nil_id = T::Nil();
  return *this == nil_id;
}
/// Lazily computed MurmurHash64A of the payload, cached in hash_.
template <typename T>
size_t BaseID<T>::Hash() const {
  // Note(ashione): hash code lazy calculation(it's invoked every time if hash code is
  // default value 0)
  if (!hash_) {
    hash_ = MurmurHash64A(Data(), T::Size(), 0);
  }
  return hash_;
}
/// Byte-wise equality over the payload.
template <typename T>
bool BaseID<T>::operator==(const BaseID &rhs) const {
  return std::memcmp(Data(), rhs.Data(), T::Size()) == 0;
}
/// Negation of byte-wise equality.
template <typename T>
bool BaseID<T>::operator!=(const BaseID &rhs) const {
  return !(*this == rhs);
}
/// Writable pointer to the payload, which starts right after the hash_
/// member (the #pragma pack(1) layout makes this offset arithmetic valid).
template <typename T>
uint8_t *BaseID<T>::MutableData() {
  return reinterpret_cast<uint8_t *>(this) + sizeof(hash_);
}
/// Read-only pointer to the payload (same offset arithmetic as MutableData).
template <typename T>
const uint8_t *BaseID<T>::Data() const {
  return reinterpret_cast<const uint8_t *>(this) + sizeof(hash_);
}
/// The payload as a binary std::string (inverse of FromBinary).
template <typename T>
std::string BaseID<T>::Binary() const {
  return std::string(reinterpret_cast<const char *>(Data()), T::Size());
}
/// Return the lowercase hexadecimal encoding of this id's payload.
template <typename T>
std::string BaseID<T>::Hex() const {
  constexpr char hex[] = "0123456789abcdef";
  const uint8_t *id = Data();
  std::string result;
  // Each byte expands to two characters; reserving avoids reallocations.
  result.reserve(2 * T::Size());
  // size_t index matches T::Size()'s type; the original `int i` triggered a
  // signed/unsigned comparison warning on every instantiation.
  for (size_t i = 0; i < T::Size(); i++) {
    unsigned int val = id[i];
    result.push_back(hex[val >> 4]);
    result.push_back(hex[val & 0xf]);
  }
  return result;
}
} // namespace ray
namespace std {
// Specialize std::hash for every ID type (including those generated from
// id_def.h) by delegating to the cached BaseID::Hash().
#define DEFINE_UNIQUE_ID(type)                                           \
  template <>                                                            \
  struct hash<::ray::type> {                                             \
    size_t operator()(const ::ray::type &id) const { return id.Hash(); } \
  };                                                                     \
  template <>                                                            \
  struct hash<const ::ray::type> {                                       \
    size_t operator()(const ::ray::type &id) const { return id.Hash(); } \
  };

DEFINE_UNIQUE_ID(UniqueID);
DEFINE_UNIQUE_ID(JobID);
DEFINE_UNIQUE_ID(ActorID);
DEFINE_UNIQUE_ID(TaskID);
DEFINE_UNIQUE_ID(ObjectID);
#include "id_def.h"
#undef DEFINE_UNIQUE_ID
}  // namespace std
#endif // RAY_ID_H_
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.