file_path
stringlengths 3
280
| file_language
stringclasses 66
values | content
stringlengths 1
1.04M
| repo_name
stringlengths 5
92
| repo_stars
int64 0
154k
| repo_description
stringlengths 0
402
| repo_primary_language
stringclasses 108
values | developer_username
stringlengths 1
25
| developer_name
stringlengths 0
30
| developer_company
stringlengths 0
82
|
|---|---|---|---|---|---|---|---|---|---|
rllib/agents/ppo/ppo_policy.py
|
Python
|
import logging
import ray
from ray.rllib.evaluation.postprocessing import compute_advantages, \
Postprocessing
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import LearningRateSchedule, \
EntropyCoeffSchedule, ACTION_LOGP
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.utils.tf_ops import make_tf_callable
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
# Frozen logits of the policy that computed the action
BEHAVIOUR_LOGITS = "behaviour_logits"
class PPOLoss:
    """Builds the PPO objective: clipped surrogate policy loss, adaptive KL
    penalty, (clipped) value-function loss, and entropy bonus, all reduced
    over the valid (non-padded) timesteps only."""

    def __init__(self,
                 action_space,
                 dist_class,
                 model,
                 value_targets,
                 advantages,
                 actions,
                 prev_logits,
                 prev_actions_logp,
                 vf_preds,
                 curr_action_dist,
                 value_fn,
                 cur_kl_coeff,
                 valid_mask,
                 entropy_coeff=0,
                 clip_param=0.1,
                 vf_clip_param=0.1,
                 vf_loss_coeff=1.0,
                 use_gae=True,
                 model_config=None):
        """Constructs the loss for Proximal Policy Objective.

        Arguments:
            action_space: Environment action space specification.
            dist_class: action distribution class for logits.
            model: ModelV2 instance, passed through to `dist_class` when
                building action distributions.
            value_targets (Placeholder): Placeholder for target values; used
                for GAE.
            actions (Placeholder): Placeholder for actions taken
                from previous model evaluation.
            advantages (Placeholder): Placeholder for calculated advantages
                from previous model evaluation.
            prev_logits (Placeholder): Placeholder for logits output from
                previous model evaluation.
            prev_actions_logp (Placeholder): Placeholder for action log-prob
                output from previous model evaluation.
            vf_preds (Placeholder): Placeholder for value function output
                from previous model evaluation.
            curr_action_dist (ActionDistribution): ActionDistribution
                of the current model.
            value_fn (Tensor): Current value function output Tensor.
            cur_kl_coeff (Variable): Variable holding the current PPO KL
                coefficient.
            valid_mask (Tensor): A bool mask of valid input elements (#2992).
            entropy_coeff (float): Coefficient of the entropy regularizer.
            clip_param (float): Clip parameter.
            vf_clip_param (float): Clip parameter for the value function.
            vf_loss_coeff (float): Coefficient of the value function loss.
            use_gae (bool): If true, use the Generalized Advantage Estimator.
            model_config (dict): (Optional) model config for use in specifying
                action distributions.
        """

        def reduce_mean_valid(t):
            # Mean over only the valid (non-padded) elements of t.
            return tf.reduce_mean(tf.boolean_mask(t, valid_mask))

        # Distribution under the frozen behaviour logits that produced the
        # sampled actions.
        prev_dist = dist_class(prev_logits, model)
        # Make loss functions.
        # Importance ratio pi_new(a|s) / pi_old(a|s), computed in log space
        # for numerical stability.
        logp_ratio = tf.exp(
            curr_action_dist.logp(actions) - prev_actions_logp)
        action_kl = prev_dist.kl(curr_action_dist)
        self.mean_kl = reduce_mean_valid(action_kl)

        curr_entropy = curr_action_dist.entropy()
        self.mean_entropy = reduce_mean_valid(curr_entropy)

        # Clipped surrogate objective: the pessimistic minimum of the
        # unclipped and clipped advantage-weighted ratios.
        surrogate_loss = tf.minimum(
            advantages * logp_ratio,
            advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
                                          1 + clip_param))
        self.mean_policy_loss = reduce_mean_valid(-surrogate_loss)

        if use_gae:
            # Value loss, also clipped: penalize the worse (max) of the
            # clipped and unclipped squared errors.
            vf_loss1 = tf.square(value_fn - value_targets)
            vf_clipped = vf_preds + tf.clip_by_value(
                value_fn - vf_preds, -vf_clip_param, vf_clip_param)
            vf_loss2 = tf.square(vf_clipped - value_targets)
            vf_loss = tf.maximum(vf_loss1, vf_loss2)
            self.mean_vf_loss = reduce_mean_valid(vf_loss)
            loss = reduce_mean_valid(
                -surrogate_loss + cur_kl_coeff * action_kl +
                vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy)
        else:
            # Without GAE there is no value-function learning.
            self.mean_vf_loss = tf.constant(0.0)
            loss = reduce_mean_valid(-surrogate_loss +
                                     cur_kl_coeff * action_kl -
                                     entropy_coeff * curr_entropy)
        self.loss = loss
def ppo_surrogate_loss(policy, model, dist_class, train_batch):
    """Builds the PPO loss tensor for `policy` from a train batch.

    For recurrent models, padded timesteps beyond each sequence's length
    are masked out of every mean in the loss.
    """
    logits, state = model.from_batch(train_batch)
    action_dist = dist_class(logits, model)

    if state:
        # RNN case: mark only the first `seq_lens[i]` steps of each
        # (flattened) sequence as valid.
        max_seq_len = tf.reduce_max(train_batch["seq_lens"])
        mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len)
        mask = tf.reshape(mask, [-1])
    else:
        # Feed-forward case: every batch element is valid.
        mask = tf.ones_like(
            train_batch[Postprocessing.ADVANTAGES], dtype=tf.bool)

    # Keep the loss object on the policy so kl_and_loss_stats can report
    # its sub-losses.
    policy.loss_obj = PPOLoss(
        policy.action_space,
        dist_class,
        model,
        train_batch[Postprocessing.VALUE_TARGETS],
        train_batch[Postprocessing.ADVANTAGES],
        train_batch[SampleBatch.ACTIONS],
        train_batch[BEHAVIOUR_LOGITS],
        train_batch[ACTION_LOGP],
        train_batch[SampleBatch.VF_PREDS],
        action_dist,
        model.value_function(),
        policy.kl_coeff,
        mask,
        entropy_coeff=policy.entropy_coeff,
        clip_param=policy.config["clip_param"],
        vf_clip_param=policy.config["vf_clip_param"],
        vf_loss_coeff=policy.config["vf_loss_coeff"],
        use_gae=policy.config["use_gae"],
        model_config=policy.config["model"])

    return policy.loss_obj.loss
def kl_and_loss_stats(policy, train_batch):
    """Returns the stats dict reported for each PPO training iteration."""
    loss_obj = policy.loss_obj
    stats = {
        "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64),
        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
        "total_loss": loss_obj.loss,
        "policy_loss": loss_obj.mean_policy_loss,
        "vf_loss": loss_obj.mean_vf_loss,
    }
    # How much of the return variance the value head explains.
    stats["vf_explained_var"] = explained_variance(
        train_batch[Postprocessing.VALUE_TARGETS],
        policy.model.value_function())
    stats["kl"] = loss_obj.mean_kl
    stats["entropy"] = loss_obj.mean_entropy
    stats["entropy_coeff"] = tf.cast(policy.entropy_coeff, tf.float64)
    return stats
def vf_preds_and_logits_fetches(policy):
    """Adds value function and logits outputs to experience train_batches."""
    model = policy.model
    fetches = {}
    fetches[SampleBatch.VF_PREDS] = model.value_function()
    fetches[BEHAVIOUR_LOGITS] = model.last_output()
    return fetches
def postprocess_ppo_gae(policy,
                        sample_batch,
                        other_agent_batches=None,
                        episode=None):
    """Adds the policy logits, VF preds, and advantages to the trajectory."""

    completed = sample_batch["dones"][-1]
    if completed:
        # Episode terminated: nothing to bootstrap.
        last_r = 0.0
    else:
        # Trajectory was truncated mid-episode: bootstrap the value of the
        # last observation with the policy's value head, feeding the final
        # RNN state (if any).
        next_state = []
        for i in range(policy.num_state_tensors()):
            next_state.append([sample_batch["state_out_{}".format(i)][-1]])
        last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1],
                               sample_batch[SampleBatch.ACTIONS][-1],
                               sample_batch[SampleBatch.REWARDS][-1],
                               *next_state)
    batch = compute_advantages(
        sample_batch,
        last_r,
        policy.config["gamma"],
        policy.config["lambda"],
        use_gae=policy.config["use_gae"])
    return batch
def clip_gradients(policy, optimizer, loss):
    """Computes gradients, applying global-norm clipping when configured.

    Stores the (possibly clipped) gradient list on `policy.grads`.
    """
    variables = policy.model.trainable_variables()
    grads_and_vars = optimizer.compute_gradients(loss, variables)
    if policy.config["grad_clip"] is None:
        # No clipping requested: return the raw (grad, var) pairs.
        return grads_and_vars
    raw_grads = [grad for grad, _ in grads_and_vars]
    policy.grads, _ = tf.clip_by_global_norm(raw_grads,
                                             policy.config["grad_clip"])
    return list(zip(policy.grads, variables))
class KLCoeffMixin:
    """Adds an adaptively updated KL penalty coefficient to the policy."""

    def __init__(self, config):
        # KL Coefficient
        # Python-side copy of the coefficient; the authoritative value for
        # the loss graph lives in the TF variable below.
        self.kl_coeff_val = config["kl_coeff"]
        self.kl_target = config["kl_target"]
        # Non-trainable so the optimizer never touches it; it is updated
        # out-of-band via `.load()` in update_kl().
        self.kl_coeff = tf.get_variable(
            initializer=tf.constant_initializer(self.kl_coeff_val),
            name="kl_coeff",
            shape=(),
            trainable=False,
            dtype=tf.float32)

    def update_kl(self, sampled_kl):
        # Adaptive KL rule: tighten the penalty when the measured KL
        # overshoots the target by 2x, relax it when it undershoots by 2x.
        if sampled_kl > 2.0 * self.kl_target:
            self.kl_coeff_val *= 1.5
        elif sampled_kl < 0.5 * self.kl_target:
            self.kl_coeff_val *= 0.5
        # Push the new Python-side value into the TF variable.
        self.kl_coeff.load(self.kl_coeff_val, session=self.get_session())
        return self.kl_coeff_val
class ValueNetworkMixin:
    """Adds `self._value()`: a single-observation value estimate used to
    bootstrap advantages at trajectory truncation points."""

    def __init__(self, obs_space, action_space, config):
        if config["use_gae"]:

            @make_tf_callable(self.get_session())
            def value(ob, prev_action, prev_reward, *state):
                # Forward pass on a batch of size 1 (hence the wrapping of
                # each input in a singleton list), with is_training=False.
                model_out, _ = self.model({
                    SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]),
                    SampleBatch.PREV_ACTIONS: tf.convert_to_tensor(
                        [prev_action]),
                    SampleBatch.PREV_REWARDS: tf.convert_to_tensor(
                        [prev_reward]),
                    "is_training": tf.convert_to_tensor(False),
                }, [tf.convert_to_tensor([s]) for s in state],
                                         tf.convert_to_tensor([1]))
                return self.model.value_function()[0]

        else:

            @make_tf_callable(self.get_session())
            def value(ob, prev_action, prev_reward, *state):
                # Without GAE the bootstrap value is never used; return 0.
                return tf.constant(0.0)

        self._value = value
def setup_config(policy, obs_space, action_space, config):
    """Copies the top-level `vf_share_layers` flag into the model config."""
    # auto set the model option for layer sharing
    share_layers = config["vf_share_layers"]
    config["model"]["vf_share_layers"] = share_layers
def setup_mixins(policy, obs_space, action_space, config):
    """Runs each mixin's constructor on `policy` before loss initialization."""
    ValueNetworkMixin.__init__(policy, obs_space, action_space, config)
    KLCoeffMixin.__init__(policy, config)
    EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
                                  config["entropy_coeff_schedule"])
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
# Assemble the PPO TF policy from the pieces defined above via the
# tf_policy_template.
PPOTFPolicy = build_tf_policy(
    name="PPOTFPolicy",
    get_default_config=lambda: ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG,
    loss_fn=ppo_surrogate_loss,
    stats_fn=kl_and_loss_stats,
    extra_action_fetches_fn=vf_preds_and_logits_fetches,
    postprocess_fn=postprocess_ppo_gae,
    gradients_fn=clip_gradients,
    before_init=setup_config,
    before_loss_init=setup_mixins,
    mixins=[
        LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin,
        ValueNetworkMixin
    ])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ppo/test/test.py
|
Python
|
import unittest
import numpy as np
from numpy.testing import assert_allclose
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.agents.ppo.utils import flatten, concatenate
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
# TODO(ekl): move to rllib/models dir
class DistributionsTest(unittest.TestCase):
    def testCategorical(self):
        """Checks Categorical.sample() frequencies against softmax probs."""
        num_samples = 100000
        logits = tf.placeholder(tf.float32, shape=(None, 10))
        # Random 10-way logits in [-4, 4), tiled so each sample row is
        # drawn from the same distribution.
        z = 8 * (np.random.rand(10) - 0.5)
        data = np.tile(z, (num_samples, 1))
        c = Categorical(logits, {})  # dummy config dict
        sample_op = c.sample()
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        samples = sess.run(sample_op, feed_dict={logits: data})
        counts = np.zeros(10)
        for sample in samples:
            counts[sample] += 1.0
        probs = np.exp(z) / np.sum(np.exp(z))
        # Total variation between empirical and true distribution must be
        # small with this many samples.
        self.assertTrue(np.sum(np.abs(probs - counts / num_samples)) <= 0.01)
class UtilsTest(unittest.TestCase):
    def testFlatten(self):
        """Flattening the first two axes maps [i][j] -> [2 * i + j]."""
        d = {
            "s": np.array([[[1, -1], [2, -2]], [[3, -3], [4, -4]]]),
            "a": np.array([[[5], [-5]], [[6], [-6]]])
        }
        flat = flatten(d.copy(), start=0, stop=2)
        for i in range(2):
            for j in range(2):
                assert_allclose(d["s"][i][j][:], flat["s"][2 * i + j][:])
                assert_allclose(d["a"][i][j], flat["a"][2 * i + j])

    def testConcatenate(self):
        """Dicts are concatenated key-by-key; a singleton list is a no-op."""
        d1 = {"s": np.array([0, 1]), "a": np.array([2, 3])}
        d2 = {"s": np.array([4, 5]), "a": np.array([6, 7])}
        expected = {"s": [0, 1, 4, 5], "a": [2, 3, 6, 7]}
        merged = concatenate([d1, d2])
        for key, vals in expected.items():
            assert_allclose(merged[key], np.array(vals))
        again = concatenate([merged])
        for key, vals in expected.items():
            assert_allclose(again[key], np.array(vals))
# Allow running the tests directly via `python <this file>`.
if __name__ == "__main__":
    unittest.main(verbosity=2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ppo/utils.py
|
Python
|
import numpy as np
def flatten(weights, start=0, stop=2):
    """Collapses axes [start, stop) of every array in a dictionary, in place.

    Args:
        weights: A dictionary mapping keys to numpy arrays.
        start: First axis of the range to collapse.
        stop: End (exclusive) of the axis range to collapse.

    Returns:
        The same dictionary, with each value reshaped so that the axes
        from `start` to `stop` become one flat axis.
    """
    for key in weights:
        arr = weights[key]
        shape = arr.shape
        weights[key] = arr.reshape(shape[:start] + (-1, ) + shape[stop:])
    return weights
def concatenate(weights_list):
    """Concatenates a list of dicts of arrays, key by key.

    Args:
        weights_list: List of dicts that all share the same keys, each
            mapping keys to numpy arrays.

    Returns:
        A single dict mapping each key to the concatenation (along axis 0)
        of that key's arrays across all input dicts. An empty input list
        yields an empty dict (previously this raised IndexError).
    """
    if not weights_list:
        return {}
    keys = weights_list[0].keys()
    return {
        key: np.concatenate([w[key] for w in weights_list])
        for key in keys
    }
def shuffle(trajectory):
    """Applies one shared random permutation to every array in `trajectory`.

    Every key is reindexed with the same permutation so rows stay aligned
    across arrays. Mutates and returns the input dictionary.
    """
    num_rows = trajectory["actions"].shape[0]
    permutation = np.random.permutation(num_rows)
    for key in trajectory:
        trajectory[key] = trajectory[key][permutation]
    return trajectory
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/qmix/__init__.py
|
Python
|
from ray.rllib.agents.qmix.qmix import QMixTrainer, DEFAULT_CONFIG
from ray.rllib.agents.qmix.apex import ApexQMixTrainer
__all__ = ["QMixTrainer", "ApexQMixTrainer", "DEFAULT_CONFIG"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/qmix/apex.py
|
Python
|
"""Experimental: scalable Ape-X variant of QMIX"""
from ray.rllib.agents.dqn.apex import APEX_TRAINER_PROPERTIES
from ray.rllib.agents.qmix.qmix import QMixTrainer, \
DEFAULT_CONFIG as QMIX_CONFIG
from ray.rllib.utils import merge_dicts
# QMIX config overridden with Ape-X style distributed settings: many
# rollout workers, sharded replay, large buffer, and delayed learning start.
APEX_QMIX_DEFAULT_CONFIG = merge_dicts(
    QMIX_CONFIG,  # see also the options in qmix.py, which are also supported
    {
        "optimizer": merge_dicts(
            QMIX_CONFIG["optimizer"],
            {
                "max_weight_sync_delay": 400,
                "num_replay_buffer_shards": 4,
                "batch_replay": True,  # required for RNN. Disables prio.
                "debug": False
            }),
        "num_gpus": 0,
        "num_workers": 32,
        "buffer_size": 2000000,
        "learning_starts": 50000,
        "train_batch_size": 512,
        "sample_batch_size": 50,
        "target_network_update_freq": 500000,
        "timesteps_per_iteration": 25000,
        "per_worker_exploration": True,
        "min_iter_time_s": 30,
    },
)

# The Ape-X QMIX trainer: QMixTrainer with the Ape-X execution properties
# (distributed replay optimizer, etc.) swapped in.
ApexQMixTrainer = QMixTrainer.with_updates(
    name="APEX_QMIX",
    default_config=APEX_QMIX_DEFAULT_CONFIG,
    **APEX_TRAINER_PROPERTIES)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/qmix/mixers.py
|
Python
|
import numpy as np
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
F = nn.functional
class VDNMixer(nn.Module):
    """Value-decomposition mixer: total Q is the plain sum of agent Qs."""

    def __init__(self):
        super(VDNMixer, self).__init__()

    def forward(self, agent_qs, batch):
        # Sum across the agent axis; the global state (`batch`) is unused.
        total_q = agent_qs.sum(dim=2, keepdim=True)
        return total_q
class QMixer(nn.Module):
    """QMIX monotonic mixing network.

    Combines per-agent Q-values into a total Q using mixing weights
    produced by hypernetworks conditioned on the global state. The
    abs() on the hypernetwork outputs keeps the mixing monotonic in
    each agent's Q-value.
    """

    def __init__(self, n_agents, state_shape, mixing_embed_dim):
        super(QMixer, self).__init__()

        self.n_agents = n_agents
        self.embed_dim = mixing_embed_dim
        self.state_dim = int(np.prod(state_shape))

        # Hypernetworks generating the two mixing-layer weight matrices
        # from the flattened global state.
        self.hyper_w_1 = nn.Linear(self.state_dim,
                                   self.embed_dim * self.n_agents)
        self.hyper_w_final = nn.Linear(self.state_dim, self.embed_dim)

        # State dependent bias for hidden layer
        self.hyper_b_1 = nn.Linear(self.state_dim, self.embed_dim)

        # V(s) instead of a bias for the last layers
        self.V = nn.Sequential(
            nn.Linear(self.state_dim, self.embed_dim), nn.ReLU(),
            nn.Linear(self.embed_dim, 1))

    def forward(self, agent_qs, states):
        """Forward pass for the mixer.

        Arguments:
            agent_qs: Tensor of shape [B, T, n_agents, n_actions]
            states: Tensor of shape [B, T, state_dim]

        Returns:
            q_tot tensor of shape [B, T, 1].
        """
        bs = agent_qs.size(0)
        # Fold batch and time into one axis for the bmm-based mixing below.
        states = states.reshape(-1, self.state_dim)
        agent_qs = agent_qs.view(-1, 1, self.n_agents)
        # First layer
        w1 = torch.abs(self.hyper_w_1(states))  # abs => monotonic weights
        b1 = self.hyper_b_1(states)
        w1 = w1.view(-1, self.n_agents, self.embed_dim)
        b1 = b1.view(-1, 1, self.embed_dim)
        hidden = F.elu(torch.bmm(agent_qs, w1) + b1)
        # Second layer
        w_final = torch.abs(self.hyper_w_final(states))
        w_final = w_final.view(-1, self.embed_dim, 1)
        # State-dependent bias
        v = self.V(states).view(-1, 1, 1)
        # Compute final output
        y = torch.bmm(hidden, w_final) + v
        # Reshape and return
        q_tot = y.view(bs, -1, 1)
        return q_tot
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/qmix/model.py
|
Python
|
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_torch
torch, nn = try_import_torch()
F = nn.functional
class RNNModel(TorchModelV2, nn.Module):
    """The default RNN model for QMIX.

    A single fc layer feeding a GRU cell, followed by a linear layer
    emitting one Q-value per action.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        self.obs_size = _get_size(obs_space)
        self.rnn_hidden_dim = model_config["lstm_cell_size"]
        # fc1 -> GRUCell -> fc2 (per-action Q output).
        self.fc1 = nn.Linear(self.obs_size, self.rnn_hidden_dim)
        self.rnn = nn.GRUCell(self.rnn_hidden_dim, self.rnn_hidden_dim)
        self.fc2 = nn.Linear(self.rnn_hidden_dim, num_outputs)

    @override(TorchModelV2)
    def get_initial_state(self):
        # make hidden states on same device as model
        return [self.fc1.weight.new(1, self.rnn_hidden_dim).zero_().squeeze(0)]

    @override(TorchModelV2)
    def forward(self, input_dict, hidden_state, seq_lens):
        """One GRU step: returns Q-values and the updated hidden state."""
        x = F.relu(self.fc1(input_dict["obs_flat"].float()))
        h_in = hidden_state[0].reshape(-1, self.rnn_hidden_dim)
        h = self.rnn(x, h_in)
        q = self.fc2(h)
        return q, [h]
def _get_size(obs_space):
    """Returns the flattened (preprocessed) size of an observation space."""
    preprocessor = get_preprocessor(obs_space)(obs_space)
    return preprocessor.size
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/qmix/qmix.py
|
Python
|
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.agents.qmix.qmix_policy import QMixTorchPolicy
from ray.rllib.optimizers import SyncBatchReplayOptimizer
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # === QMix ===
    # Mixing network. Either "qmix", "vdn", or None
    "mixer": "qmix",
    # Size of the mixing network embedding
    "mixing_embed_dim": 32,
    # Whether to use Double_Q learning
    "double_q": True,
    # Optimize over complete episodes by default.
    "batch_mode": "complete_episodes",

    # === Evaluation ===
    # Evaluate with epsilon=0 every `evaluation_interval` training iterations.
    # The evaluation stats will be reported under the "evaluation" metric key.
    # Note that evaluation is currently not parallelized, and that for Ape-X
    # metrics are already only reported for the lowest epsilon workers.
    "evaluation_interval": None,
    # Number of episodes to run per evaluation period.
    "evaluation_num_episodes": 10,

    # === Exploration ===
    # Max num timesteps for annealing schedules. Exploration is annealed from
    # 1.0 to exploration_fraction over this number of timesteps scaled by
    # exploration_fraction
    "schedule_max_timesteps": 100000,
    # Number of env steps to optimize for before returning
    "timesteps_per_iteration": 1000,
    # Fraction of entire training period over which the exploration rate is
    # annealed
    "exploration_fraction": 0.1,
    # Final value of random action probability
    "exploration_final_eps": 0.02,
    # Update the target network every `target_network_update_freq` steps.
    "target_network_update_freq": 500,

    # === Replay buffer ===
    # Size of the replay buffer in steps.
    "buffer_size": 10000,

    # === Optimization ===
    # Learning rate for RMSProp optimizer
    "lr": 0.0005,
    # RMSProp alpha
    "optim_alpha": 0.99,
    # RMSProp epsilon
    "optim_eps": 0.00001,
    # If not None, clip gradients during optimization at this value
    "grad_norm_clipping": 10,
    # How many steps of the model to sample before learning starts.
    "learning_starts": 1000,
    # Update the replay buffer with this many samples at once. Note that
    # this setting applies per-worker if num_workers > 1.
    "sample_batch_size": 4,
    # Size of a batched sampled from replay buffer for training. Note that
    # if async_updates is set, then each worker returns gradients for a
    # batch of this size.
    "train_batch_size": 32,

    # === Parallelism ===
    # Number of workers for collecting samples with. This only makes sense
    # to increase if your environment is particularly slow to sample, or if
    # you're using the Async or Ape-X optimizers.
    "num_workers": 0,
    # Whether to use a distribution of epsilons across workers for exploration.
    "per_worker_exploration": False,
    # Whether to compute priorities on workers.
    "worker_side_prioritization": False,
    # Prevent iterations from going lower than this time span
    "min_iter_time_s": 1,

    # === Model ===
    "model": {
        "lstm_cell_size": 64,
        "max_seq_len": 999999,
    },
})
# __sphinx_doc_end__
# yapf: enable
def make_sync_batch_optimizer(workers, config):
    """Builds the synchronous batch-replay optimizer used by QMIX."""
    replay_kwargs = {
        "learning_starts": config["learning_starts"],
        "buffer_size": config["buffer_size"],
        "train_batch_size": config["train_batch_size"],
    }
    return SyncBatchReplayOptimizer(workers, **replay_kwargs)
# QMIX trainer: the generic off-policy trainer specialized with the QMIX
# torch policy and a synchronous batch-replay optimizer.
QMixTrainer = GenericOffPolicyTrainer.with_updates(
    name="QMIX",
    default_config=DEFAULT_CONFIG,
    default_policy=QMixTorchPolicy,
    make_policy_optimizer=make_sync_batch_optimizer)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/qmix/qmix_policy.py
|
Python
|
from gym.spaces import Tuple, Discrete, Dict
import logging
import numpy as np
import torch as th
import torch.nn as nn
from torch.optim import RMSprop
from torch.distributions import Categorical
import ray
from ray.rllib.agents.qmix.mixers import VDNMixer, QMixer
from ray.rllib.agents.qmix.model import RNNModel, _get_size
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.policy.policy import TupleActions, Policy
from ray.rllib.policy.rnn_sequencing import chop_into_sequences
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.model import _unpack_obs
from ray.rllib.env.constants import GROUP_REWARDS
from ray.rllib.utils.annotations import override
logger = logging.getLogger(__name__)
# if the obs space is Dict type, look for the global state under this key
ENV_STATE = "state"
class QMixLoss(nn.Module):
    """Computes the QMIX TD loss over padded [B, T, ...] sequence batches.

    Supports optional double-Q target computation and an optional mixing
    network (QMixer / VDNMixer); with mixer=None the per-agent Q-values
    are used directly.
    """

    def __init__(self,
                 model,
                 target_model,
                 mixer,
                 target_mixer,
                 n_agents,
                 n_actions,
                 double_q=True,
                 gamma=0.99):
        nn.Module.__init__(self)
        self.model = model
        self.target_model = target_model
        self.mixer = mixer          # None disables mixing
        self.target_mixer = target_mixer
        self.n_agents = n_agents
        self.n_actions = n_actions
        self.double_q = double_q
        self.gamma = gamma

    def forward(self,
                rewards,
                actions,
                terminated,
                mask,
                obs,
                next_obs,
                action_mask,
                next_action_mask,
                state=None,
                next_state=None):
        """Forward pass of the loss.

        Arguments:
            rewards: Tensor of shape [B, T, n_agents]
            actions: Tensor of shape [B, T, n_agents]
            terminated: Tensor of shape [B, T, n_agents]
            mask: Tensor of shape [B, T, n_agents]
            obs: Tensor of shape [B, T, n_agents, obs_size]
            next_obs: Tensor of shape [B, T, n_agents, obs_size]
            action_mask: Tensor of shape [B, T, n_agents, n_actions]
            next_action_mask: Tensor of shape [B, T, n_agents, n_actions]
            state: Tensor of shape [B, T, state_dim] (optional)
            next_state: Tensor of shape [B, T, state_dim] (optional)

        Returns:
            Tuple of (loss, mask, masked_td_error, chosen_action_qvals,
            targets).
        """

        # Assert either none or both of state and next_state are given
        if state is None and next_state is None:
            state = obs  # default to state being all agents' observations
            next_state = next_obs
        elif (state is None) != (next_state is None):
            raise ValueError("Expected either neither or both of `state` and "
                             "`next_state` to be given. Got: "
                             "\n`state` = {}\n`next_state` = {}".format(
                                 state, next_state))

        # Calculate estimated Q-Values
        mac_out = _unroll_mac(self.model, obs)

        # Pick the Q-Values for the actions taken -> [B * n_agents, T]
        chosen_action_qvals = th.gather(
            mac_out, dim=3, index=actions.unsqueeze(3)).squeeze(3)

        # Calculate the Q-Values necessary for the target
        target_mac_out = _unroll_mac(self.target_model, next_obs)

        # Mask out unavailable actions for the t+1 step
        ignore_action_tp1 = (next_action_mask == 0) & (mask == 1).unsqueeze(-1)
        target_mac_out[ignore_action_tp1] = -np.inf

        # Max over target Q-Values
        if self.double_q:
            # Double Q learning computes the target Q values by selecting the
            # t+1 timestep action according to the "policy" neural network and
            # then estimating the Q-value of that action with the "target"
            # neural network

            # Compute the t+1 Q-values to be used in action selection
            # using next_obs
            mac_out_tp1 = _unroll_mac(self.model, next_obs)

            # mask out unallowed actions
            mac_out_tp1[ignore_action_tp1] = -np.inf

            # obtain best actions at t+1 according to policy NN
            cur_max_actions = mac_out_tp1.argmax(dim=3, keepdim=True)

            # use the target network to estimate the Q-values of policy
            # network's selected actions
            target_max_qvals = th.gather(target_mac_out, 3,
                                         cur_max_actions).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]

        assert target_max_qvals.min().item() != -np.inf, \
            "target_max_qvals contains a masked action; \
            there may be a state with no valid actions."

        # Mix per-agent Q-values into Q_tot (and targets likewise).
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals, state)
            target_max_qvals = self.target_mixer(target_max_qvals, next_state)

        # Calculate 1-step Q-Learning targets
        targets = rewards + self.gamma * (1 - terminated) * target_max_qvals

        # Td-error (targets detached: no gradient through the target net).
        td_error = (chosen_action_qvals - targets.detach())

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error**2).sum() / mask.sum()
        return loss, mask, masked_td_error, chosen_action_qvals, targets
# TODO(sven): Make this a TorchPolicy child.
class QMixTorchPolicy(Policy):
"""QMix impl. Assumes homogeneous agents for now.
You must use MultiAgentEnv.with_agent_groups() to group agents
together for QMix. This creates the proper Tuple obs/action spaces and
populates the '_group_rewards' info field.
Action masking: to specify an action mask for individual agents, use a
dict space with an action_mask key, e.g. {"obs": ob, "action_mask": mask}.
The mask space must be `Box(0, 1, (n_actions,))`.
"""
def __init__(self, obs_space, action_space, config):
    """Builds the Q networks, target networks, mixer, and optimizer.

    Args:
        obs_space: Grouped (Tuple) observation space from with_agent_groups.
        action_space: Grouped (Tuple) action space.
        config: Agent config, merged over the qmix DEFAULT_CONFIG.
    """
    _validate(obs_space, action_space)
    config = dict(ray.rllib.agents.qmix.qmix.DEFAULT_CONFIG, **config)
    super().__init__(obs_space, action_space, config)
    self.n_agents = len(obs_space.original_space.spaces)
    self.n_actions = action_space.spaces[0].n
    self.h_size = config["model"]["lstm_cell_size"]
    self.has_env_global_state = False
    self.has_action_mask = False
    self.device = (th.device("cuda")
                   if th.cuda.is_available() else th.device("cpu"))

    # Agents are assumed homogeneous: inspect agent 0's obs space only.
    agent_obs_space = obs_space.original_space.spaces[0]
    if isinstance(agent_obs_space, Dict):
        space_keys = set(agent_obs_space.spaces.keys())
        if "obs" not in space_keys:
            raise ValueError(
                "Dict obs space must have subspace labeled `obs`")
        self.obs_size = _get_size(agent_obs_space.spaces["obs"])
        if "action_mask" in space_keys:
            mask_shape = tuple(agent_obs_space.spaces["action_mask"].shape)
            if mask_shape != (self.n_actions, ):
                raise ValueError(
                    "Action mask shape must be {}, got {}".format(
                        (self.n_actions, ), mask_shape))
            self.has_action_mask = True
        if ENV_STATE in space_keys:
            self.env_global_state_shape = _get_size(
                agent_obs_space.spaces[ENV_STATE])
            self.has_env_global_state = True
        else:
            # Fall back to using the stacked agent obs as "state".
            self.env_global_state_shape = (self.obs_size, self.n_agents)
        # The real agent obs space is nested inside the dict
        config["model"]["full_obs_space"] = agent_obs_space
        agent_obs_space = agent_obs_space.spaces["obs"]
    else:
        self.obs_size = _get_size(agent_obs_space)

    self.model = ModelCatalog.get_model_v2(
        agent_obs_space,
        action_space.spaces[0],
        self.n_actions,
        config["model"],
        framework="torch",
        name="model",
        default_model=RNNModel).to(self.device)
    self.target_model = ModelCatalog.get_model_v2(
        agent_obs_space,
        action_space.spaces[0],
        self.n_actions,
        config["model"],
        framework="torch",
        name="target_model",
        default_model=RNNModel).to(self.device)

    # Setup the mixer network.
    if config["mixer"] is None:
        # No mixing: agents learn independently.
        self.mixer = None
        self.target_mixer = None
    elif config["mixer"] == "qmix":
        self.mixer = QMixer(self.n_agents, self.env_global_state_shape,
                            config["mixing_embed_dim"]).to(self.device)
        self.target_mixer = QMixer(
            self.n_agents, self.env_global_state_shape,
            config["mixing_embed_dim"]).to(self.device)
    elif config["mixer"] == "vdn":
        self.mixer = VDNMixer().to(self.device)
        self.target_mixer = VDNMixer().to(self.device)
    else:
        raise ValueError("Unknown mixer type {}".format(config["mixer"]))

    self.cur_epsilon = 1.0
    self.update_target()  # initial sync

    # Setup optimizer (mixer parameters are trained jointly with the model).
    self.params = list(self.model.parameters())
    if self.mixer:
        self.params += list(self.mixer.parameters())
    self.loss = QMixLoss(self.model, self.target_model, self.mixer,
                         self.target_mixer, self.n_agents, self.n_actions,
                         self.config["double_q"], self.config["gamma"])
    self.optimiser = RMSprop(
        params=self.params,
        lr=config["lr"],
        alpha=config["optim_alpha"],
        eps=config["optim_eps"])
@override(Policy)
def compute_actions(self,
                    obs_batch,
                    state_batches=None,
                    prev_action_batch=None,
                    prev_reward_batch=None,
                    info_batch=None,
                    episodes=None,
                    **kwargs):
    """Computes epsilon-greedy actions for a batch of grouped observations."""
    obs_batch, action_mask, _ = self._unpack_observation(obs_batch)
    # We need to ensure we do not use the env global state
    # to compute actions

    # Compute actions
    with th.no_grad():
        q_values, hiddens = _mac(
            self.model,
            th.as_tensor(obs_batch, dtype=th.float, device=self.device), [
                th.as_tensor(
                    np.array(s), dtype=th.float, device=self.device)
                for s in state_batches
            ])
        avail = th.as_tensor(
            action_mask, dtype=th.float, device=self.device)
        # Send unavailable actions to -inf so argmax never picks them.
        masked_q_values = q_values.clone()
        masked_q_values[avail == 0.0] = -float("inf")
        # epsilon-greedy action selector; random draws are restricted to
        # the available actions via the mask-weighted Categorical.
        random_numbers = th.rand_like(q_values[:, :, 0])
        pick_random = (random_numbers < self.cur_epsilon).long()
        random_actions = Categorical(avail).sample().long()
        actions = (pick_random * random_actions +
                   (1 - pick_random) * masked_q_values.argmax(dim=2))
        actions = actions.cpu().numpy()
        hiddens = [s.cpu().numpy() for s in hiddens]

    # Transpose [batch, agent] -> per-agent tuples for the grouped env.
    return TupleActions(list(actions.transpose([1, 0]))), hiddens, {}
@override(Policy)
def learn_on_batch(self, samples):
    """Runs one QMIX optimization step on a SampleBatch.

    Returns:
        dict: learner stats under LEARNER_STATS_KEY.
    """
    # Unpack grouped observations into per-agent obs, action masks and
    # (optional) global env state.
    obs_batch, action_mask, env_global_state = self._unpack_observation(
        samples[SampleBatch.CUR_OBS])
    (next_obs_batch, next_action_mask,
     next_env_global_state) = self._unpack_observation(
         samples[SampleBatch.NEXT_OBS])
    group_rewards = self._get_group_rewards(samples[SampleBatch.INFOS])

    input_list = [
        group_rewards, action_mask, next_action_mask,
        samples[SampleBatch.ACTIONS], samples[SampleBatch.DONES],
        obs_batch, next_obs_batch
    ]
    if self.has_env_global_state:
        input_list.extend([env_global_state, next_env_global_state])

    # Chop the flat batch into padded per-episode sequences for the RNN.
    output_list, _, seq_lens = \
        chop_into_sequences(
            samples[SampleBatch.EPS_ID],
            samples[SampleBatch.UNROLL_ID],
            samples[SampleBatch.AGENT_INDEX],
            input_list,
            [],  # RNN states not used here
            max_seq_len=self.config["model"]["max_seq_len"],
            dynamic_max=True)
    # These will be padded to shape [B * T, ...]
    if self.has_env_global_state:
        (rew, action_mask, next_action_mask, act, dones, obs, next_obs,
         env_global_state, next_env_global_state) = output_list
    else:
        (rew, action_mask, next_action_mask, act, dones, obs,
         next_obs) = output_list
    B, T = len(seq_lens), max(seq_lens)

    def to_batches(arr, dtype):
        # Reshape [B * T, ...] -> [B, T, ...] and move to the device.
        new_shape = [B, T] + list(arr.shape[1:])
        return th.as_tensor(
            np.reshape(arr, new_shape), dtype=dtype, device=self.device)

    rewards = to_batches(rew, th.float)
    actions = to_batches(act, th.long)
    obs = to_batches(obs, th.float).reshape(
        [B, T, self.n_agents, self.obs_size])
    action_mask = to_batches(action_mask, th.float)
    next_obs = to_batches(next_obs, th.float).reshape(
        [B, T, self.n_agents, self.obs_size])
    next_action_mask = to_batches(next_action_mask, th.float)
    if self.has_env_global_state:
        env_global_state = to_batches(env_global_state, th.float)
        next_env_global_state = to_batches(next_env_global_state, th.float)

    # TODO(ekl) this treats group termination as individual termination
    terminated = to_batches(dones, th.float).unsqueeze(2).expand(
        B, T, self.n_agents)

    # Create mask for where index is < unpadded sequence length
    filled = np.reshape(
        np.tile(np.arange(T, dtype=np.float32), B),
        [B, T]) < np.expand_dims(seq_lens, 1)
    mask = th.as_tensor(
        filled, dtype=th.float, device=self.device).unsqueeze(2).expand(
            B, T, self.n_agents)

    # Compute loss
    loss_out, mask, masked_td_error, chosen_action_qvals, targets = (
        self.loss(rewards, actions, terminated, mask, obs, next_obs,
                  action_mask, next_action_mask, env_global_state,
                  next_env_global_state))

    # Optimise
    self.optimiser.zero_grad()
    loss_out.backward()
    grad_norm = th.nn.utils.clip_grad_norm_(
        self.params, self.config["grad_norm_clipping"])
    self.optimiser.step()

    # All stats are normalized by the number of valid (unpadded) elements.
    mask_elems = mask.sum().item()
    stats = {
        "loss": loss_out.item(),
        "grad_norm": grad_norm
        if isinstance(grad_norm, float) else grad_norm.item(),
        "td_error_abs": masked_td_error.abs().sum().item() / mask_elems,
        "q_taken_mean": (chosen_action_qvals * mask).sum().item() /
        mask_elems,
        "target_mean": (targets * mask).sum().item() / mask_elems,
    }
    return {LEARNER_STATS_KEY: stats}
@override(Policy)
def get_initial_state(self):  # initial RNN state
    # Tile the model's initial hidden state across all grouped agents.
    return [
        s.expand([self.n_agents, -1]).cpu().numpy()
        for s in self.model.get_initial_state()
    ]
@override(Policy)
def get_weights(self):
    """Returns all networks' weights as CPU/numpy-valued state dicts."""
    return {
        "model": self._cpu_dict(self.model.state_dict()),
        "target_model": self._cpu_dict(self.target_model.state_dict()),
        # Mixers may be disabled (config["mixer"] is None).
        "mixer": self._cpu_dict(self.mixer.state_dict())
        if self.mixer else None,
        "target_mixer": self._cpu_dict(self.target_mixer.state_dict())
        if self.mixer else None,
    }
@override(Policy)
def set_weights(self, weights):
self.model.load_state_dict(self._device_dict(weights["model"]))
self.target_model.load_state_dict(
self._device_dict(weights["target_model"]))
if weights["mixer"] is not None:
self.mixer.load_state_dict(self._device_dict(weights["mixer"]))
self.target_mixer.load_state_dict(
self._device_dict(weights["target_mixer"]))
@override(Policy)
def get_state(self):
state = self.get_weights()
state["cur_epsilon"] = self.cur_epsilon
return state
@override(Policy)
def set_state(self, state):
self.set_weights(state)
self.set_epsilon(state["cur_epsilon"])
    def update_target(self):
        """Hard-copy the live network weights into the target networks."""
        self.target_model.load_state_dict(self.model.state_dict())
        # The mixer is optional (e.g. plain VDN/IQN-style setups run
        # without one), so only sync it when present.
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        logger.debug("Updated target networks")
    def set_epsilon(self, epsilon):
        """Set the current exploration epsilon (also checkpointed via
        get_state/set_state)."""
        self.cur_epsilon = epsilon
def _get_group_rewards(self, info_batch):
group_rewards = np.array([
info.get(GROUP_REWARDS, [0.0] * self.n_agents)
for info in info_batch
])
return group_rewards
def _device_dict(self, state_dict):
return {
k: th.as_tensor(v, device=self.device)
for k, v in state_dict.items()
}
@staticmethod
def _cpu_dict(state_dict):
return {k: v.cpu().detach().numpy() for k, v in state_dict.items()}
    def _unpack_observation(self, obs_batch):
        """Unpacks the observation, action mask, and state (if present)
        from agent grouping.

        Returns:
            obs (np.ndarray): obs tensor of shape [B, n_agents, obs_size]
            mask (np.ndarray): action mask of shape
                [B, n_agents, n_actions]; all-ones if the env provides none
            state (np.ndarray or None): env global state, or None if it is
                not in the batch
        """
        # _unpack_obs restores the grouped (Tuple) structure from the flat
        # obs vector; each entry of `unpacked` corresponds to one agent.
        unpacked = _unpack_obs(
            np.array(obs_batch, dtype=np.float32),
            self.observation_space.original_space,
            tensorlib=np)
        if self.has_action_mask:
            obs = np.concatenate(
                [o["obs"] for o in unpacked],
                axis=1).reshape([len(obs_batch), self.n_agents, self.obs_size])
            action_mask = np.concatenate(
                [o["action_mask"] for o in unpacked], axis=1).reshape(
                    [len(obs_batch), self.n_agents, self.n_actions])
        else:
            # Per-agent entries may be plain arrays or dicts with an "obs"
            # key (e.g. when a global state accompanies the obs).
            if isinstance(unpacked[0], dict):
                unpacked_obs = [u["obs"] for u in unpacked]
            else:
                unpacked_obs = unpacked
            obs = np.concatenate(
                unpacked_obs,
                axis=1).reshape([len(obs_batch), self.n_agents, self.obs_size])
            # No mask provided by the env: every action is valid.
            action_mask = np.ones(
                [len(obs_batch), self.n_agents, self.n_actions],
                dtype=np.float32)
        if self.has_env_global_state:
            # NOTE(review): the global state is taken from agent 0 only --
            # presumably it is identical across agents; confirm upstream.
            state = unpacked[0][ENV_STATE]
        else:
            state = None
        return obs, action_mask, state
def _validate(obs_space, action_space):
    """Sanity-check that the grouped spaces are usable by QMix.

    Raises:
        ValueError: if the spaces are not grouped Tuples of homogeneous,
            discrete per-agent spaces.
    """
    # Grouping (MultiAgentEnv.with_agent_groups) wraps the per-agent
    # spaces in a Tuple and records the original space on the obs space.
    if not hasattr(obs_space, "original_space") or \
            not isinstance(obs_space.original_space, Tuple):
        raise ValueError(
            "Obs space must be a Tuple, got {}. Use "
            "MultiAgentEnv.with_agent_groups() to group related "
            "agents for QMix.".format(obs_space))
    if not isinstance(action_space, Tuple):
        raise ValueError(
            "Action space must be a Tuple, got {}. "
            "Use MultiAgentEnv.with_agent_groups() to group related "
            "agents for QMix.".format(action_space))
    if not isinstance(action_space.spaces[0], Discrete):
        raise ValueError(
            "QMix requires a discrete action space, got {}".format(
                action_space.spaces[0]))
    # All agents must share one obs space and one action space, since a
    # single shared model processes every agent.
    if len({str(space) for space in obs_space.original_space.spaces}) > 1:
        raise ValueError(
            "Implementation limitation: observations of grouped agents "
            "must be homogeneous, got {}".format(
                obs_space.original_space.spaces))
    if len({str(space) for space in action_space.spaces}) > 1:
        raise ValueError(
            "Implementation limitation: action space of grouped agents "
            "must be homogeneous, got {}".format(action_space.spaces))
def _mac(model, obs, h):
    """Forward pass of the multi-agent controller.

    Folds the agent dimension into the batch dimension, runs the shared
    model once, then restores the [B, n_agents, ...] layout.

    Arguments:
        model: TorchModelV2 class
        obs: Tensor of shape [B, n_agents, obs_size]
        h: List of tensors of shape [B, n_agents, h_size]

    Returns:
        q_vals: Tensor of shape [B, n_agents, n_actions]
        h: List of tensors of shape [B, n_agents, h_size]
    """
    B, n_agents = obs.size(0), obs.size(1)
    if not isinstance(obs, dict):
        obs = {"obs": obs}
    obs_flat = {key: _drop_agent_dim(val) for key, val in obs.items()}
    state_flat = [s.reshape([B * n_agents, -1]) for s in h]
    q_flat, state_flat = model(obs_flat, state_flat, None)
    q_vals = q_flat.reshape([B, n_agents, -1])
    state = [s.reshape([B, n_agents, -1]) for s in state_flat]
    return q_vals, state
def _unroll_mac(model, obs_tensor):
    """Computes the estimated Q values for an entire trajectory batch.

    Arguments:
        model: TorchModelV2 class
        obs_tensor: Tensor of shape [B, T, n_agents, obs_size]

    Returns:
        Tensor of per-step Q values, stacked along a new time axis
        (shape [B, T, n_agents, n_actions]).
    """
    B, T, n_agents = (obs_tensor.size(0), obs_tensor.size(1),
                      obs_tensor.size(2))
    # Tile the model's initial RNN state across batch and agents.
    h = [s.expand([B, n_agents, -1]) for s in model.get_initial_state()]
    q_seq = []
    for t in range(T):
        q_t, h = _mac(model, obs_tensor[:, t], h)
        q_seq.append(q_t)
    return th.stack(q_seq, dim=1)
def _drop_agent_dim(T):
shape = list(T.shape)
B, n_agents = shape[0], shape[1]
return T.reshape([B * n_agents] + shape[2:])
def _add_agent_dim(T, n_agents):
shape = list(T.shape)
B = shape[0] // n_agents
assert shape[0] % n_agents == 0
return T.reshape([B, n_agents] + shape[1:])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/registry.py
|
Python
|
"""Registry of algorithm names for `rllib train --run=<alg_name>`"""
import traceback
from ray.rllib.contrib.registry import CONTRIBUTED_ALGORITHMS
# Lazy import helpers: each returns the Trainer class for one algorithm.
# The imports happen at call time so that resolving an algorithm name does
# not import (or fail on) every other algorithm's dependencies.
def _import_sac():
    from ray.rllib.agents import sac
    return sac.SACTrainer
def _import_appo():
    from ray.rllib.agents import ppo
    return ppo.APPOTrainer
def _import_qmix():
    from ray.rllib.agents import qmix
    return qmix.QMixTrainer
def _import_apex_qmix():
    from ray.rllib.agents import qmix
    return qmix.ApexQMixTrainer
def _import_ddpg():
    from ray.rllib.agents import ddpg
    return ddpg.DDPGTrainer
def _import_apex_ddpg():
    from ray.rllib.agents import ddpg
    return ddpg.ApexDDPGTrainer
def _import_td3():
    from ray.rllib.agents import ddpg
    return ddpg.TD3Trainer
def _import_ppo():
    from ray.rllib.agents import ppo
    return ppo.PPOTrainer
def _import_es():
    from ray.rllib.agents import es
    return es.ESTrainer
def _import_ars():
    from ray.rllib.agents import ars
    return ars.ARSTrainer
def _import_dqn():
    from ray.rllib.agents import dqn
    return dqn.DQNTrainer
def _import_simple_q():
    from ray.rllib.agents import dqn
    return dqn.SimpleQTrainer
def _import_apex():
    from ray.rllib.agents import dqn
    return dqn.ApexTrainer
def _import_a3c():
    from ray.rllib.agents import a3c
    return a3c.A3CTrainer
def _import_a2c():
    from ray.rllib.agents import a3c
    return a3c.A2CTrainer
def _import_pg():
    from ray.rllib.agents import pg
    return pg.PGTrainer
def _import_impala():
    from ray.rllib.agents import impala
    return impala.ImpalaTrainer
def _import_marwil():
    from ray.rllib.agents import marwil
    return marwil.MARWILTrainer
# Maps the public algorithm name (as accepted by `rllib train --run=<name>`)
# to its lazy importer above.
ALGORITHMS = {
    "SAC": _import_sac,
    "DDPG": _import_ddpg,
    "APEX_DDPG": _import_apex_ddpg,
    "TD3": _import_td3,
    "PPO": _import_ppo,
    "ES": _import_es,
    "ARS": _import_ars,
    "DQN": _import_dqn,
    "SimpleQ": _import_simple_q,
    "APEX": _import_apex,
    "A3C": _import_a3c,
    "A2C": _import_a2c,
    "PG": _import_pg,
    "IMPALA": _import_impala,
    "QMIX": _import_qmix,
    "APEX_QMIX": _import_apex_qmix,
    "APPO": _import_appo,
    "MARWIL": _import_marwil,
}
def get_agent_class(alg):
    """Returns the class of a known agent given its name."""
    try:
        return _get_agent_class(alg)
    except ImportError:
        # One of the algorithm's dependencies failed to import.
        # NOTE(review): _agent_import_failed presumably returns a stub that
        # surfaces this traceback when used -- see ray.rllib.agents.mock.
        from ray.rllib.agents.mock import _agent_import_failed
        return _agent_import_failed(traceback.format_exc())
def _get_agent_class(alg):
    """Resolve an algorithm name to its Trainer class.

    Checks the built-in registry first, then contributed algorithms, then
    a handful of special/testing names.
    """
    if alg in ALGORITHMS:
        return ALGORITHMS[alg]()
    if alg in CONTRIBUTED_ALGORITHMS:
        return CONTRIBUTED_ALGORITHMS[alg]()
    if alg == "script":
        from ray.tune import script_runner
        return script_runner.ScriptRunner
    if alg == "__fake":
        from ray.rllib.agents.mock import _MockTrainer
        return _MockTrainer
    if alg == "__sigmoid_fake_data":
        from ray.rllib.agents.mock import _SigmoidFakeData
        return _SigmoidFakeData
    if alg == "__parameter_tuning":
        from ray.rllib.agents.mock import _ParameterTuningTrainer
        return _ParameterTuningTrainer
    raise Exception("Unknown algorithm {}.".format(alg))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/sac/__init__.py
|
Python
|
from ray.rllib.agents.sac.sac import SACTrainer, DEFAULT_CONFIG
from ray.rllib.utils import renamed_agent
# Legacy alias for the pre-Trainer naming scheme; presumably emits a
# deprecation notice via renamed_agent -- prefer SACTrainer.
SACAgent = renamed_agent(SACTrainer)
__all__ = [
    "SACTrainer",
    "DEFAULT_CONFIG",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/sac/sac.py
|
Python
|
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.agents.sac.sac_policy import SACTFPolicy
# Config keys forwarded to the policy optimizer.
# NOTE(review): membership inferred from the key names -- confirm against
# GenericOffPolicyTrainer before extending this list.
OPTIMIZER_SHARED_CONFIGS = [
    "buffer_size", "prioritized_replay", "prioritized_replay_alpha",
    "prioritized_replay_beta", "prioritized_replay_eps", "sample_batch_size",
    "train_batch_size", "learning_starts"
]
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # === Model ===
    "twin_q": True,
    "use_state_preprocessor": False,
    "policy": "GaussianLatentSpacePolicy",
    # RLlib model options for the Q function
    "Q_model": {
        "hidden_activation": "relu",
        "hidden_layer_sizes": (256, 256),
    },
    # RLlib model options for the policy function
    "policy_model": {
        "hidden_activation": "relu",
        "hidden_layer_sizes": (256, 256),
    },
    # Unsquash actions to the upper and lower bounds of env's action space
    "normalize_actions": True,
    # === Learning ===
    # Update the target by \tau * policy + (1-\tau) * target_policy
    "tau": 5e-3,
    # Target entropy lower bound. This is the inverse of reward scale,
    # and will be optimized automatically.
    "target_entropy": "auto",
    # Disable setting done=True at end of episode.
    "no_done_at_end": True,
    # N-step target updates
    "n_step": 1,
    # === Evaluation ===
    # The evaluation stats will be reported under the "evaluation" metric key.
    "evaluation_interval": 1,
    # Number of episodes to run per evaluation period.
    "evaluation_num_episodes": 1,
    # Extra configuration that disables exploration.
    "evaluation_config": {
        "exploration_enabled": False,
    },
    # === Exploration ===
    # Number of env steps to optimize for before returning
    "timesteps_per_iteration": 100,
    "exploration_enabled": True,
    # === Replay buffer ===
    # Size of the replay buffer. Note that if async_updates is set, then
    # each worker will have a replay buffer of this size.
    "buffer_size": int(1e6),
    # If True prioritized replay buffer will be used.
    # TODO(hartikainen): Make sure this works or remove the option.
    "prioritized_replay": False,
    "prioritized_replay_alpha": 0.6,
    "prioritized_replay_beta": 0.4,
    "prioritized_replay_eps": 1e-6,
    "beta_annealing_fraction": 0.2,
    "final_prioritized_replay_beta": 0.4,
    "compress_observations": False,
    # === Optimization ===
    # Separate learning rates for the actor, critic, and entropy
    # temperature (alpha) optimizers.
    "optimization": {
        "actor_learning_rate": 3e-4,
        "critic_learning_rate": 3e-4,
        "entropy_learning_rate": 3e-4,
    },
    # If not None, clip gradients during optimization at this value
    "grad_norm_clipping": None,
    # How many steps of the model to sample before learning starts.
    "learning_starts": 1500,
    # Update the replay buffer with this many samples at once. Note that this
    # setting applies per-worker if num_workers > 1.
    "sample_batch_size": 1,
    # Size of a batched sampled from replay buffer for training. Note that
    # if async_updates is set, then each worker returns gradients for a
    # batch of this size.
    "train_batch_size": 256,
    # Update the target network every `target_network_update_freq` steps.
    "target_network_update_freq": 0,
    # === Parallelism ===
    # Whether to use a GPU for local optimization.
    "num_gpus": 0,
    # Number of workers for collecting samples with. This only makes sense
    # to increase if your environment is particularly slow to sample, or if
    # you're using the Async or Ape-X optimizers.
    "num_workers": 0,
    # Whether to allocate GPUs for workers (if > 0).
    "num_gpus_per_worker": 0,
    # Whether to allocate CPUs for workers (if > 0).
    "num_cpus_per_worker": 1,
    # Whether to compute priorities on workers.
    "worker_side_prioritization": False,
    # Prevent iterations from going lower than this time span
    "min_iter_time_s": 1,
    # TODO(ekl) these are unused; remove them from sac config
    "per_worker_exploration": False,
    "exploration_fraction": 0.1,
    "schedule_max_timesteps": 100000,
    "exploration_final_eps": 0.02,
})
# __sphinx_doc_end__
# yapf: enable
# SAC reuses the generic off-policy (DQN-style) training loop with its own
# TF policy and default config.
SACTrainer = GenericOffPolicyTrainer.with_updates(
    name="SAC", default_config=DEFAULT_CONFIG, default_policy=SACTFPolicy)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/sac/sac_model.py
|
Python
|
import numpy as np
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils import try_import_tf, try_import_tfp
tf = try_import_tf()
tfp = try_import_tfp()
SCALE_DIAG_MIN_MAX = (-20, 2)
def SquashBijector():
    """Return a fresh tanh bijector instance.

    The class is defined lazily inside this factory since it depends on
    tfp being importable.
    """
    class SquashBijector(tfp.bijectors.Bijector):
        """tanh bijector mapping R -> (-1, 1), used to bound actions."""
        def __init__(self, validate_args=False, name="tanh"):
            super(SquashBijector, self).__init__(
                forward_min_event_ndims=0,
                validate_args=validate_args,
                name=name)
        def _forward(self, x):
            return tf.nn.tanh(x)
        def _inverse(self, y):
            return tf.atanh(y)
        def _forward_log_det_jacobian(self, x):
            # Numerically stable form of log|d tanh(x)/dx|
            # = log(1 - tanh(x)^2).
            return 2. * (np.log(2.) - x - tf.nn.softplus(-2. * x))
    return SquashBijector()
class SACModel(TFModelV2):
    """Extension of standard TFModel for SAC.

    Data flow:
        obs -> forward() -> model_out
        model_out -> get_policy_output() -> pi(s)
        model_out, actions -> get_q_values() -> Q(s, a)
        model_out, actions -> get_twin_q_values() -> Q_twin(s, a)

    Note that this class by itself is not a valid model unless you
    implement forward() in a subclass."""
    def __init__(self,
                 obs_space,
                 action_space,
                 num_outputs,
                 model_config,
                 name,
                 actor_hidden_activation="relu",
                 actor_hiddens=(256, 256),
                 critic_hidden_activation="relu",
                 critic_hiddens=(256, 256),
                 twin_q=False):
        """Initialize variables of this model.

        Extra model kwargs:
            actor_hidden_activation (str): activation for actor network
            actor_hiddens (list): hidden layers sizes for actor network
            critic_hidden_activation (str): activation for critic network
            critic_hiddens (list): hidden layers sizes for critic network
            twin_q (bool): build twin Q networks

        Note that the core layers for forward() are not defined here, this
        only defines the layers for the output heads. Those layers for
        forward() should be defined in subclasses of SACModel.
        """
        if tfp is None:
            raise ImportError("tensorflow-probability package not found")
        super(SACModel, self).__init__(obs_space, action_space, num_outputs,
                                       model_config, name)
        self.action_dim = np.product(action_space.shape)
        # Symbolic inputs for the functional Keras sub-models built below.
        self.model_out = tf.keras.layers.Input(
            shape=(num_outputs, ), name="model_out")
        self.actions = tf.keras.layers.Input(
            shape=(self.action_dim, ), name="actions")
        # Policy head: MLP producing the concatenated mean (shift) and
        # log-std (scale_diag) of a diagonal Gaussian.
        shift_and_log_scale_diag = tf.keras.Sequential([
            tf.keras.layers.Dense(
                units=hidden,
                activation=getattr(tf.nn, actor_hidden_activation),
                name="action_hidden_{}".format(i))
            for i, hidden in enumerate(actor_hiddens)
        ] + [
            tf.keras.layers.Dense(
                units=2 * self.action_dim, activation=None, name="action_out")
        ])(self.model_out)
        shift, log_scale_diag = tf.keras.layers.Lambda(
            lambda shift_and_log_scale_diag: tf.split(
                shift_and_log_scale_diag,
                num_or_size_splits=2,
                axis=-1)
        )(shift_and_log_scale_diag)
        # Clamp the log-std into SCALE_DIAG_MIN_MAX for numerical
        # stability of exp() and log_prob().
        log_scale_diag = tf.keras.layers.Lambda(
            lambda log_sd: tf.clip_by_value(log_sd, *SCALE_DIAG_MIN_MAX))(
                log_scale_diag)
        shift_and_log_scale_diag = tf.keras.layers.Concatenate(axis=-1)(
            [shift, log_scale_diag])
        batch_size = tf.keras.layers.Lambda(lambda x: tf.shape(input=x)[0])(
            self.model_out)
        # Reparameterization: draw latents ~ N(0, I), one per batch row,
        # then transform them into action samples below.
        base_distribution = tfp.distributions.MultivariateNormalDiag(
            loc=tf.zeros(self.action_dim), scale_diag=tf.ones(self.action_dim))
        latents = tf.keras.layers.Lambda(
            lambda batch_size: base_distribution.sample(batch_size))(
                batch_size)
        # NOTE(review): despite its name, this attribute is assigned the
        # *latents* tensor, not the distribution parameters -- confirm no
        # caller relies on it holding shift/log-scale values.
        self.shift_and_log_scale_diag = latents
        self.latents_model = tf.keras.Model(self.model_out, latents)
        def raw_actions_fn(inputs):
            # Affine-transform the unit-Gaussian latents into samples
            # from N(shift, exp(log_scale_diag)).
            shift, log_scale_diag, latents = inputs
            bijector = tfp.bijectors.Affine(
                shift=shift, scale_diag=tf.exp(log_scale_diag))
            actions = bijector.forward(latents)
            return actions
        raw_actions = tf.keras.layers.Lambda(raw_actions_fn)(
            (shift, log_scale_diag, latents))
        squash_bijector = (SquashBijector())
        # Squash the Gaussian samples into (-1, 1) with tanh.
        actions = tf.keras.layers.Lambda(
            lambda raw_actions: squash_bijector.forward(raw_actions))(
                raw_actions)
        self.actions_model = tf.keras.Model(self.model_out, actions)
        # Deterministic path: squash the distribution mean directly,
        # bypassing sampling.
        deterministic_actions = tf.keras.layers.Lambda(
            lambda shift: squash_bijector.forward(shift))(shift)
        self.deterministic_actions_model = tf.keras.Model(
            self.model_out, deterministic_actions)
        def log_pis_fn(inputs):
            # log pi(a|s) of the squashed Gaussian via the change-of-
            # variables formula encoded in the bijector chain.
            shift, log_scale_diag, actions = inputs
            base_distribution = tfp.distributions.MultivariateNormalDiag(
                loc=tf.zeros(self.action_dim),
                scale_diag=tf.ones(self.action_dim))
            bijector = tfp.bijectors.Chain((
                squash_bijector,
                tfp.bijectors.Affine(
                    shift=shift, scale_diag=tf.exp(log_scale_diag)),
            ))
            distribution = (tfp.distributions.TransformedDistribution(
                distribution=base_distribution, bijector=bijector))
            log_pis = distribution.log_prob(actions)[:, None]
            return log_pis
        self.actions_input = tf.keras.layers.Input(
            shape=(self.action_dim, ), name="actions")
        log_pis_for_action_input = tf.keras.layers.Lambda(log_pis_fn)(
            [shift, log_scale_diag, self.actions_input])
        self.log_pis_model = tf.keras.Model(
            (self.model_out, self.actions_input), log_pis_for_action_input)
        # log_pis_model shares the policy-head layers, so registering the
        # actions_model variables covers the whole actor.
        self.register_variables(self.actions_model.variables)
        def build_q_net(name, observations, actions):
            # One Q head: concat(obs embedding, actions) -> MLP -> scalar.
            q_net = tf.keras.Sequential([
                tf.keras.layers.Concatenate(axis=1),
            ] + [
                tf.keras.layers.Dense(
                    units=units,
                    activation=getattr(tf.nn, critic_hidden_activation),
                    name="{}_hidden_{}".format(name, i))
                for i, units in enumerate(critic_hiddens)
            ] + [
                tf.keras.layers.Dense(
                    units=1, activation=None, name="{}_out".format(name))
            ])
            # TODO(hartikainen): Remove the unnecessary Model call here
            q_net = tf.keras.Model([observations, actions],
                                   q_net([observations, actions]))
            return q_net
        self.q_net = build_q_net("q", self.model_out, self.actions)
        self.register_variables(self.q_net.variables)
        if twin_q:
            self.twin_q_net = build_q_net("twin_q", self.model_out,
                                          self.actions)
            self.register_variables(self.twin_q_net.variables)
        else:
            self.twin_q_net = None
        # Entropy temperature alpha, optimized in log space so that
        # alpha = exp(log_alpha) stays positive.
        self.log_alpha = tf.Variable(0.0, dtype=tf.float32, name="log_alpha")
        self.alpha = tf.exp(self.log_alpha)
        self.register_variables([self.log_alpha])
    def get_policy_output(self, model_out, deterministic=False):
        """Return the policy (squashed action) output.

        Arguments:
            model_out (Tensor): obs embeddings from the model layers, of shape
                [BATCH_SIZE, num_outputs].
            deterministic (bool): if True, return the squashed distribution
                mean instead of a sample.

        Returns:
            (actions, log_pis) tuple; actions has shape
            [BATCH_SIZE, action_dim], and log_pis is None in the
            deterministic case.
        """
        if deterministic:
            actions = self.deterministic_actions_model(model_out)
            log_pis = None
        else:
            actions = self.actions_model(model_out)
            log_pis = self.log_pis_model((model_out, actions))
        return actions, log_pis
    def get_q_values(self, model_out, actions):
        """Return the Q estimates for the most recent forward pass.

        This implements Q(s, a).

        Arguments:
            model_out (Tensor): obs embeddings from the model layers, of shape
                [BATCH_SIZE, num_outputs].
            actions (Tensor): action values that correspond with the most
                recent batch of observations passed through forward(), of shape
                [BATCH_SIZE, action_dim].

        Returns:
            tensor of shape [BATCH_SIZE].
        """
        return self.q_net([model_out, actions])
    def get_twin_q_values(self, model_out, actions):
        """Same as get_q_values but using the twin Q net.

        This implements the twin Q(s, a).

        Arguments:
            model_out (Tensor): obs embeddings from the model layers, of shape
                [BATCH_SIZE, num_outputs].
            actions (Tensor): action values that correspond with the most
                recent batch of observations passed through forward(), of shape
                [BATCH_SIZE, action_dim].

        Returns:
            tensor of shape [BATCH_SIZE].
        """
        return self.twin_q_net([model_out, actions])
    def policy_variables(self):
        """Return the list of variables for the policy net."""
        return list(self.actions_model.variables)
    def q_variables(self):
        """Return the Q-net variables: base-Q vars first, then twin-Q vars
        (callers rely on this ordering to split the list in half)."""
        return self.q_net.variables + (self.twin_q_net.variables
                                       if self.twin_q_net else [])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/sac/sac_policy.py
|
Python
|
from gym.spaces import Box
import numpy as np
import logging
import ray
import ray.experimental.tf_utils
from ray.rllib.agents.sac.sac_model import SACModel
from ray.rllib.agents.ddpg.noop_model import NoopModel
from ray.rllib.agents.dqn.dqn_policy import _postprocess_dqn, PRIO_WEIGHTS
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils import try_import_tf, try_import_tfp
from ray.rllib.utils.annotations import override
from ray.rllib.utils.tf_ops import minimize_and_clip, make_tf_callable
tf = try_import_tf()
tfp = try_import_tfp()
logger = logging.getLogger(__name__)
def build_sac_model(policy, obs_space, action_space, config):
    """Construct the SAC model and its target twin on the policy.

    Returns policy.model; policy.target_model is created as a side effect
    with identical architecture.
    """
    if config["model"]["custom_model"]:
        logger.warning(
            "Setting use_state_preprocessor=True since a custom model "
            "was specified.")
        config["use_state_preprocessor"] = True
    if not isinstance(action_space, Box):
        raise UnsupportedSpaceException(
            "Action space {} is not supported for SAC.".format(action_space))
    if len(action_space.shape) > 1:
        raise UnsupportedSpaceException(
            "Action space has multiple dimensions "
            "{}. ".format(action_space.shape) +
            "Consider reshaping this into a single dimension, "
            "using a Tuple action space, or the multi-agent API.")
    if config["use_state_preprocessor"]:
        default_model = None  # catalog decides
        num_outputs = 256  # arbitrary
        config["model"]["no_final_linear"] = True
    else:
        default_model = NoopModel
        num_outputs = int(np.product(obs_space.shape))
    # Both networks share every construction argument except their name.
    shared_kwargs = dict(
        framework="tf",
        model_interface=SACModel,
        default_model=default_model,
        actor_hidden_activation=config["policy_model"]["hidden_activation"],
        actor_hiddens=config["policy_model"]["hidden_layer_sizes"],
        critic_hidden_activation=config["Q_model"]["hidden_activation"],
        critic_hiddens=config["Q_model"]["hidden_layer_sizes"],
        twin_q=config["twin_q"])
    policy.model = ModelCatalog.get_model_v2(
        obs_space,
        action_space,
        num_outputs,
        config["model"],
        name="sac_model",
        **shared_kwargs)
    policy.target_model = ModelCatalog.get_model_v2(
        obs_space,
        action_space,
        num_outputs,
        config["model"],
        name="target_sac_model",
        **shared_kwargs)
    return policy.model
def postprocess_trajectory(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Delegate trajectory postprocessing to the shared DQN implementation.

    other_agent_batches and episode are part of the policy-template
    signature but are unused here.
    """
    return _postprocess_dqn(policy, sample_batch)
def build_action_output(policy, model, input_dict, obs_space, action_space,
                        config):
    """Build the TF ops that produce the policy's output actions.

    Returns:
        (actions, action_probabilities): actions switch between the
        stochastic and deterministic policy output based on the
        policy.stochastic TF variable; the log-probs are zeroed in the
        deterministic branch.
    """
    model_out, _ = model({
        "obs": input_dict[SampleBatch.CUR_OBS],
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)
    def unsquash_actions(actions):
        # Use sigmoid to scale to [0,1], but also double magnitude of input to
        # emulate behaviour of tanh activation used in SAC and TD3 papers.
        sigmoid_out = tf.nn.sigmoid(2 * actions)
        # Rescale to actual env policy scale
        # (shape of sigmoid_out is [batch_size, dim_actions], so we reshape to
        # get same dims)
        action_range = (action_space.high - action_space.low)[None]
        low_action = action_space.low[None]
        unsquashed_actions = action_range * sigmoid_out + low_action
        return unsquashed_actions
    squashed_stochastic_actions, log_pis = policy.model.get_policy_output(
        model_out, deterministic=False)
    # When normalize_actions is on, the squashed (-1, 1) actions are
    # emitted as-is; presumably an outer layer rescales them to the env
    # bounds in that case -- confirm against the trainer config handling.
    stochastic_actions = squashed_stochastic_actions if config[
        "normalize_actions"] else unsquash_actions(squashed_stochastic_actions)
    squashed_deterministic_actions, _ = policy.model.get_policy_output(
        model_out, deterministic=True)
    deterministic_actions = squashed_deterministic_actions if config[
        "normalize_actions"] else unsquash_actions(
            squashed_deterministic_actions)
    actions = tf.cond(policy.stochastic, lambda: stochastic_actions,
                      lambda: deterministic_actions)
    action_probabilities = tf.cond(policy.stochastic, lambda: log_pis,
                                   lambda: tf.zeros_like(log_pis))
    policy.output_actions = actions
    return actions, action_probabilities
def actor_critic_loss(policy, model, _, train_batch):
    """Build SAC's critic, actor, and entropy-temperature (alpha) losses.

    Side effects (read later by stats()/gradients()):
        policy.q_t, policy.td_error, policy.actor_loss,
        policy.critic_loss (a list, one entry per Q head), policy.alpha_loss.

    Returns:
        A single combined scalar loss; the components are re-separated in
        the gradients() function.
    """
    model_out_t, _ = model({
        "obs": train_batch[SampleBatch.CUR_OBS],
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)
    model_out_tp1, _ = model({
        "obs": train_batch[SampleBatch.NEXT_OBS],
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)
    target_model_out_tp1, _ = policy.target_model({
        "obs": train_batch[SampleBatch.NEXT_OBS],
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)
    # TODO(hartikainen): figure actions and log pis
    policy_t, log_pis_t = model.get_policy_output(model_out_t)
    policy_tp1, log_pis_tp1 = model.get_policy_output(model_out_tp1)
    log_alpha = model.log_alpha
    alpha = model.alpha
    # q network evaluation
    q_t = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS])
    if policy.config["twin_q"]:
        twin_q_t = model.get_twin_q_values(model_out_t,
                                           train_batch[SampleBatch.ACTIONS])
    # Q-values for current policy (no noise) in given current state
    q_t_det_policy = model.get_q_values(model_out_t, policy_t)
    if policy.config["twin_q"]:
        # BUG FIX: this previously called get_q_values() a second time, so
        # reduce_min compared the same tensor with itself and the twin
        # critic never constrained the actor objective. Use the twin Q
        # head, as clipped double-Q learning intends.
        twin_q_t_det_policy = model.get_twin_q_values(model_out_t, policy_t)
        q_t_det_policy = tf.reduce_min(
            (q_t_det_policy, twin_q_t_det_policy), axis=0)
    # target q network evaluation
    q_tp1 = policy.target_model.get_q_values(target_model_out_tp1, policy_tp1)
    if policy.config["twin_q"]:
        twin_q_tp1 = policy.target_model.get_twin_q_values(
            target_model_out_tp1, policy_tp1)
    q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
    if policy.config["twin_q"]:
        twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
        q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
    # Soft Q target: subtract the (temperature-weighted) entropy term.
    q_tp1 -= alpha * log_pis_tp1
    q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
    q_tp1_best_masked = (
        1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)) * q_tp1_best
    assert policy.config["n_step"] == 1, "TODO(hartikainen) n_step > 1"
    # compute RHS of bellman equation
    q_t_selected_target = tf.stop_gradient(
        train_batch[SampleBatch.REWARDS] +
        policy.config["gamma"]**policy.config["n_step"] * q_tp1_best_masked)
    # compute the error (potentially clipped)
    if policy.config["twin_q"]:
        base_td_error = q_t_selected - q_t_selected_target
        twin_td_error = twin_q_t_selected - q_t_selected_target
        td_error = 0.5 * (tf.square(base_td_error) + tf.square(twin_td_error))
    else:
        td_error = tf.square(q_t_selected - q_t_selected_target)
    critic_loss = [
        tf.losses.mean_squared_error(
            labels=q_t_selected_target, predictions=q_t_selected, weights=0.5)
    ]
    if policy.config["twin_q"]:
        critic_loss.append(
            tf.losses.mean_squared_error(
                labels=q_t_selected_target,
                predictions=twin_q_t_selected,
                weights=0.5))
    # Auto-tuned temperature: "auto" uses the SAC heuristic of -|A| (the
    # negative action-space dimensionality) as the entropy target.
    target_entropy = (-np.prod(policy.action_space.shape)
                      if policy.config["target_entropy"] == "auto" else
                      policy.config["target_entropy"])
    alpha_loss = -tf.reduce_mean(
        log_alpha * tf.stop_gradient(log_pis_t + target_entropy))
    actor_loss = tf.reduce_mean(alpha * log_pis_t - q_t_det_policy)
    # save for stats function
    policy.q_t = q_t
    policy.td_error = td_error
    policy.actor_loss = actor_loss
    policy.critic_loss = critic_loss
    policy.alpha_loss = alpha_loss
    # in a custom apply op we handle the losses separately, but return them
    # combined in one loss for now
    return actor_loss + tf.add_n(critic_loss) + alpha_loss
def gradients(policy, optimizer, loss):
    """Compute gradients separately per loss component.

    Stashes _actor/_critic/_alpha_grads_and_vars on the policy for use in
    apply_gradients() and returns their concatenation.
    """
    if policy.config["grad_norm_clipping"] is not None:
        # NOTE(review): in this clipping branch every component uses the
        # single `optimizer` argument, while the unclipped branch below
        # uses the dedicated per-component optimizers -- confirm this
        # asymmetry is intentional.
        actor_grads_and_vars = minimize_and_clip(
            optimizer,
            policy.actor_loss,
            var_list=policy.model.policy_variables(),
            clip_val=policy.config["grad_norm_clipping"])
        if policy.config["twin_q"]:
            # q_variables() lists base-Q vars first, then twin-Q vars, so
            # halving the list splits the two critics.
            q_variables = policy.model.q_variables()
            half_cutoff = len(q_variables) // 2
            critic_grads_and_vars = []
            critic_grads_and_vars += minimize_and_clip(
                optimizer,
                policy.critic_loss[0],
                var_list=q_variables[:half_cutoff],
                clip_val=policy.config["grad_norm_clipping"])
            critic_grads_and_vars += minimize_and_clip(
                optimizer,
                policy.critic_loss[1],
                var_list=q_variables[half_cutoff:],
                clip_val=policy.config["grad_norm_clipping"])
        else:
            critic_grads_and_vars = minimize_and_clip(
                optimizer,
                policy.critic_loss[0],
                var_list=policy.model.q_variables(),
                clip_val=policy.config["grad_norm_clipping"])
        alpha_grads_and_vars = minimize_and_clip(
            optimizer,
            policy.alpha_loss,
            var_list=[policy.model.log_alpha],
            clip_val=policy.config["grad_norm_clipping"])
    else:
        actor_grads_and_vars = policy._actor_optimizer.compute_gradients(
            policy.actor_loss, var_list=policy.model.policy_variables())
        if policy.config["twin_q"]:
            q_variables = policy.model.q_variables()
            half_cutoff = len(q_variables) // 2
            base_q_optimizer, twin_q_optimizer = policy._critic_optimizer
            critic_grads_and_vars = base_q_optimizer.compute_gradients(
                policy.critic_loss[0], var_list=q_variables[:half_cutoff]
            ) + twin_q_optimizer.compute_gradients(
                policy.critic_loss[1], var_list=q_variables[half_cutoff:])
        else:
            critic_grads_and_vars = policy._critic_optimizer[
                0].compute_gradients(
                    policy.critic_loss[0], var_list=policy.model.q_variables())
        alpha_grads_and_vars = policy._alpha_optimizer.compute_gradients(
            policy.alpha_loss, var_list=[policy.model.log_alpha])
    # save these for later use in build_apply_op; drop vars with no
    # gradient so the apply ops stay valid.
    policy._actor_grads_and_vars = [(g, v) for (g, v) in actor_grads_and_vars
                                    if g is not None]
    policy._critic_grads_and_vars = [(g, v) for (g, v) in critic_grads_and_vars
                                     if g is not None]
    policy._alpha_grads_and_vars = [(g, v) for (g, v) in alpha_grads_and_vars
                                    if g is not None]
    grads_and_vars = (
        policy._actor_grads_and_vars + policy._critic_grads_and_vars +
        policy._alpha_grads_and_vars)
    return grads_and_vars
def apply_gradients(policy, optimizer, grads_and_vars):
    """Apply the stashed per-component gradients with their own optimizers.

    Returns a single grouped TF op covering actor, critic(s), and alpha.
    """
    actor_op = policy._actor_optimizer.apply_gradients(
        policy._actor_grads_and_vars)
    critic_grads = policy._critic_grads_and_vars
    if policy.config["twin_q"]:
        # First half of the list belongs to the base critic, second half
        # to the twin critic (see gradients()).
        half = len(critic_grads) // 2
        critic_ops = [
            policy._critic_optimizer[0].apply_gradients(critic_grads[:half]),
            policy._critic_optimizer[1].apply_gradients(critic_grads[half:]),
        ]
    else:
        critic_ops = [
            policy._critic_optimizer[0].apply_gradients(critic_grads)
        ]
    # The alpha update also advances the global step counter.
    alpha_op = policy._alpha_optimizer.apply_gradients(
        policy._alpha_grads_and_vars,
        global_step=tf.train.get_or_create_global_step())
    return tf.group([actor_op, alpha_op] + critic_ops)
def stats(policy, train_batch):
    """Scalar TF metrics reported per training batch.

    Reads the tensors stashed on the policy by actor_critic_loss().
    """
    return {
        "td_error": tf.reduce_mean(policy.td_error),
        "actor_loss": tf.reduce_mean(policy.actor_loss),
        # critic_loss is a list (one entry per Q head); reduce_mean
        # averages across the heads.
        "critic_loss": tf.reduce_mean(policy.critic_loss),
        "mean_q": tf.reduce_mean(policy.q_t),
        "max_q": tf.reduce_max(policy.q_t),
        "min_q": tf.reduce_min(policy.q_t),
    }
class ExplorationStateMixin:
    """Holds the TF flag selecting stochastic vs. deterministic actions."""
    def __init__(self, obs_space, action_space, config):
        # A non-trainable TF variable (not a Python bool) so it can be
        # flipped at runtime without rebuilding the graph; read by
        # build_action_output via tf.cond.
        self.stochastic = tf.get_variable(
            initializer=tf.constant_initializer(config["exploration_enabled"]),
            name="stochastic",
            shape=(),
            trainable=False,
            dtype=tf.bool)
    def set_epsilon(self, epsilon):
        # SAC does not use epsilon-greedy exploration; this no-op exists
        # to satisfy the exploration API shared with DQN-family policies.
        pass
class ActorCriticOptimizerMixin:
    """Creates the separate Adam optimizers used by the SAC losses."""
    def __init__(self, config):
        # create global step for counting the number of update operations
        self.global_step = tf.train.get_or_create_global_step()
        # use separate optimizers for actor & critic
        self._actor_optimizer = tf.train.AdamOptimizer(
            learning_rate=config["optimization"]["actor_learning_rate"])
        # _critic_optimizer is a list: one optimizer per Q head (a second
        # one is appended below when twin_q is enabled).
        self._critic_optimizer = [
            tf.train.AdamOptimizer(
                learning_rate=config["optimization"]["critic_learning_rate"])
        ]
        if config["twin_q"]:
            self._critic_optimizer.append(
                tf.train.AdamOptimizer(learning_rate=config["optimization"][
                    "critic_learning_rate"]))
        # Dedicated optimizer for the entropy temperature (log_alpha).
        self._alpha_optimizer = tf.train.AdamOptimizer(
            learning_rate=config["optimization"]["entropy_learning_rate"])
class ComputeTDErrorMixin:
    """Adds a compute_td_error() callable to the policy.

    NOTE(review): presumably used by prioritized replay to refresh sample
    priorities without a full optimizer step -- confirm against the
    trainer/optimizer wiring.
    """
    def __init__(self):
        @make_tf_callable(self.get_session(), dynamic_shape=True)
        def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask,
                             importance_weights):
            # Do forward pass on loss to update td error attribute
            actor_critic_loss(
                self, self.model, None, {
                    SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t),
                    SampleBatch.ACTIONS: tf.convert_to_tensor(act_t),
                    SampleBatch.REWARDS: tf.convert_to_tensor(rew_t),
                    SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1),
                    SampleBatch.DONES: tf.convert_to_tensor(done_mask),
                    PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights),
                })
            # actor_critic_loss stores the per-sample TD error on self.
            return self.td_error
        self.compute_td_error = compute_td_error
class TargetNetworkMixin:
    """Adds (soft) target-network synchronization support to the policy.

    Builds a TF callable that Polyak-averages the trainable variables of
    ``self.model`` into ``self.target_model``:
    ``target_var <- tau * var + (1 - tau) * target_var``.
    """

    def __init__(self, config):
        @make_tf_callable(self.get_session())
        def update_target_fn(tau):
            tau = tf.convert_to_tensor(tau, dtype=tf.float32)
            update_target_expr = []
            model_vars = self.model.trainable_variables()
            target_model_vars = self.target_model.trainable_variables()
            # Model and target model must have identical variable layouts,
            # since they are averaged pairwise below.
            assert len(model_vars) == len(target_model_vars), \
                (model_vars, target_model_vars)
            for var, var_target in zip(model_vars, target_model_vars):
                update_target_expr.append(
                    var_target.assign(tau * var + (1.0 - tau) * var_target))
                logger.debug("Update target op {}".format(var_target))
            return tf.group(*update_target_expr)

        # Hard initial update (tau=1.0 copies weights exactly).
        self._do_update = update_target_fn
        self.update_target(tau=1.0)

    # support both hard and soft sync
    def update_target(self, tau=None):
        """Sync the target network.

        Args:
            tau (float): interpolation coefficient; None uses the
                configured soft-update rate ``config["tau"]``.

        Bug fix: the previous ``tau or self.config.get("tau")`` treated an
        explicit ``tau=0.0`` as missing (0.0 is falsy) and silently fell
        back to the config value. Compare against None explicitly instead.
        """
        if tau is None:
            tau = self.config.get("tau")
        self._do_update(np.float32(tau))

    @override(TFPolicy)
    def variables(self):
        # Both online and target variables belong to this policy.
        return self.model.variables() + self.target_model.variables()
def setup_early_mixins(policy, obs_space, action_space, config):
    """Initialize mixins that must exist before the model/loss are built
    (exploration flag and the actor/critic/alpha optimizers)."""
    ExplorationStateMixin.__init__(policy, obs_space, action_space, config)
    ActorCriticOptimizerMixin.__init__(policy, config)
def setup_mid_mixins(policy, obs_space, action_space, config):
    """Attach the TD-error computation helper just before the loss is built."""
    ComputeTDErrorMixin.__init__(policy)
def setup_late_mixins(policy, obs_space, action_space, config):
    """Create the target-network sync ops once everything else exists."""
    TargetNetworkMixin.__init__(policy, config)
# SAC policy assembled via the TF policy template: wires together the model
# builder, the action sampler, the actor/critic/alpha loss, the per-network
# optimizers, and the target-network / TD-error mixins defined above.
SACTFPolicy = build_tf_policy(
    name="SACTFPolicy",
    get_default_config=lambda: ray.rllib.agents.sac.sac.DEFAULT_CONFIG,
    make_model=build_sac_model,
    postprocess_fn=postprocess_trajectory,
    action_sampler_fn=build_action_output,
    loss_fn=actor_critic_loss,
    stats_fn=stats,
    gradients_fn=gradients,
    apply_gradients_fn=apply_gradients,
    # Report the TD error with each learner batch so replay priorities can
    # be updated by the trainer.
    extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
    mixins=[
        TargetNetworkMixin, ExplorationStateMixin, ActorCriticOptimizerMixin,
        ComputeTDErrorMixin
    ],
    # Optimizers must exist before the loss is built; target-network sync
    # can only be set up after full initialization.
    before_init=setup_early_mixins,
    before_loss_init=setup_mid_mixins,
    after_init=setup_late_mixins,
    obs_include_prev_action_reward=False)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/trainer.py
|
Python
|
from datetime import datetime
import copy
import logging
import os
import pickle
import six
import time
import tempfile
import ray
from ray.exceptions import RayError
from ray.rllib.models import MODEL_DEFAULTS
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI
from ray.rllib.utils import FilterManager, deep_update, merge_dicts
from ray.rllib.utils.memory import ray_get_and_free
from ray.rllib.utils import try_import_tf
from ray.tune.registry import ENV_CREATOR, register_env, _global_registry
from ray.tune.trainable import Trainable
from ray.tune.trial import ExportFormat
from ray.tune.resources import Resources
from ray.tune.logger import UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.rllib.env.normalize_actions import NormalizeActionWrapper
tf = try_import_tf()
logger = logging.getLogger(__name__)
# Max number of times to retry a worker failure. We shouldn't try too many
# times in a row since that would indicate a persistent cluster issue.
MAX_WORKER_FAILURE_RETRIES = 3
# yapf: disable
# __sphinx_doc_begin__
# Default configuration shared by all RLlib trainers. Algorithm-specific
# default configs are produced by merging overrides into this dict via
# `with_common_config()` below.
COMMON_CONFIG = {
    # === Settings for Rollout Worker processes ===
    # Number of rollout worker actors to create for parallel sampling. Setting
    # this to 0 will force rollouts to be done in the trainer actor.
    "num_workers": 2,
    # Number of environments to evaluate vectorwise per worker. This enables
    # model inference batching, which can improve performance for inference
    # bottlenecked workloads.
    "num_envs_per_worker": 1,
    # Default sample batch size (unroll length). Batches of this size are
    # collected from rollout workers until train_batch_size is met. When using
    # multiple envs per worker, this is multiplied by num_envs_per_worker.
    #
    # For example, given sample_batch_size=100 and train_batch_size=1000:
    # 1. RLlib will collect 10 batches of size 100 from the rollout workers.
    # 2. These batches are concatenated and we perform an epoch of SGD.
    #
    # If we further set num_envs_per_worker=5, then the sample batches will be
    # of size 5*100 = 500, and RLlib will only collect 2 batches per epoch.
    #
    # The exact workflow here can vary per algorithm. For example, PPO further
    # divides the train batch into minibatches for multi-epoch SGD.
    "sample_batch_size": 200,
    # Whether to rollout "complete_episodes" or "truncate_episodes" to
    # `sample_batch_size` length unrolls. Episode truncation guarantees more
    # evenly sized batches, but increases variance as the reward-to-go will
    # need to be estimated at truncation boundaries.
    "batch_mode": "truncate_episodes",
    # === Settings for the Trainer process ===
    # Number of GPUs to allocate to the trainer process. Note that not all
    # algorithms can take advantage of trainer GPUs. This can be fractional
    # (e.g., 0.3 GPUs).
    "num_gpus": 0,
    # Training batch size, if applicable. Should be >= sample_batch_size.
    # Samples batches will be concatenated together to a batch of this size,
    # which is then passed to SGD.
    "train_batch_size": 200,
    # Arguments to pass to the policy model. See models/catalog.py for a full
    # list of the available model options.
    "model": MODEL_DEFAULTS,
    # Arguments to pass to the policy optimizer. These vary by optimizer.
    "optimizer": {},
    # === Environment Settings ===
    # Discount factor of the MDP.
    "gamma": 0.99,
    # Number of steps after which the episode is forced to terminate. Defaults
    # to `env.spec.max_episode_steps` (if present) for Gym envs.
    "horizon": None,
    # Calculate rewards but don't reset the environment when the horizon is
    # hit. This allows value estimation and RNN state to span across logical
    # episodes denoted by horizon. This only has an effect if horizon != inf.
    "soft_horizon": False,
    # Don't set 'done' at the end of the episode. Note that you still need to
    # set this if soft_horizon=True, unless your env is actually running
    # forever without returning done=True.
    "no_done_at_end": False,
    # Arguments to pass to the env creator.
    "env_config": {},
    # Environment name can also be passed via config.
    "env": None,
    # Unsquash actions to the upper and lower bounds of env's action space
    "normalize_actions": False,
    # Whether to clip rewards prior to experience postprocessing. Setting to
    # None means clip for Atari only.
    "clip_rewards": None,
    # Whether to np.clip() actions to the action space low/high range spec.
    "clip_actions": True,
    # Whether to use rllib or deepmind preprocessors by default
    "preprocessor_pref": "deepmind",
    # The default learning rate.
    "lr": 0.0001,
    # === Debug Settings ===
    # Whether to write episode stats and videos to the agent log dir. This is
    # typically located in ~/ray_results.
    "monitor": False,
    # Set the ray.rllib.* log level for the agent process and its workers.
    # Should be one of DEBUG, INFO, WARN, or ERROR. The DEBUG level will also
    # periodically print out summaries of relevant internal dataflow (this is
    # also printed out once at startup at the INFO level). When using the
    # `rllib train` command, you can also use the `-v` and `-vv` flags as
    # shorthand for INFO and DEBUG.
    "log_level": "WARN",
    # Callbacks that will be run during various phases of training. These all
    # take a single "info" dict as an argument. For episode callbacks, custom
    # metrics can be attached to the episode by updating the episode object's
    # custom metrics dict (see examples/custom_metrics_and_callbacks.py). You
    # may also mutate the passed in batch data in your callback.
    "callbacks": {
        "on_episode_start": None,  # arg: {"env": .., "episode": ...}
        "on_episode_step": None,  # arg: {"env": .., "episode": ...}
        "on_episode_end": None,  # arg: {"env": .., "episode": ...}
        "on_sample_end": None,  # arg: {"samples": .., "worker": ...}
        "on_train_result": None,  # arg: {"trainer": ..., "result": ...}
        "on_postprocess_traj": None,  # arg: {
        #   "agent_id": ..., "episode": ...,
        #   "pre_batch": (before processing),
        #   "post_batch": (after processing),
        #   "all_pre_batches": (other agent ids),
        # }
    },
    # Whether to attempt to continue training if a worker crashes. The number
    # of currently healthy workers is reported as the "num_healthy_workers"
    # metric.
    "ignore_worker_failures": False,
    # Log system resource metrics to results. This requires `psutil` to be
    # installed for sys stats, and `gputil` for GPU metrics.
    "log_sys_usage": True,
    # === Framework Settings ===
    # Use PyTorch (instead of tf). If using `rllib train`, this can also be
    # enabled with the `--torch` flag.
    # NOTE: Some agents may not support `torch` yet and throw an error.
    "use_pytorch": False,
    # Enable TF eager execution (TF policies only). If using `rllib train`,
    # this can also be enabled with the `--eager` flag.
    "eager": False,
    # Enable tracing in eager mode. This greatly improves performance, but
    # makes it slightly harder to debug since Python code won't be evaluated
    # after the initial eager pass.
    "eager_tracing": False,
    # Disable eager execution on workers (but allow it on the driver). This
    # only has an effect if eager is enabled.
    "no_eager_on_workers": False,
    # === Evaluation Settings ===
    # Evaluate with every `evaluation_interval` training iterations.
    # The evaluation stats will be reported under the "evaluation" metric key.
    # Note that evaluation is currently not parallelized, and that for Ape-X
    # metrics are already only reported for the lowest epsilon workers.
    "evaluation_interval": None,
    # Number of episodes to run per evaluation period.
    "evaluation_num_episodes": 10,
    # Extra arguments to pass to evaluation workers.
    # Typical usage is to pass extra args to evaluation env creator
    # and to disable exploration by computing deterministic actions
    # TODO(kismuz): implement determ. actions and include relevant keys hints
    "evaluation_config": {},
    # === Advanced Rollout Settings ===
    # Use a background thread for sampling (slightly off-policy, usually not
    # advisable to turn on unless your env specifically requires it).
    "sample_async": False,
    # Element-wise observation filter, either "NoFilter" or "MeanStdFilter".
    "observation_filter": "NoFilter",
    # Whether to synchronize the statistics of remote filters.
    "synchronize_filters": True,
    # Configures TF for single-process operation by default.
    "tf_session_args": {
        # note: overridden by `local_tf_session_args`
        "intra_op_parallelism_threads": 2,
        "inter_op_parallelism_threads": 2,
        "gpu_options": {
            "allow_growth": True,
        },
        "log_device_placement": False,
        "device_count": {
            "CPU": 1
        },
        "allow_soft_placement": True,  # required by PPO multi-gpu
    },
    # Override the following tf session args on the local worker
    "local_tf_session_args": {
        # Allow a higher level of parallelism by default, but not unlimited
        # since that can cause crashes with many concurrent drivers.
        "intra_op_parallelism_threads": 8,
        "inter_op_parallelism_threads": 8,
    },
    # Whether to LZ4 compress individual observations
    "compress_observations": False,
    # Wait for metric batches for at most this many seconds. Those that
    # have not returned in time will be collected in the next iteration.
    "collect_metrics_timeout": 180,
    # Smooth metrics over this many episodes.
    "metrics_smoothing_episodes": 100,
    # If using num_envs_per_worker > 1, whether to create those new envs in
    # remote processes instead of in the same worker. This adds overheads, but
    # can make sense if your envs can take much time to step / reset
    # (e.g., for StarCraft). Use this cautiously; overheads are significant.
    "remote_worker_envs": False,
    # Timeout that remote workers are waiting when polling environments.
    # 0 (continue when at least one env is ready) is a reasonable default,
    # but optimal value could be obtained by measuring your environment
    # step / reset and model inference perf.
    "remote_env_batch_wait_ms": 0,
    # Minimum time per iteration
    "min_iter_time_s": 0,
    # Minimum env steps to optimize for per train call. This value does
    # not affect learning, only the length of iterations.
    "timesteps_per_iteration": 0,
    # This argument, in conjunction with worker_index, sets the random seed of
    # each worker, so that identically configured trials will have identical
    # results. This makes experiments reproducible.
    "seed": None,
    # === Advanced Resource Settings ===
    # Number of CPUs to allocate per worker.
    "num_cpus_per_worker": 1,
    # Number of GPUs to allocate per worker. This can be fractional. This is
    # usually needed only if your env itself requires a GPU (i.e., it is a
    # GPU-intensive video game), or model inference is unusually expensive.
    "num_gpus_per_worker": 0,
    # Any custom Ray resources to allocate per worker.
    "custom_resources_per_worker": {},
    # Number of CPUs to allocate for the trainer. Note: this only takes effect
    # when running in Tune. Otherwise, the trainer runs in the main program.
    "num_cpus_for_driver": 1,
    # You can set these memory quotas to tell Ray to reserve memory for your
    # training run. This guarantees predictable execution, but the tradeoff is
    # if your workload exceeds the memory quota it will fail.
    # Heap memory to reserve for the trainer process (0 for unlimited). This
    # can be large if you are using large train batches, replay buffers, etc.
    "memory": 0,
    # Object store memory to reserve for the trainer process. Being large
    # enough to fit a few copies of the model weights should be sufficient.
    # This is enabled by default since models are typically quite small.
    "object_store_memory": 0,
    # Heap memory to reserve for each worker. Should generally be small unless
    # your environment is very heavyweight.
    "memory_per_worker": 0,
    # Object store memory to reserve for each worker. This only needs to be
    # large enough to fit a few sample batches at a time. This is enabled
    # by default since it almost never needs to be larger than ~200MB.
    "object_store_memory_per_worker": 0,
    # === Offline Datasets ===
    # Specify how to generate experiences:
    #  - "sampler": generate experiences via online simulation (default)
    #  - a local directory or file glob expression (e.g., "/tmp/*.json")
    #  - a list of individual file paths/URIs (e.g., ["/tmp/1.json",
    #    "s3://bucket/2.json"])
    #  - a dict with string keys and sampling probabilities as values (e.g.,
    #    {"sampler": 0.4, "/tmp/*.json": 0.4, "s3://bucket/expert.json": 0.2}).
    #  - a function that returns a rllib.offline.InputReader
    "input": "sampler",
    # Specify how to evaluate the current policy. This only has an effect when
    # reading offline experiences. Available options:
    #  - "wis": the weighted step-wise importance sampling estimator.
    #  - "is": the step-wise importance sampling estimator.
    #  - "simulation": run the environment in the background, but use
    #    this data for evaluation only and not for learning.
    "input_evaluation": ["is", "wis"],
    # Whether to run postprocess_trajectory() on the trajectory fragments from
    # offline inputs. Note that postprocessing will be done using the *current*
    # policy, not the *behaviour* policy, which is typically undesirable for
    # on-policy algorithms.
    "postprocess_inputs": False,
    # If positive, input batches will be shuffled via a sliding window buffer
    # of this number of batches. Use this if the input data is not in random
    # enough order. Input is delayed until the shuffle buffer is filled.
    "shuffle_buffer_size": 0,
    # Specify where experiences should be saved:
    #  - None: don't save any experiences
    #  - "logdir" to save to the agent log dir
    #  - a path/URI to save to a custom output directory (e.g., "s3://bucket/")
    #  - a function that returns a rllib.offline.OutputWriter
    "output": None,
    # What sample batch columns to LZ4 compress in the output data.
    "output_compress_columns": ["obs", "new_obs"],
    # Max output file size before rolling over to a new file.
    "output_max_file_size": 64 * 1024 * 1024,
    # === Settings for Multi-Agent Environments ===
    "multiagent": {
        # Map from policy ids to tuples of (policy_cls, obs_space,
        # act_space, config). See rollout_worker.py for more info.
        "policies": {},
        # Function mapping agent ids to policy ids.
        "policy_mapping_fn": None,
        # Optional whitelist of policies to train, or None for all policies.
        "policies_to_train": None,
    },
}
# __sphinx_doc_end__
# yapf: enable
@DeveloperAPI
def with_common_config(extra_config):
    """Returns the given config dict merged with common agent confs."""
    merged = with_base_config(COMMON_CONFIG, extra_config)
    return merged
def with_base_config(base_config, extra_config):
    """Returns the given config dict merged with a base agent conf.

    The base config is deep-copied first, so neither the base nor any of
    its nested containers are mutated by the merge.
    """
    merged = copy.deepcopy(base_config)
    merged.update(extra_config)
    return merged
@PublicAPI
class Trainer(Trainable):
"""A trainer coordinates the optimization of one or more RL policies.
All RLlib trainers extend this base class, e.g., the A3CTrainer implements
the A3C algorithm for single and multi-agent training.
Trainer objects retain internal model state between calls to train(), so
you should create a new trainer instance for each training session.
Attributes:
env_creator (func): Function that creates a new training env.
config (obj): Algorithm-specific configuration data.
logdir (str): Directory in which training outputs should be placed.
"""
_allow_unknown_configs = False
_allow_unknown_subkeys = [
"tf_session_args", "local_tf_session_args", "env_config", "model",
"optimizer", "multiagent", "custom_resources_per_worker",
"evaluation_config"
]
@PublicAPI
def __init__(self, config=None, env=None, logger_creator=None):
"""Initialize an RLLib trainer.
Args:
config (dict): Algorithm-specific configuration data.
env (str): Name of the environment to use. Note that this can also
be specified as the `env` key in config.
logger_creator (func): Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
"""
config = config or {}
if tf and config.get("eager"):
tf.enable_eager_execution()
logger.info("Executing eagerly, with eager_tracing={}".format(
"True" if config.get("eager_tracing") else "False"))
if tf and not tf.executing_eagerly():
logger.info("Tip: set 'eager': true or the --eager flag to enable "
"TensorFlow eager execution")
# Vars to synchronize to workers on each train call
self.global_vars = {"timestep": 0}
# Trainers allow env ids to be passed directly to the constructor.
self._env_id = self._register_if_needed(env or config.get("env"))
# Create a default logger creator if no logger_creator is specified
if logger_creator is None:
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}_{}".format(self._name, self._env_id,
timestr)
def default_logger_creator(config):
"""Creates a Unified logger with a default logdir prefix
containing the agent name and the env id
"""
if not os.path.exists(DEFAULT_RESULTS_DIR):
os.makedirs(DEFAULT_RESULTS_DIR)
logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
return UnifiedLogger(config, logdir, loggers=None)
logger_creator = default_logger_creator
Trainable.__init__(self, config, logger_creator)
@classmethod
@override(Trainable)
def default_resource_request(cls, config):
cf = dict(cls._default_config, **config)
Trainer._validate_config(cf)
# TODO(ekl): add custom resources here once tune supports them
return Resources(
cpu=cf["num_cpus_for_driver"],
gpu=cf["num_gpus"],
memory=cf["memory"],
object_store_memory=cf["object_store_memory"],
extra_cpu=cf["num_cpus_per_worker"] * cf["num_workers"],
extra_gpu=cf["num_gpus_per_worker"] * cf["num_workers"],
extra_memory=cf["memory_per_worker"] * cf["num_workers"],
extra_object_store_memory=cf["object_store_memory_per_worker"] *
cf["num_workers"])
@override(Trainable)
@PublicAPI
def train(self):
"""Overrides super.train to synchronize global vars."""
if self._has_policy_optimizer():
self.global_vars["timestep"] = self.optimizer.num_steps_sampled
self.optimizer.workers.local_worker().set_global_vars(
self.global_vars)
for w in self.optimizer.workers.remote_workers():
w.set_global_vars.remote(self.global_vars)
logger.debug("updated global vars: {}".format(self.global_vars))
result = None
for _ in range(1 + MAX_WORKER_FAILURE_RETRIES):
try:
result = Trainable.train(self)
except RayError as e:
if self.config["ignore_worker_failures"]:
logger.exception(
"Error in train call, attempting to recover")
self._try_recover()
else:
logger.info(
"Worker crashed during call to train(). To attempt to "
"continue training without the failed worker, set "
"`'ignore_worker_failures': True`.")
raise e
except Exception as e:
time.sleep(0.5) # allow logs messages to propagate
raise e
else:
break
if result is None:
raise RuntimeError("Failed to recover from worker crash")
if (self.config.get("observation_filter", "NoFilter") != "NoFilter"
and hasattr(self, "workers")
and isinstance(self.workers, WorkerSet)):
FilterManager.synchronize(
self.workers.local_worker().filters,
self.workers.remote_workers(),
update_remote=self.config["synchronize_filters"])
logger.debug("synchronized filters: {}".format(
self.workers.local_worker().filters))
if self._has_policy_optimizer():
result["num_healthy_workers"] = len(
self.optimizer.workers.remote_workers())
if self.config["evaluation_interval"]:
if self._iteration % self.config["evaluation_interval"] == 0:
evaluation_metrics = self._evaluate()
assert isinstance(evaluation_metrics, dict), \
"_evaluate() needs to return a dict."
result.update(evaluation_metrics)
return result
@override(Trainable)
def _log_result(self, result):
if self.config["callbacks"].get("on_train_result"):
self.config["callbacks"]["on_train_result"]({
"trainer": self,
"result": result,
})
# log after the callback is invoked, so that the user has a chance
# to mutate the result
Trainable._log_result(self, result)
@override(Trainable)
def _setup(self, config):
env = self._env_id
if env:
config["env"] = env
if _global_registry.contains(ENV_CREATOR, env):
self.env_creator = _global_registry.get(ENV_CREATOR, env)
else:
import gym # soft dependency
self.env_creator = lambda env_config: gym.make(env)
else:
self.env_creator = lambda env_config: None
# Merge the supplied config with the class default
merged_config = copy.deepcopy(self._default_config)
merged_config = deep_update(merged_config, config,
self._allow_unknown_configs,
self._allow_unknown_subkeys)
self.raw_user_config = config
self.config = merged_config
if self.config["normalize_actions"]:
inner = self.env_creator
self.env_creator = (
lambda env_config: NormalizeActionWrapper(inner(env_config)))
Trainer._validate_config(self.config)
log_level = self.config.get("log_level")
if log_level in ["WARN", "ERROR"]:
logger.info("Current log_level is {}. For more information, "
"set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
"-vv flags.".format(log_level))
if self.config.get("log_level"):
logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
def get_scope():
if tf and not tf.executing_eagerly():
return tf.Graph().as_default()
else:
return open("/dev/null") # fake a no-op scope
with get_scope():
self._init(self.config, self.env_creator)
# Evaluation related
if self.config.get("evaluation_interval"):
# Update env_config with evaluation settings:
extra_config = copy.deepcopy(self.config["evaluation_config"])
extra_config.update({
"batch_mode": "complete_episodes",
"batch_steps": 1,
})
logger.debug(
"using evaluation_config: {}".format(extra_config))
self.evaluation_workers = self._make_workers(
self.env_creator,
self._policy,
merge_dicts(self.config, extra_config),
num_workers=0)
self.evaluation_metrics = self._evaluate()
@override(Trainable)
def _stop(self):
if hasattr(self, "workers"):
self.workers.stop()
if hasattr(self, "optimizer"):
self.optimizer.stop()
@override(Trainable)
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(self.iteration))
pickle.dump(self.__getstate__(), open(checkpoint_path, "wb"))
return checkpoint_path
@override(Trainable)
def _restore(self, checkpoint_path):
extra_data = pickle.load(open(checkpoint_path, "rb"))
self.__setstate__(extra_data)
@DeveloperAPI
def _make_workers(self, env_creator, policy, config, num_workers):
return WorkerSet(
env_creator,
policy,
config,
num_workers=num_workers,
logdir=self.logdir)
@DeveloperAPI
def _init(self, config, env_creator):
"""Subclasses should override this for custom initialization."""
raise NotImplementedError
@DeveloperAPI
def _evaluate(self):
"""Evaluates current policy under `evaluation_config` settings.
Note that this default implementation does not do anything beyond
merging evaluation_config with the normal trainer config.
"""
if not self.config["evaluation_config"]:
raise ValueError(
"No evaluation_config specified. It doesn't make sense "
"to enable evaluation without specifying any config "
"overrides, since the results will be the "
"same as reported during normal policy evaluation.")
logger.info("Evaluating current policy for {} episodes".format(
self.config["evaluation_num_episodes"]))
self._before_evaluate()
self.evaluation_workers.local_worker().restore(
self.workers.local_worker().save())
for _ in range(self.config["evaluation_num_episodes"]):
self.evaluation_workers.local_worker().sample()
metrics = collect_metrics(self.evaluation_workers.local_worker())
return {"evaluation": metrics}
@DeveloperAPI
def _before_evaluate(self):
"""Pre-evaluation callback."""
pass
@PublicAPI
def compute_action(self,
observation,
state=None,
prev_action=None,
prev_reward=None,
info=None,
policy_id=DEFAULT_POLICY_ID,
full_fetch=False):
"""Computes an action for the specified policy.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Arguments:
observation (obj): observation from the environment.
state (list): RNN hidden state, if any. If state is not None,
then all of compute_single_action(...) is returned
(computed action, rnn state, logits dictionary).
Otherwise compute_single_action(...)[0] is
returned (computed action).
prev_action (obj): previous action value, if any
prev_reward (int): previous reward, if any
info (dict): info object, if any
policy_id (str): policy to query (only applies to multi-agent).
full_fetch (bool): whether to return extra action fetch results.
This is always set to true if RNN state is specified.
Returns:
Just the computed action if full_fetch=False, or the full output
of policy.compute_actions() otherwise.
"""
if state is None:
state = []
preprocessed = self.workers.local_worker().preprocessors[
policy_id].transform(observation)
filtered_obs = self.workers.local_worker().filters[policy_id](
preprocessed, update=False)
if state:
return self.get_policy(policy_id).compute_single_action(
filtered_obs,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"])
res = self.get_policy(policy_id).compute_single_action(
filtered_obs,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"])
if full_fetch:
return res
else:
return res[0] # backwards compatibility
@property
def _name(self):
"""Subclasses should override this to declare their name."""
raise NotImplementedError
@property
def _default_config(self):
"""Subclasses should override this to declare their default config."""
raise NotImplementedError
@PublicAPI
def get_policy(self, policy_id=DEFAULT_POLICY_ID):
"""Return policy for the specified id, or None.
Arguments:
policy_id (str): id of policy to return.
"""
return self.workers.local_worker().get_policy(policy_id)
@PublicAPI
def get_weights(self, policies=None):
"""Return a dictionary of policy ids to weights.
Arguments:
policies (list): Optional list of policies to return weights for,
or None for all policies.
"""
return self.workers.local_worker().get_weights(policies)
@PublicAPI
def set_weights(self, weights):
"""Set policy weights by policy id.
Arguments:
weights (dict): Map of policy ids to weights to set.
"""
self.workers.local_worker().set_weights(weights)
@DeveloperAPI
def export_policy_model(self, export_dir, policy_id=DEFAULT_POLICY_ID):
"""Export policy model with given policy_id to local directory.
Arguments:
export_dir (string): Writable local directory.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_model("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_model(export_dir, policy_id)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir,
filename_prefix="model",
policy_id=DEFAULT_POLICY_ID):
"""Export tensorflow policy model checkpoint to local directory.
Arguments:
export_dir (string): Writable local directory.
filename_prefix (string): file name prefix of checkpoint files.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_checkpoint("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_checkpoint(
export_dir, filename_prefix, policy_id)
@DeveloperAPI
def collect_metrics(self, selected_workers=None):
"""Collects metrics from the remote workers of this agent.
This is the same data as returned by a call to train().
"""
return self.optimizer.collect_metrics(
self.config["collect_metrics_timeout"],
min_history=self.config["metrics_smoothing_episodes"],
selected_workers=selected_workers)
@classmethod
def resource_help(cls, config):
return ("\n\nYou can adjust the resource requests of RLlib agents by "
"setting `num_workers`, `num_gpus`, and other configs. See "
"the DEFAULT_CONFIG defined by each agent for more info.\n\n"
"The config of this agent is: {}".format(config))
@staticmethod
def _validate_config(config):
if "policy_graphs" in config["multiagent"]:
logger.warning(
"The `policy_graphs` config has been renamed to `policies`.")
# Backwards compatibility
config["multiagent"]["policies"] = config["multiagent"][
"policy_graphs"]
del config["multiagent"]["policy_graphs"]
if "gpu" in config:
raise ValueError(
"The `gpu` config is deprecated, please use `num_gpus=0|1` "
"instead.")
if "gpu_fraction" in config:
raise ValueError(
"The `gpu_fraction` config is deprecated, please use "
"`num_gpus=<fraction>` instead.")
if "use_gpu_for_workers" in config:
raise ValueError(
"The `use_gpu_for_workers` config is deprecated, please use "
"`num_gpus_per_worker=1` instead.")
if type(config["input_evaluation"]) != list:
raise ValueError(
"`input_evaluation` must be a list of strings, got {}".format(
config["input_evaluation"]))
def _try_recover(self):
"""Try to identify and blacklist any unhealthy workers.
This method is called after an unexpected remote error is encountered
from a worker. It issues check requests to all current workers and
blacklists any that respond with error. If no healthy workers remain,
an error is raised.
"""
if not self._has_policy_optimizer():
raise NotImplementedError(
"Recovery is not supported for this algorithm")
logger.info("Health checking all workers...")
checks = []
for ev in self.optimizer.workers.remote_workers():
_, obj_id = ev.sample_with_count.remote()
checks.append(obj_id)
healthy_workers = []
for i, obj_id in enumerate(checks):
w = self.optimizer.workers.remote_workers()[i]
try:
ray_get_and_free(obj_id)
healthy_workers.append(w)
logger.info("Worker {} looks healthy".format(i + 1))
except RayError:
logger.exception("Blacklisting worker {}".format(i + 1))
try:
w.__ray_terminate__.remote()
except Exception:
logger.exception("Error terminating unhealthy worker")
if len(healthy_workers) < 1:
raise RuntimeError(
"Not enough healthy workers remain to continue.")
self.optimizer.reset(healthy_workers)
def _has_policy_optimizer(self):
    """Return True if this trainer owns a PolicyOptimizer instance."""
    optimizer = getattr(self, "optimizer", None)
    return isinstance(optimizer, PolicyOptimizer)
@override(Trainable)
def _export_model(self, export_formats, export_dir):
    """Export policy checkpoint and/or model for the requested formats.

    Returns:
        dict mapping each exported format to its output path.
    """
    ExportFormat.validate(export_formats)
    exported = {}
    # (format name, export method) pairs supported by this trainer,
    # exported in checkpoint-then-model order as before.
    targets = [
        (ExportFormat.CHECKPOINT, self.export_policy_checkpoint),
        (ExportFormat.MODEL, self.export_policy_model),
    ]
    for fmt, export_fn in targets:
        if fmt in export_formats:
            path = os.path.join(export_dir, fmt)
            export_fn(path)
            exported[fmt] = path
    return exported
def __getstate__(self):
    """Serialize trainer state (local worker + optimizer) for pickling."""
    state = {}
    if hasattr(self, "workers"):
        state["worker"] = self.workers.local_worker().save()
    optimizer = getattr(self, "optimizer", None)
    if optimizer is not None and hasattr(optimizer, "save"):
        state["optimizer"] = optimizer.save()
    return state
def __setstate__(self, state):
    """Restore trainer state produced by ``__getstate__``."""
    if "worker" in state:
        payload = state["worker"]
        self.workers.local_worker().restore(payload)
        # Ship one shared copy of the state to every remote worker.
        remote_state = ray.put(payload)
        for worker in self.workers.remote_workers():
            worker.restore.remote(remote_state)
    if "optimizer" in state:
        self.optimizer.restore(state["optimizer"])
def _register_if_needed(self, env_object):
    """Resolve an env spec (string id or env class) to a registered id.

    A string is assumed to already name a registered env and is returned
    unchanged; a class is registered under its own name first.
    """
    if isinstance(env_object, six.string_types):
        return env_object
    if isinstance(env_object, type):
        # Register the class under its own name so tune can look it up.
        name = env_object.__name__
        register_env(name, lambda config: env_object(config))
        return name
    raise ValueError(
        "{} is an invalid env specification. ".format(env_object) +
        "You can specify a custom env as either a class "
        "(e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/trainer_template.py
|
Python
|
import time
from ray.rllib.agents.trainer import Trainer, COMMON_CONFIG
from ray.rllib.optimizers import SyncSamplesOptimizer
from ray.rllib.utils import add_mixins
from ray.rllib.utils.annotations import override, DeveloperAPI
@DeveloperAPI
def build_trainer(name,
                  default_policy,
                  default_config=None,
                  validate_config=None,
                  get_initial_state=None,
                  get_policy_class=None,
                  before_init=None,
                  make_workers=None,
                  make_policy_optimizer=None,
                  after_init=None,
                  before_train_step=None,
                  after_optimizer_step=None,
                  after_train_result=None,
                  collect_metrics_fn=None,
                  before_evaluate_fn=None,
                  mixins=None):
    """Helper function for defining a custom trainer.

    Functions will be run in this order to initialize the trainer:
        1. Config setup: validate_config, get_initial_state, get_policy
        2. Worker setup: before_init, make_workers, make_policy_optimizer
        3. Post setup: after_init

    Arguments:
        name (str): name of the trainer (e.g., "PPO")
        default_policy (cls): the default Policy class to use
        default_config (dict): the default config dict of the algorithm,
            otherwise uses the Trainer default config
        validate_config (func): optional callback that checks a given config
            for correctness. It may mutate the config as needed.
        get_initial_state (func): optional function that returns the initial
            state dict given the trainer instance as an argument. The state
            dict must be serializable so that it can be checkpointed, and will
            be available as the `trainer.state` variable.
        get_policy_class (func): optional callback that takes a config and
            returns the policy class to override the default with
        before_init (func): optional function to run at the start of trainer
            init that takes the trainer instance as argument
        make_workers (func): override the method that creates rollout workers.
            This takes in (trainer, env_creator, policy, config) as args.
        make_policy_optimizer (func): optional function that returns a
            PolicyOptimizer instance given (WorkerSet, config)
        after_init (func): optional function to run at the end of trainer init
            that takes the trainer instance as argument
        before_train_step (func): optional callback to run before each train()
            call. It takes the trainer instance as an argument.
        after_optimizer_step (func): optional callback to run after each
            step() call to the policy optimizer. It takes the trainer instance
            and the policy gradient fetches as arguments.
        after_train_result (func): optional callback to run at the end of each
            train() call. It takes the trainer instance and result dict as
            arguments, and may mutate the result dict as needed.
        collect_metrics_fn (func): override the method used to collect
            metrics. It takes the trainer instance as argument.
        before_evaluate_fn (func): callback to run before evaluation. This
            takes the trainer instance as argument.
        mixins (list): list of any class mixins for the returned trainer
            class. These mixins will be applied in order and will have higher
            precedence than the Trainer class

    Returns:
        a Trainer instance that uses the specified args.
    """
    # Captured so with_updates() can rebuild the trainer with overrides.
    original_kwargs = locals().copy()
    base = add_mixins(Trainer, mixins)

    class trainer_cls(base):
        _name = name
        _default_config = default_config or COMMON_CONFIG
        _policy = default_policy

        def __init__(self, config=None, env=None, logger_creator=None):
            Trainer.__init__(self, config, env, logger_creator)

        def _init(self, config, env_creator):
            # 1. Config setup.
            if validate_config:
                validate_config(config)
            if get_initial_state:
                self.state = get_initial_state(self)
            else:
                self.state = {}
            if get_policy_class is None:
                policy = default_policy
            else:
                policy = get_policy_class(config)
            # 2. Worker setup.
            if before_init:
                before_init(self)
            if make_workers:
                self.workers = make_workers(self, env_creator, policy, config)
            else:
                self.workers = self._make_workers(env_creator, policy, config,
                                                  self.config["num_workers"])
            if make_policy_optimizer:
                self.optimizer = make_policy_optimizer(self.workers, config)
            else:
                # Default: a synchronous samples optimizer.
                optimizer_config = dict(
                    config["optimizer"],
                    **{"train_batch_size": config["train_batch_size"]})
                self.optimizer = SyncSamplesOptimizer(self.workers,
                                                      **optimizer_config)
            # 3. Post setup.
            if after_init:
                after_init(self)

        @override(Trainer)
        def _train(self):
            if before_train_step:
                before_train_step(self)
            prev_steps = self.optimizer.num_steps_sampled
            start = time.time()
            # Step the optimizer until both the minimum wall-clock time
            # and the minimum timestep count per iteration are reached.
            while True:
                fetches = self.optimizer.step()
                if after_optimizer_step:
                    after_optimizer_step(self, fetches)
                if (time.time() - start >= self.config["min_iter_time_s"]
                        and self.optimizer.num_steps_sampled - prev_steps >=
                        self.config["timesteps_per_iteration"]):
                    break
            if collect_metrics_fn:
                res = collect_metrics_fn(self)
            else:
                res = self.collect_metrics()
            res.update(
                timesteps_this_iter=self.optimizer.num_steps_sampled -
                prev_steps,
                info=res.get("info", {}))
            if after_train_result:
                after_train_result(self, res)
            return res

        @override(Trainer)
        def _before_evaluate(self):
            if before_evaluate_fn:
                before_evaluate_fn(self)

        def __getstate__(self):
            state = Trainer.__getstate__(self)
            # Persist the custom trainer state alongside the base state.
            state["trainer_state"] = self.state.copy()
            return state

        def __setstate__(self, state):
            Trainer.__setstate__(self, state)
            self.state = state["trainer_state"].copy()

    def with_updates(**overrides):
        """Build a copy of this trainer with the specified overrides.

        Arguments:
            overrides (dict): use this to override any of the arguments
                originally passed to build_trainer() for this policy.
        """
        return build_trainer(**dict(original_kwargs, **overrides))

    trainer_cls.with_updates = staticmethod(with_updates)
    trainer_cls.__name__ = name
    trainer_cls.__qualname__ = name
    return trainer_cls
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/alpha_zero/core/alpha_zero_policy.py
|
Python
|
import numpy as np
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.torch_policy import TorchPolicy
from ray.rllib.utils.annotations import override
from ray.rllib.contrib.alpha_zero.core.mcts import Node, RootParentNode
from ray.rllib.utils import try_import_torch
torch, _ = try_import_torch()
class AlphaZeroPolicy(TorchPolicy):
    """Torch policy that selects actions by running MCTS simulations.

    Keeps a private env copy (built by `env_creator`) that is stepped
    inside the tree search; rollout workers never touch this copy.
    """

    def __init__(self, observation_space, action_space, config, model, loss,
                 action_distribution_class, mcts_creator, env_creator,
                 **kwargs):
        super().__init__(
            observation_space, action_space, config, model, loss,
            action_distribution_class
        )
        # we maintain an env copy in the policy that is used during mcts
        # simulations
        self.env_creator = env_creator
        self.mcts = mcts_creator()
        self.env = self.env_creator()
        self.env.reset()
        self.obs_space = observation_space

    @override(TorchPolicy)
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        """Compute one MCTS-derived action per episode in the batch.

        The resulting tree node and MCTS policy vectors are stashed in
        each episode's `user_data` for reuse on the next step and for
        `postprocess_trajectory`.
        """
        with torch.no_grad():
            input_dict = {"obs": obs_batch}
            # NOTE(review): truthiness checks assume these batches are
            # lists or None, not numpy arrays — confirm with callers.
            if prev_action_batch:
                input_dict["prev_actions"] = prev_action_batch
            if prev_reward_batch:
                input_dict["prev_rewards"] = prev_reward_batch
            actions = []
            for i, episode in enumerate(episodes):
                if episode.length == 0:
                    # if first time step of episode, get initial env state
                    env_state = episode.user_data["initial_state"]
                    # verify if env has been wrapped for ranked rewards
                    if self.env.__class__.__name__ == \
                            "RankedRewardsEnvWrapper":
                        # r2 env state contains also the rewards buffer state
                        env_state = {
                            "env_state": env_state,
                            "buffer_state": None
                        }
                    # create tree root node
                    obs = self.env.set_state(env_state)
                    tree_node = Node(
                        state=env_state,
                        obs=obs,
                        reward=0,
                        done=False,
                        action=None,
                        parent=RootParentNode(env=self.env),
                        mcts=self.mcts)
                else:
                    # otherwise get last root node from previous time step
                    tree_node = episode.user_data["tree_node"]
                # run monte carlo simulations to compute the actions
                # and record the tree
                mcts_policy, action, tree_node = self.mcts.compute_action(
                    tree_node)
                # record action
                actions.append(action)
                # store new node
                episode.user_data["tree_node"] = tree_node
                # store mcts policies vectors and current tree root node
                if episode.length == 0:
                    episode.user_data["mcts_policies"] = [mcts_policy]
                else:
                    episode.user_data["mcts_policies"].append(mcts_policy)
            return np.array(actions), [], self.extra_action_out(
                input_dict, state_batches, self.model)

    @override(Policy)
    def postprocess_trajectory(self,
                               sample_batch,
                               other_agent_batches=None,
                               episode=None):
        """Attach MCTS policy targets and value labels to the batch."""
        # add mcts policies to sample batch
        sample_batch["mcts_policies"] = np.array(
            episode.user_data["mcts_policies"])[sample_batch["t"]]
        # final episode reward corresponds to the value (if not discounted)
        # for all transitions in episode
        final_reward = sample_batch["rewards"][-1]
        # if r2 is enabled, then add the reward to the buffer and normalize it
        if self.env.__class__.__name__ == "RankedRewardsEnvWrapper":
            self.env.r2_buffer.add_reward(final_reward)
            final_reward = self.env.r2_buffer.normalize(final_reward)
        sample_batch["value_label"] = final_reward * np.ones_like(
            sample_batch["t"])
        return sample_batch

    @override(Policy)
    def learn_on_batch(self, postprocessed_batch):
        """Run one gradient step on the batch; return learner stats."""
        train_batch = self._lazy_tensor_dict(postprocessed_batch)
        loss_out, policy_loss, value_loss = self._loss(
            self, self.model, self.dist_class, train_batch)
        self._optimizer.zero_grad()
        loss_out.backward()
        grad_process_info = self.extra_grad_process()
        self._optimizer.step()
        grad_info = self.extra_grad_info(train_batch)
        grad_info.update(grad_process_info)
        grad_info.update({
            "total_loss": loss_out.detach().numpy(),
            "policy_loss": policy_loss.detach().numpy(),
            "value_loss": value_loss.detach().numpy()
        })
        return {LEARNER_STATS_KEY: grad_info}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/alpha_zero/core/alpha_zero_trainer.py
|
Python
|
import logging
from ray.rllib.agents import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.model import restore_original_dimensions
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.optimizers import SyncSamplesOptimizer
from ray.rllib.utils import try_import_tf, try_import_torch
from ray.tune.registry import ENV_CREATOR, _global_registry
from ray.rllib.contrib.alpha_zero.core.alpha_zero_policy import AlphaZeroPolicy
from ray.rllib.contrib.alpha_zero.core.mcts import MCTS
from ray.rllib.contrib.alpha_zero.core.ranked_rewards import get_r2_env_wrapper
from ray.rllib.contrib.alpha_zero.optimizer.sync_batches_replay_optimizer \
import SyncBatchesReplayOptimizer
tf = try_import_tf()
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
def on_episode_start(info):
    """Callback: snapshot the env state into episode.user_data at start."""
    # Only the first (unwrapped) sub-env is used for the snapshot.
    base_env = info["env"].get_unwrapped()[0]
    info["episode"].user_data["initial_state"] = base_env.get_state()
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # Size of batches collected from each worker
    "sample_batch_size": 200,
    # Number of timesteps collected for each SGD round
    "train_batch_size": 4000,
    # Total SGD batch size across all devices for SGD
    "sgd_minibatch_size": 128,
    # Whether to shuffle sequences in the batch when training (recommended)
    "shuffle_sequences": True,
    # Number of SGD iterations in each outer loop
    "num_sgd_iter": 30,
    # In case a buffer optimizer is used
    "learning_starts": 1000,
    "buffer_size": 10000,
    # Stepsize of SGD
    "lr": 5e-5,
    # Learning rate schedule
    "lr_schedule": None,
    # Share layers for value function. If you set this to True, it's important
    # to tune vf_loss_coeff.
    "vf_share_layers": False,
    # Whether to rollout "complete_episodes" or "truncate_episodes"
    "batch_mode": "complete_episodes",
    # Which observation filter to apply to the observation
    "observation_filter": "NoFilter",
    # Uses the sync samples optimizer instead of the multi-gpu one. This does
    # not support minibatches.
    "simple_optimizer": True,

    # === MCTS ===
    "mcts_config": {
        "puct_coefficient": 1.0,
        "num_simulations": 30,
        "temperature": 1.5,
        "dirichlet_epsilon": 0.25,
        "dirichlet_noise": 0.03,
        "argmax_tree_policy": False,
        "add_dirichlet_noise": True,
    },

    # === Ranked Rewards ===
    # implement the ranked reward (r2) algorithm
    # from: https://arxiv.org/pdf/1807.01672.pdf
    "ranked_rewards": {
        "enable": True,
        "percentile": 75,
        "buffer_max_length": 1000,
        # add rewards obtained from random policy to
        # "warm start" the buffer
        "initialize_buffer": True,
        "num_init_rewards": 100,
    },

    # === Evaluation ===
    # Extra configuration that disables exploration.
    "evaluation_config": {
        "mcts_config": {
            "argmax_tree_policy": True,
            "add_dirichlet_noise": False,
        },
    },

    # === Callbacks ===
    "callbacks": {
        "on_episode_start": on_episode_start,
    }
})
# __sphinx_doc_end__
# yapf: enable
def choose_policy_optimizer(workers, config):
    """Pick the policy optimizer implementation per `simple_optimizer`."""
    if not config["simple_optimizer"]:
        # Replay path: learn on batches re-sampled from a buffer.
        return SyncBatchesReplayOptimizer(
            workers,
            num_gradient_descents=config["num_sgd_iter"],
            learning_starts=config["learning_starts"],
            train_batch_size=config["train_batch_size"],
            buffer_size=config["buffer_size"])
    # Simple path: learn directly on freshly collected samples.
    return SyncSamplesOptimizer(
        workers,
        num_sgd_iter=config["num_sgd_iter"],
        train_batch_size=config["train_batch_size"])
def alpha_zero_loss(policy, model, dist_class, train_batch):
    """Compute AlphaZero (total, policy, value) losses for a batch."""
    # Unflatten observations back to their original (dict) structure.
    input_dict = restore_original_dimensions(train_batch["obs"],
                                             policy.observation_space, "torch")
    # Forward pass through the model.
    logits, _ = model.forward(input_dict, None, [1])
    values = model.value_function()
    logits = torch.squeeze(logits)
    values = torch.squeeze(values)
    priors = nn.Softmax(dim=-1)(logits)
    # Cross-entropy between MCTS visit distributions and network priors.
    policy_loss = torch.mean(
        -torch.sum(train_batch["mcts_policies"] * torch.log(priors), dim=-1))
    # MSE between predicted values and episode-outcome labels.
    value_loss = torch.mean(torch.pow(values - train_batch["value_label"], 2))
    total_loss = (policy_loss + value_loss) / 2
    return total_loss, policy_loss, value_loss
class AlphaZeroPolicyWrapperClass(AlphaZeroPolicy):
    """Concrete AlphaZero policy: builds model, env copy and MCTS."""

    def __init__(self, obs_space, action_space, config):
        model = ModelCatalog.get_model_v2(
            obs_space, action_space, action_space.n, config["model"], "torch")
        env_creator = _global_registry.get(ENV_CREATOR, config["env"])
        if config["ranked_rewards"]["enable"]:
            # if r2 is enabled, the env is wrapped to include a rewards buffer
            # used to normalize rewards
            env_cls = get_r2_env_wrapper(env_creator, config["ranked_rewards"])

            # the wrapped env is used only in the mcts, not in the
            # rollout workers
            def _env_creator():
                return env_cls(config["env_config"])
        else:

            def _env_creator():
                return env_creator(config["env_config"])

        def mcts_creator():
            return MCTS(model, config["mcts_config"])

        super().__init__(
            obs_space, action_space, config, model, alpha_zero_loss,
            TorchCategorical, mcts_creator, _env_creator
        )
# Trainer entry point: AlphaZero with the wrapped policy and the
# optimizer selected by `choose_policy_optimizer`.
AlphaZeroTrainer = build_trainer(
    name="AlphaZero",
    default_config=DEFAULT_CONFIG,
    default_policy=AlphaZeroPolicyWrapperClass,
    make_policy_optimizer=choose_policy_optimizer)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/alpha_zero/core/mcts.py
|
Python
|
"""
Mcts implementation modified from
https://github.com/brilee/python_uct/blob/master/numpy_impl.py
"""
import collections
import math
import numpy as np
class Node:
    """One state node in the MCTS search tree.

    Per-child statistics (total value Q, priors P, visit counts N) are
    kept in arrays on this node, indexed by action. A node's *own*
    statistics live in its parent's arrays and are accessed via the
    `number_visits` / `total_value` properties.
    """

    def __init__(self, action, obs, done, reward, state, mcts, parent=None):
        self.env = parent.env
        self.action = action  # Action used to go to this state
        self.is_expanded = False
        self.parent = parent
        self.children = {}
        self.action_space_size = self.env.action_space.n
        self.child_total_value = np.zeros(
            [self.action_space_size], dtype=np.float32)  # Q
        self.child_priors = np.zeros(
            [self.action_space_size], dtype=np.float32)  # P
        self.child_number_visits = np.zeros(
            [self.action_space_size], dtype=np.float32)  # N
        # Use the builtin `bool`: the `np.bool` alias is deprecated since
        # NumPy 1.20 and removed in NumPy 1.24.
        self.valid_actions = obs["action_mask"].astype(bool)
        self.reward = reward
        self.done = done
        self.state = state
        self.obs = obs
        self.mcts = mcts

    @property
    def number_visits(self):
        """Visit count N of this node, stored in the parent's array."""
        return self.parent.child_number_visits[self.action]

    @number_visits.setter
    def number_visits(self, value):
        self.parent.child_number_visits[self.action] = value

    @property
    def total_value(self):
        """Accumulated value W of this node, stored in the parent's array."""
        return self.parent.child_total_value[self.action]

    @total_value.setter
    def total_value(self, value):
        self.parent.child_total_value[self.action] = value

    def child_Q(self):
        """Mean value estimate per child (W / (1 + N))."""
        # TODO (weak todo) add "softmax" version of the Q-value
        return self.child_total_value / (1 + self.child_number_visits)

    def child_U(self):
        """Exploration bonus per child (PUCT-style term)."""
        return math.sqrt(self.number_visits) * self.child_priors / (
            1 + self.child_number_visits)

    def best_action(self):
        """Return the valid action maximizing Q + c_puct * U.

        Invalid actions are masked to -inf; the score array is freshly
        computed, so mutating it in place is safe.
        """
        child_score = self.child_Q() + self.mcts.c_puct * self.child_U()
        masked_child_score = child_score
        masked_child_score[~self.valid_actions] = -np.inf
        return np.argmax(masked_child_score)

    def select(self):
        """Descend via best_action until an unexpanded node is reached."""
        current_node = self
        while current_node.is_expanded:
            best_action = current_node.best_action()
            current_node = current_node.get_child(best_action)
        return current_node

    def expand(self, child_priors):
        """Mark this node expanded and install the network priors."""
        self.is_expanded = True
        self.child_priors = child_priors

    def get_child(self, action):
        """Return (creating on first use) the child node for `action`."""
        if action not in self.children:
            # Re-seat the shared env at this node's state, then step it.
            self.env.set_state(self.state)
            obs, reward, done, _ = self.env.step(action)
            next_state = self.env.get_state()
            self.children[action] = Node(
                state=next_state,
                action=action,
                parent=self,
                reward=reward,
                done=done,
                obs=obs,
                mcts=self.mcts)
        return self.children[action]

    def backup(self, value):
        """Propagate `value` up to the root, bumping visit counts."""
        current = self
        while current.parent is not None:
            current.number_visits += 1
            current.total_value += value
            current = current.parent
class RootParentNode:
    """Sentinel parent for the MCTS root node.

    Provides the attributes `Node` reads from its parent; defaultdicts
    stand in for the per-child statistic arrays (any key reads as 0.0).
    """

    def __init__(self, env):
        self.parent = None
        self.env = env
        self.child_total_value = collections.defaultdict(float)
        self.child_number_visits = collections.defaultdict(float)
class MCTS:
    """Monte Carlo Tree Search driver, parameterized by `mcts_param`."""

    def __init__(self, model, mcts_param):
        self.model = model
        self.temperature = mcts_param["temperature"]
        self.dir_epsilon = mcts_param["dirichlet_epsilon"]
        self.dir_noise = mcts_param["dirichlet_noise"]
        self.num_sims = mcts_param["num_simulations"]
        self.exploit = mcts_param["argmax_tree_policy"]
        self.add_dirichlet_noise = mcts_param["add_dirichlet_noise"]
        self.c_puct = mcts_param["puct_coefficient"]

    def compute_action(self, node):
        """Run simulations from `node`; return (tree_policy, action, child).

        `tree_policy` is the visit-count distribution over actions,
        `action` is sampled from it (or its argmax when exploiting), and
        `child` becomes the search root for the next time step.
        """
        for _ in range(self.num_sims):
            leaf = node.select()
            if leaf.done:
                value = leaf.reward
            else:
                child_priors, value = self.model.compute_priors_and_value(
                    leaf.obs)
                if self.add_dirichlet_noise:
                    # Mix Dirichlet noise into the priors to force
                    # exploration (AlphaZero-style).
                    child_priors = (1 - self.dir_epsilon) * child_priors
                    child_priors += self.dir_epsilon * np.random.dirichlet(
                        [self.dir_noise] * child_priors.size)
                leaf.expand(child_priors)
            leaf.backup(value)

        # Tree policy target (TPT)
        tree_policy = node.child_number_visits / node.number_visits
        tree_policy = tree_policy / np.max(
            tree_policy)  # to avoid overflows when computing softmax
        tree_policy = np.power(tree_policy, self.temperature)
        tree_policy = tree_policy / np.sum(tree_policy)
        if self.exploit:
            # if exploit then choose action that has the maximum
            # tree policy probability
            action = np.argmax(tree_policy)
        else:
            # otherwise sample an action according to tree policy probabilities
            action = np.random.choice(
                np.arange(node.action_space_size), p=tree_policy)
        return tree_policy, action, node.children[action]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/alpha_zero/core/ranked_rewards.py
|
Python
|
from copy import deepcopy
import numpy as np
class RankedRewardsBuffer:
    """Fixed-size FIFO of episode rewards for ranked-reward (R2) training.

    `normalize` binarizes a reward against the buffer's running
    percentile threshold: at-or-above maps to +1.0, below to -1.0.
    """

    def __init__(self, buffer_max_length, percentile):
        self.buffer_max_length = buffer_max_length
        self.percentile = percentile
        self.buffer = []

    def add_reward(self, reward):
        """Append `reward`, evicting the oldest entry once full."""
        if len(self.buffer) >= self.buffer_max_length:
            self.buffer = self.buffer[1:]
        self.buffer.append(reward)

    def normalize(self, reward):
        """Binarize `reward` against the buffer's percentile threshold."""
        threshold = np.percentile(self.buffer, self.percentile)
        return 1.0 if reward >= threshold else -1.0

    def get_state(self):
        """Snapshot the buffer contents as an array."""
        return np.array(self.buffer)

    def set_state(self, state):
        """Restore buffer contents from a get_state() snapshot (or None)."""
        if state is not None:
            self.buffer = list(state)
def get_r2_env_wrapper(env_creator, r2_config):
    """Return an env wrapper class implementing ranked rewards (R2).

    The wrapper owns a RankedRewardsBuffer and binarizes the terminal
    reward against the buffer's percentile. get_state/set_state also
    carry the buffer contents so MCTS can snapshot/restore both.
    """

    class RankedRewardsEnvWrapper:
        def __init__(self, env_config):
            self.env = env_creator(env_config)
            self.action_space = self.env.action_space
            self.observation_space = self.env.observation_space
            max_buffer_length = r2_config["buffer_max_length"]
            percentile = r2_config["percentile"]
            self.r2_buffer = RankedRewardsBuffer(max_buffer_length, percentile)
            if r2_config["initialize_buffer"]:
                self._initialize_buffer(r2_config["num_init_rewards"])

        def _initialize_buffer(self, num_init_rewards=100):
            # initialize buffer with random policy
            for _ in range(num_init_rewards):
                obs = self.env.reset()
                done = False
                while not done:
                    # Sample uniformly among the currently valid actions.
                    mask = obs["action_mask"]
                    probs = mask / mask.sum()
                    action = np.random.choice(
                        np.arange(mask.shape[0]), p=probs)
                    obs, reward, done, _ = self.env.step(action)
                self.r2_buffer.add_reward(reward)

        def step(self, action):
            obs, reward, done, info = self.env.step(action)
            if done:
                # Binarize the terminal reward against the buffer.
                reward = self.r2_buffer.normalize(reward)
            return obs, reward, done, info

        def get_state(self):
            state = {
                "env_state": self.env.get_state(),
                "buffer_state": self.r2_buffer.get_state()
            }
            return deepcopy(state)

        def reset(self):
            return self.env.reset()

        def set_state(self, state):
            obs = self.env.set_state(state["env_state"])
            self.r2_buffer.set_state(state["buffer_state"])
            return obs

    return RankedRewardsEnvWrapper
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/alpha_zero/environments/cartpole.py
|
Python
|
from copy import deepcopy
import gym
import numpy as np
from gym.spaces import Discrete, Dict, Box
class CartPole:
    """
    Wrapper for gym CartPole environment where the reward
    is accumulated to the end
    """

    def __init__(self, config=None):
        self.env = gym.make("CartPole-v0")
        self.action_space = Discrete(2)
        self.observation_space = Dict({
            "obs": self.env.observation_space,
            "action_mask": Box(low=0, high=1, shape=(self.action_space.n, ))
        })
        self.running_reward = 0

    def _wrap_obs(self, raw_obs):
        """Attach the (always all-ones) action mask to a raw observation."""
        return {"obs": raw_obs, "action_mask": np.array([1, 1])}

    def reset(self):
        self.running_reward = 0
        return self._wrap_obs(self.env.reset())

    def step(self, action):
        raw_obs, rew, done, info = self.env.step(action)
        self.running_reward += rew
        # Reward is withheld until the episode ends, then paid as the total.
        score = self.running_reward if done else 0
        return self._wrap_obs(raw_obs), score, done, info

    def set_state(self, state):
        env_copy, saved_reward = state
        self.running_reward = saved_reward
        self.env = deepcopy(env_copy)
        return self._wrap_obs(np.array(list(self.env.unwrapped.state)))

    def get_state(self):
        return deepcopy(self.env), self.running_reward
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/alpha_zero/examples/train_cartpole.py
|
Python
|
"""Example of using training on CartPole."""
import argparse
from ray import tune
from ray.rllib.contrib.alpha_zero.models.custom_torch_models import DenseModel
from ray.rllib.contrib.alpha_zero.environments.cartpole import CartPole
from ray.rllib.models.catalog import ModelCatalog
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-workers", default=6, type=int)
    parser.add_argument("--training-iteration", default=10000, type=int)
    args = parser.parse_args()
    # Make the custom model available to the trainer config below.
    ModelCatalog.register_custom_model("dense_model", DenseModel)
    tune.run(
        "contrib/AlphaZero",
        stop={"training_iteration": args.training_iteration},
        max_failures=0,
        config={
            "env": CartPole,
            "num_workers": args.num_workers,
            "sample_batch_size": 50,
            "train_batch_size": 500,
            "sgd_minibatch_size": 64,
            "lr": 1e-4,
            "num_sgd_iter": 1,
            # MCTS search parameters (see the MCTS class).
            "mcts_config": {
                "puct_coefficient": 1.5,
                "num_simulations": 100,
                "temperature": 1.0,
                "dirichlet_epsilon": 0.20,
                "dirichlet_noise": 0.03,
                "argmax_tree_policy": False,
                "add_dirichlet_noise": True,
            },
            "ranked_rewards": {
                "enable": True,
            },
            "model": {
                "custom_model": "dense_model",
            },
        },
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/alpha_zero/models/custom_torch_models.py
|
Python
|
from abc import ABC
import numpy as np
from ray.rllib.models.model import restore_original_dimensions
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils import try_import_torch
torch, nn = try_import_torch()
def convert_to_tensor(arr):
    """Convert an array-like to a torch tensor, downcasting f64 to f32."""
    tensor = torch.from_numpy(np.asarray(arr))
    return tensor.float() if tensor.dtype == torch.double else tensor
class ActorCriticModel(TorchModelV2, nn.Module, ABC):
    """Base torch model with shared, actor and critic layer stacks.

    Subclasses must populate `shared_layers`, `actor_layers` and
    `critic_layers` in their constructors.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        # Preprocessor for the original (dict) observation space.
        self.preprocessor = get_preprocessor(obs_space.original_space)(
            obs_space.original_space)
        self.shared_layers = None
        self.actor_layers = None
        self.critic_layers = None
        self._value_out = None

    def forward(self, input_dict, state, seq_lens):
        """Run the shared trunk; return policy logits, caching the value."""
        x = input_dict["obs"]
        x = self.shared_layers(x)
        # actor outputs
        logits = self.actor_layers(x)
        # compute value
        self._value_out = self.critic_layers(x)
        return logits, None

    def value_function(self):
        """Return the value estimate cached by the last forward() call."""
        return self._value_out

    def compute_priors_and_value(self, obs):
        """Compute softmax priors and value for a single raw observation."""
        obs = convert_to_tensor([self.preprocessor.transform(obs)])
        input_dict = restore_original_dimensions(obs, self.obs_space, "torch")
        with torch.no_grad():
            model_out = self.forward(input_dict, None, [1])
            logits, _ = model_out
            value = self.value_function()
            logits, value = torch.squeeze(logits), torch.squeeze(value)
            priors = nn.Softmax(dim=-1)(logits)
            priors = priors.cpu().numpy()
            value = value.cpu().numpy()
            return priors, value
class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
class ConvNetModel(ActorCriticModel):
    """Convolutional trunk for image observations.

    `model_config["custom_options"]` must supply `in_channels` and
    `feature_dim`.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        ActorCriticModel.__init__(self, obs_space, action_space, num_outputs,
                                  model_config, name)
        in_channels = model_config["custom_options"]["in_channels"]
        feature_dim = model_config["custom_options"]["feature_dim"]
        # NOTE(review): the Linear(1024, ...) input size presumes a fixed
        # input image resolution — confirm against the env's obs shape.
        self.shared_layers = nn.Sequential(
            nn.Conv2d(in_channels, 32, kernel_size=4, stride=2),
            nn.Conv2d(32, 64, kernel_size=2, stride=1),
            nn.Conv2d(64, 64, kernel_size=2, stride=1), Flatten(),
            nn.Linear(1024, feature_dim))
        self.actor_layers = nn.Sequential(
            nn.Linear(in_features=feature_dim, out_features=action_space.n))
        self.critic_layers = nn.Sequential(
            nn.Linear(in_features=feature_dim, out_features=1))
        self._value_out = None
class DenseModel(ActorCriticModel):
    """Two-layer MLP trunk over the flat `obs` component of the space."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        ActorCriticModel.__init__(self, obs_space, action_space, num_outputs,
                                  model_config, name)
        self.shared_layers = nn.Sequential(
            nn.Linear(
                in_features=obs_space.original_space["obs"].shape[0],
                out_features=256), nn.Linear(
                    in_features=256, out_features=256))
        self.actor_layers = nn.Sequential(
            nn.Linear(in_features=256, out_features=action_space.n))
        self.critic_layers = nn.Sequential(
            nn.Linear(in_features=256, out_features=1))
        self._value_out = None
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/alpha_zero/optimizer/sync_batches_replay_optimizer.py
|
Python
|
import random
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.optimizers.sync_batch_replay_optimizer import \
SyncBatchReplayOptimizer
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override
class SyncBatchesReplayOptimizer(SyncBatchReplayOptimizer):
    """Replay optimizer running several SGD passes per optimize step."""

    def __init__(self,
                 workers,
                 learning_starts=1000,
                 buffer_size=10000,
                 train_batch_size=32,
                 num_gradient_descents=10):
        super(SyncBatchesReplayOptimizer, self).__init__(
            workers, learning_starts, buffer_size, train_batch_size)
        # Number of SGD passes performed per call to _optimize().
        self.num_sgds = num_gradient_descents

    @override(SyncBatchReplayOptimizer)
    def _optimize(self):
        """Sample from the replay buffer and learn, `num_sgds` times."""
        for _ in range(self.num_sgds):
            # Draw random stored batches until a train batch is filled.
            samples = [random.choice(self.replay_buffer)]
            while sum(s.count for s in samples) < self.train_batch_size:
                samples.append(random.choice(self.replay_buffer))
            samples = SampleBatch.concat_samples(samples)
            with self.grad_timer:
                info_dict = self.workers.local_worker().learn_on_batch(samples)
                for policy_id, info in info_dict.items():
                    self.learner_stats[policy_id] = get_learner_stats(info)
                self.grad_timer.push_units_processed(samples.count)
            self.num_steps_trained += samples.count
        # Stats from the last SGD pass are returned.
        return info_dict
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/maddpg/__init__.py
|
Python
|
from ray.rllib.contrib.maddpg.maddpg import MADDPGTrainer, DEFAULT_CONFIG
__all__ = ["MADDPGTrainer", "DEFAULT_CONFIG"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/maddpg/maddpg.py
|
Python
|
"""Contributed port of MADDPG from OpenAI baselines.
The implementation makes a couple of assumptions:
- The number of agents is fixed and known upfront.
- Each agent is bound to a policy of the same name.
- Discrete actions are sent as logits (pre-softmax).
For a minimal example, see twostep_game.py, and the README for how to run
with the multi-agent particle envs.
"""
import logging
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.contrib.maddpg.maddpg_policy import MADDPGTFPolicy
from ray.rllib.optimizers import SyncReplayOptimizer
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # === Settings for each individual policy ===
    # ID of the agent controlled by this policy
    # (must be an int; validated in MADDPGTFPolicy.__init__).
    "agent_id": None,
    # Use a local critic for this policy.
    "use_local_critic": False,
    # === Evaluation ===
    # Evaluation interval
    "evaluation_interval": None,
    # Number of episodes to run per evaluation period.
    "evaluation_num_episodes": 10,
    # === Model ===
    # Apply a state preprocessor with spec given by the "model" config option
    # (like other RL algorithms). This is mostly useful if you have a weird
    # observation shape, like an image. Disabled by default.
    "use_state_preprocessor": False,
    # Postprocess the policy network model output with these hidden layers. If
    # use_state_preprocessor is False, then these will be the *only* hidden
    # layers in the network.
    "actor_hiddens": [64, 64],
    # Hidden layers activation of the postprocessing stage of the policy
    # network
    "actor_hidden_activation": "relu",
    # Postprocess the critic network model output with these hidden layers;
    # again, if use_state_preprocessor is True, then the state will be
    # preprocessed by the model specified with the "model" config option first.
    "critic_hiddens": [64, 64],
    # Hidden layers activation of the postprocessing state of the critic.
    "critic_hidden_activation": "relu",
    # N-step Q learning
    "n_step": 1,
    # Algorithm for good policies
    "good_policy": "maddpg",
    # Algorithm for adversary policies
    "adv_policy": "maddpg",
    # === Replay buffer ===
    # Size of the replay buffer. Note that if async_updates is set, then
    # each worker will have a replay buffer of this size.
    "buffer_size": int(1e6),
    # Observation compression. Note that compression makes simulation slow in
    # MPE.
    "compress_observations": False,
    # === Optimization ===
    # Learning rate for the critic (Q-function) optimizer.
    "critic_lr": 1e-2,
    # Learning rate for the actor (policy) optimizer.
    "actor_lr": 1e-2,
    # Update the target network every `target_network_update_freq` steps.
    "target_network_update_freq": 0,
    # Update the target by \tau * policy + (1-\tau) * target_policy
    "tau": 0.01,
    # Weights for feature regularization for the actor
    "actor_feature_reg": 0.001,
    # If not None, clip gradients during optimization at this value
    "grad_norm_clipping": 0.5,
    # How many steps of the model to sample before learning starts.
    "learning_starts": 1024 * 25,
    # Update the replay buffer with this many samples at once. Note that this
    # setting applies per-worker if num_workers > 1.
    "sample_batch_size": 100,
    # Size of a batched sampled from replay buffer for training. Note that
    # if async_updates is set, then each worker returns gradients for a
    # batch of this size.
    "train_batch_size": 1024,
    # Number of env steps to optimize for before returning
    "timesteps_per_iteration": 0,
    # === Parallelism ===
    # Number of workers for collecting samples with. This only makes sense
    # to increase if your environment is particularly slow to sample, or if
    # you're using the Async or Ape-X optimizers.
    "num_workers": 1,
    # Prevent iterations from going lower than this time span
    "min_iter_time_s": 0,
})
# __sphinx_doc_end__
# yapf: enable
def set_global_timestep(trainer):
    """Record the current sampled-step count on the trainer.

    Called before each train step so add_trainer_metrics() can later report
    how many timesteps the iteration covered.
    """
    sampled_so_far = trainer.optimizer.num_steps_sampled
    trainer.train_start_timestep = sampled_so_far
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size):
    """Preprocess a replayed MultiAgentBatch before learn_on_batch().

    Renames each policy's sample columns with an agent-id suffix
    ("obs_0", "new_obs_1", ...), runs every policy's target action sampler
    on the replayed next-observations, and shares the combined columns with
    all policies (MADDPG's centralized critic needs every agent's data).

    Args:
        multi_agent_batch (MultiAgentBatch): replayed batch, one
            SampleBatch per policy id.
        policies (dict): policy id -> MADDPGTFPolicy.
        train_batch_size (int): env-step count recorded on the returned
            MultiAgentBatch.

    Returns:
        MultiAgentBatch: the same shared sample dict wrapped once per
        policy id.
    """
    samples = {}
    # Suffix each column name with the owning agent's integer id so the
    # columns of all agents can live in one flat dict.
    for pid, policy in policies.items():
        agent_idx = policy.config["agent_id"]
        batch_data = multi_agent_batch.policy_batches[pid].data
        samples.update({
            "_".join([key, str(agent_idx)]): value
            for key, value in batch_data.items()
        })
    # Evaluate every policy's target action sampler on the next-observations.
    # new_obs_n is collected in the same per-policy insertion order used to
    # build `samples`, matching the order of new_obs_ph_n.
    new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
    new_obs_n = [v for k, v in samples.items() if "new_obs" in k]
    target_act_sampler_n = [p.target_act_sampler for p in policies.values()]
    feed_dict = dict(zip(new_obs_ph_n, new_obs_n))
    # Bug fix: the original used the loop variable `p` leaked from the loop
    # above to reach the TF session (NameError if `policies` is empty).
    # NOTE(review): this presumes all policies share one session (each is
    # built from tf.get_default_session()) — any of them works here.
    sess = next(iter(policies.values())).sess
    new_act_n = sess.run(target_act_sampler_n, feed_dict)
    samples.update(
        {"new_actions_%d" % i: new_act
         for i, new_act in enumerate(new_act_n)})
    # Share the full column set with every policy.
    policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
    return MultiAgentBatch(policy_batches, train_batch_size)
def make_optimizer(workers, config):
    """Build the synchronous replay optimizer used by the MADDPG trainer."""
    optimizer_kwargs = dict(
        learning_starts=config["learning_starts"],
        buffer_size=config["buffer_size"],
        train_batch_size=config["train_batch_size"],
        # Hook that shares columns/target actions across agents (see above).
        before_learn_on_batch=before_learn_on_batch,
        synchronize_sampling=True,
        prioritized_replay=False)
    return SyncReplayOptimizer(workers, **optimizer_kwargs)
def add_trainer_metrics(trainer, result):
    """Augment a train result dict with timestep and optimizer statistics."""
    steps_sampled = trainer.optimizer.num_steps_sampled
    info = {"num_target_updates": trainer.state["num_target_updates"]}
    # Optimizer stats take precedence on key collision, as before.
    info.update(trainer.optimizer.stats())
    result.update(
        timesteps_this_iter=steps_sampled - trainer.train_start_timestep,
        info=info)
def collect_metrics(trainer):
    """Return the trainer's own collected metrics, unchanged."""
    return trainer.collect_metrics()
# Assemble the MADDPG trainer from the generic off-policy template:
# timestep bookkeeping comes from set_global_timestep/add_trainer_metrics
# and replay is driven by make_optimizer (defined above).
MADDPGTrainer = GenericOffPolicyTrainer.with_updates(
    name="MADDPG",
    default_config=DEFAULT_CONFIG,
    default_policy=MADDPGTFPolicy,
    before_init=None,
    before_train_step=set_global_timestep,
    make_policy_optimizer=make_optimizer,
    after_train_result=add_trainer_metrics,
    collect_metrics_fn=collect_metrics,
    before_evaluate_fn=None)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/maddpg/maddpg_policy.py
|
Python
|
import ray
from ray.rllib.agents.dqn.dqn_policy import minimize_and_clip, _adjust_nstep
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.annotations import override
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.utils import try_import_tf, try_import_tfp
import logging
from gym.spaces import Box, Discrete
import numpy as np
logger = logging.getLogger(__name__)
tf = try_import_tf()
tfp = try_import_tfp()
class MADDPGPostprocessing:
    """Implements agentwise termination signal and n-step learning."""

    @override(Policy)
    def postprocess_trajectory(self,
                               sample_batch,
                               other_agent_batches=None,
                               episode=None):
        # FIXME: agentwise dones are not supported yet, so the done flag is
        # recovered from each step's info dict instead.
        sample_batch.data["dones"] = self.get_done_from_info(
            sample_batch.data["infos"])
        # Fold rewards over the next n steps into each transition (n-step Q).
        n_step = self.config["n_step"]
        if n_step > 1:
            _adjust_nstep(n_step, self.config["gamma"],
                          sample_batch[SampleBatch.CUR_OBS],
                          sample_batch[SampleBatch.ACTIONS],
                          sample_batch[SampleBatch.REWARDS],
                          sample_batch[SampleBatch.NEXT_OBS],
                          sample_batch[SampleBatch.DONES])
        return sample_batch
class MADDPGTFPolicy(MADDPGPostprocessing, TFPolicy):
    """TF policy for MADDPG.

    Builds an actor (policy) and a critic (Q-function) per agent, plus
    Polyak-averaged target copies of both.  Unless `use_local_critic` is
    set, the critic is centralized: it consumes the observations and
    actions of *all* agents (taken in sorted policy-id order), while the
    actor only sees this agent's own observation.
    """

    def __init__(self, obs_space, act_space, config):
        """Construct the full TF graph for one MADDPG agent.

        Args:
            obs_space: this agent's observation space.
            act_space: this agent's action space.
            config: policy config dict (merged over DEFAULT_CONFIG);
                must contain an integer "agent_id".
        """
        # _____ Initial Configuration
        config = dict(ray.rllib.contrib.maddpg.DEFAULT_CONFIG, **config)
        self.global_step = tf.train.get_or_create_global_step()
        # FIXME: Get done from info is required since agentwise done is not
        # supported now.
        self.get_done_from_info = np.vectorize(
            lambda info: info.get("done", False))
        agent_id = config["agent_id"]
        if agent_id is None:
            raise ValueError("Must set `agent_id` in the policy config.")
        if type(agent_id) is not int:
            raise ValueError("Agent ids must be integers for MADDPG.")
        # _____ Environment Setting
        # Discrete spaces are relaxed to Box([0,1]^n) since MADDPG trains on
        # continuous (pre-softmax logit) actions.
        def _make_continuous_space(space):
            if isinstance(space, Box):
                return space
            elif isinstance(space, Discrete):
                return Box(
                    low=np.zeros((space.n, )), high=np.ones((space.n, )))
            else:
                raise UnsupportedSpaceException(
                    "Space {} is not supported.".format(space))
        # Per-agent spaces in sorted policy-id order (shared by all policies,
        # so the centralized critic sees a consistent ordering).
        obs_space_n = [
            _make_continuous_space(space)
            for _, (_, space, _,
                    _) in sorted(config["multiagent"]["policies"].items())
        ]
        act_space_n = [
            _make_continuous_space(space)
            for _, (_, _, space,
                    _) in sorted(config["multiagent"]["policies"].items())
        ]
        # _____ Placeholders
        # Placeholders for policy evaluation and updates
        def _make_ph_n(space_n, name=""):
            return [
                tf.placeholder(
                    tf.float32,
                    shape=(None, ) + space.shape,
                    name=name + "_%d" % i) for i, space in enumerate(space_n)
            ]
        obs_ph_n = _make_ph_n(obs_space_n, "obs")
        act_ph_n = _make_ph_n(act_space_n, "actions")
        new_obs_ph_n = _make_ph_n(obs_space_n, "new_obs")
        new_act_ph_n = _make_ph_n(act_space_n, "new_actions")
        rew_ph = tf.placeholder(
            tf.float32, shape=None, name="rewards_{}".format(agent_id))
        done_ph = tf.placeholder(
            tf.float32, shape=None, name="dones_{}".format(agent_id))
        # With a local critic, drop all other agents' inputs and reindex
        # this agent to position 0.
        if config["use_local_critic"]:
            obs_space_n, act_space_n = [obs_space_n[agent_id]], [
                act_space_n[agent_id]
            ]
            obs_ph_n, act_ph_n = [obs_ph_n[agent_id]], [act_ph_n[agent_id]]
            new_obs_ph_n, new_act_ph_n = [new_obs_ph_n[agent_id]], [
                new_act_ph_n[agent_id]
            ]
            agent_id = 0
        # _____ Value Network
        # Build critic network for t.
        critic, _, critic_model_n, critic_vars = self._build_critic_network(
            obs_ph_n,
            act_ph_n,
            obs_space_n,
            act_space_n,
            config["use_state_preprocessor"],
            config["critic_hiddens"],
            getattr(tf.nn, config["critic_hidden_activation"]),
            scope="critic")
        # Build critic network for t + 1.
        target_critic, _, _, target_critic_vars = self._build_critic_network(
            new_obs_ph_n,
            new_act_ph_n,
            obs_space_n,
            act_space_n,
            config["use_state_preprocessor"],
            config["critic_hiddens"],
            getattr(tf.nn, config["critic_hidden_activation"]),
            scope="target_critic")
        # Build critic loss: 1-step (or n-step) TD error against the
        # (gradient-stopped) target critic.
        td_error = tf.subtract(
            tf.stop_gradient(
                rew_ph + (1.0 - done_ph) *
                (config["gamma"]**config["n_step"]) * target_critic[:, 0]),
            critic[:, 0])
        critic_loss = tf.reduce_mean(td_error**2)
        # _____ Policy Network
        # Build actor network for t.
        act_sampler, actor_feature, actor_model, actor_vars = (
            self._build_actor_network(
                obs_ph_n[agent_id],
                obs_space_n[agent_id],
                act_space_n[agent_id],
                config["use_state_preprocessor"],
                config["actor_hiddens"],
                getattr(tf.nn, config["actor_hidden_activation"]),
                scope="actor"))
        # Build actor network for t + 1.
        self.new_obs_ph = new_obs_ph_n[agent_id]
        self.target_act_sampler, _, _, target_actor_vars = (
            self._build_actor_network(
                self.new_obs_ph,
                obs_space_n[agent_id],
                act_space_n[agent_id],
                config["use_state_preprocessor"],
                config["actor_hiddens"],
                getattr(tf.nn, config["actor_hidden_activation"]),
                scope="target_actor"))
        # Build actor loss: maximize Q with this agent's sampled action
        # substituted into the (reused, AUTO_REUSE) critic.
        act_n = act_ph_n.copy()
        act_n[agent_id] = act_sampler
        critic, _, _, _ = self._build_critic_network(
            obs_ph_n,
            act_n,
            obs_space_n,
            act_space_n,
            config["use_state_preprocessor"],
            config["critic_hiddens"],
            getattr(tf.nn, config["critic_hidden_activation"]),
            scope="critic")
        actor_loss = -tf.reduce_mean(critic)
        if config["actor_feature_reg"] is not None:
            # L2 regularization on the pre-sampling actor logits.
            actor_loss += config["actor_feature_reg"] * tf.reduce_mean(
                actor_feature**2)
        # _____ Losses
        self.losses = {"critic": critic_loss, "actor": actor_loss}
        # _____ Optimizers
        self.optimizers = {
            "critic": tf.train.AdamOptimizer(config["critic_lr"]),
            "actor": tf.train.AdamOptimizer(config["actor_lr"])
        }
        # _____ Build variable update ops.
        # Polyak coefficient; overridable at run time (e.g. 1.0 for the
        # initial hard copy below).
        self.tau = tf.placeholder_with_default(
            config["tau"], shape=(), name="tau")
        def _make_target_update_op(vs, target_vs, tau):
            return [
                target_v.assign(tau * v + (1.0 - tau) * target_v)
                for v, target_v in zip(vs, target_vs)
            ]
        self.update_target_vars = _make_target_update_op(
            critic_vars + actor_vars, target_critic_vars + target_actor_vars,
            self.tau)
        # Ops + placeholders used by set_weights() to assign all variables.
        def _make_set_weight_op(variables):
            vs = list()
            for v in variables.values():
                vs += v
            phs = [
                tf.placeholder(
                    tf.float32,
                    shape=v.get_shape(),
                    name=v.name.split(":")[0] + "_ph") for v in vs
            ]
            return tf.group(*[v.assign(ph) for v, ph in zip(vs, phs)]), phs
        self.vars = {
            "critic": critic_vars,
            "actor": actor_vars,
            "target_critic": target_critic_vars,
            "target_actor": target_actor_vars
        }
        self.update_vars, self.vars_ph = _make_set_weight_op(self.vars)
        # _____ TensorFlow Initialization
        self.sess = tf.get_default_session()
        # Map each placeholder to the sample-batch column of the same name
        # (the suffixed columns produced by before_learn_on_batch).
        def _make_loss_inputs(placeholders):
            return [(ph.name.split("/")[-1].split(":")[0], ph)
                    for ph in placeholders]
        loss_inputs = _make_loss_inputs(obs_ph_n + act_ph_n + new_obs_ph_n +
                                        new_act_ph_n + [rew_ph, done_ph])
        TFPolicy.__init__(
            self,
            obs_space,
            act_space,
            config=config,
            sess=self.sess,
            obs_input=obs_ph_n[agent_id],
            action_sampler=act_sampler,
            loss=actor_loss + critic_loss,
            loss_inputs=loss_inputs)
        self.sess.run(tf.global_variables_initializer())
        # Hard initial update
        self.update_target(1.0)
    @override(TFPolicy)
    def optimizer(self):
        """Single-optimizer hook unused; two optimizers are kept in
        self.optimizers and applied in build_apply_op()."""
        return None
    @override(TFPolicy)
    def gradients(self, optimizer, loss):
        """Compute (optionally norm-clipped) grads for critic and actor."""
        if self.config["grad_norm_clipping"] is not None:
            self.gvs = {
                k: minimize_and_clip(optimizer, self.losses[k], self.vars[k],
                                     self.config["grad_norm_clipping"])
                for k, optimizer in self.optimizers.items()
            }
        else:
            self.gvs = {
                k: optimizer.compute_gradients(self.losses[k], self.vars[k])
                for k, optimizer in self.optimizers.items()
            }
        return self.gvs["critic"] + self.gvs["actor"]
    @override(TFPolicy)
    def build_apply_op(self, optimizer, grads_and_vars):
        """Apply critic grads first, then actor grads; bumps global_step."""
        critic_apply_op = self.optimizers["critic"].apply_gradients(
            self.gvs["critic"])
        with tf.control_dependencies([tf.assign_add(self.global_step, 1)]):
            with tf.control_dependencies([critic_apply_op]):
                actor_apply_op = self.optimizers["actor"].apply_gradients(
                    self.gvs["actor"])
        return actor_apply_op
    @override(TFPolicy)
    def extra_compute_action_feed_dict(self):
        return {}
    @override(TFPolicy)
    def extra_compute_grad_fetches(self):
        return {LEARNER_STATS_KEY: {}}
    @override(TFPolicy)
    def get_weights(self):
        """Fetch all (critic/actor/target) variable values as a flat list."""
        var_list = []
        for var in self.vars.values():
            var_list += var
        return self.sess.run(var_list)
    @override(TFPolicy)
    def set_weights(self, weights):
        """Assign weights in the same flat order produced by get_weights()."""
        self.sess.run(
            self.update_vars, feed_dict=dict(zip(self.vars_ph, weights)))
    @override(Policy)
    def get_state(self):
        return TFPolicy.get_state(self)
    @override(Policy)
    def set_state(self, state):
        TFPolicy.set_state(self, state)
    def _build_critic_network(self,
                              obs_n,
                              act_n,
                              obs_space_n,
                              act_space_n,
                              use_state_preprocessor,
                              hiddens,
                              activation=None,
                              scope=None):
        """Build a Q-network over the concatenation of all given obs/acts.

        Returns (q_out, last_hidden_feature, preprocessor_models, variables).
        AUTO_REUSE lets the actor-loss pass share the "critic" scope.
        """
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as scope:
            if use_state_preprocessor:
                model_n = [
                    ModelCatalog.get_model({
                        "obs": obs,
                        "is_training": self._get_is_training_placeholder(),
                    }, obs_space, act_space, 1, self.config["model"])
                    for obs, obs_space, act_space in zip(
                        obs_n, obs_space_n, act_space_n)
                ]
                out_n = [model.last_layer for model in model_n]
                out = tf.concat(out_n + act_n, axis=1)
            else:
                model_n = [None] * len(obs_n)
                out = tf.concat(obs_n + act_n, axis=1)
            for hidden in hiddens:
                out = tf.layers.dense(
                    out, units=hidden, activation=activation
                )
            feature = out
            out = tf.layers.dense(feature, units=1, activation=None)
            return out, feature, model_n, tf.global_variables(scope.name)
    def _build_actor_network(self,
                             obs,
                             obs_space,
                             act_space,
                             use_state_preprocessor,
                             hiddens,
                             activation=None,
                             scope=None):
        """Build the policy network for one observation input.

        Returns (action_sampler, logits, preprocessor_model, variables);
        actions are sampled from a Gumbel-Softmax (RelaxedOneHotCategorical)
        over the logits.
        """
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as scope:
            if use_state_preprocessor:
                model = ModelCatalog.get_model({
                    "obs": obs,
                    "is_training": self._get_is_training_placeholder(),
                }, obs_space, act_space, 1, self.config["model"])
                out = model.last_layer
            else:
                model = None
                out = obs
            for hidden in hiddens:
                out = tf.layers.dense(
                    out, units=hidden, activation=activation
                )
            feature = tf.layers.dense(
                out, units=act_space.shape[0], activation=None)
            sampler = tfp.distributions.RelaxedOneHotCategorical(
                temperature=1.0, logits=feature).sample()
            return sampler, feature, model, tf.global_variables(scope.name)
    def update_target(self, tau=None):
        """Polyak-update target nets; tau=None uses the configured default."""
        if tau is not None:
            self.sess.run(self.update_target_vars, {self.tau: tau})
        else:
            self.sess.run(self.update_target_vars)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/random_agent/random_agent.py
|
Python
|
import numpy as np
from ray.rllib.agents.trainer import Trainer, with_common_config
from ray.rllib.utils.annotations import override
# yapf: disable
# __sphinx_doc_begin__
class RandomAgent(Trainer):
    """Policy that takes random actions and never learns."""

    _name = "RandomAgent"
    _default_config = with_common_config({
        "rollouts_per_iteration": 10,
    })

    @override(Trainer)
    def _init(self, config, env_creator):
        self.env = env_creator(config["env_config"])

    @override(Trainer)
    def _train(self):
        # Roll out the configured number of episodes with uniform-random
        # actions and report mean episode reward plus total steps taken.
        episode_rewards = []
        total_steps = 0
        for _ in range(self.config["rollouts_per_iteration"]):
            obs = self.env.reset()
            episode_reward = 0.0
            done = False
            while not done:
                random_action = self.env.action_space.sample()
                obs, rew, done, _ = self.env.step(random_action)
                episode_reward += rew
                total_steps += 1
            episode_rewards.append(episode_reward)
        return {
            "episode_reward_mean": np.mean(episode_rewards),
            "timesteps_this_iter": total_steps,
        }
# __sphinx_doc_end__
# don't enable yapf after, it's buggy here
if __name__ == "__main__":
    # Smoke test: run one training iteration on CartPole and sanity-check
    # the mean reward of the random policy.
    trainer = RandomAgent(
        env="CartPole-v0", config={"rollouts_per_iteration": 10})
    result = trainer.train()
    assert result["episode_reward_mean"] > 10, result
    print("Test: OK")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/contrib/registry.py
|
Python
|
"""Registry of algorithm names for `rllib train --run=<alg_name>`"""
def _import_random_agent():
    """Lazily import and return the contrib RandomAgent trainer class."""
    from ray.rllib.contrib.random_agent.random_agent import (
        RandomAgent as trainer_cls)
    return trainer_cls
def _import_maddpg():
    """Lazily import and return the contrib MADDPG trainer class."""
    from ray.rllib.contrib.maddpg import MADDPGTrainer
    return MADDPGTrainer
def _import_alphazero():
    """Lazily import and return the contrib AlphaZero trainer class."""
    from ray.rllib.contrib.alpha_zero.core.alpha_zero_trainer import (
        AlphaZeroTrainer)
    return AlphaZeroTrainer
# Maps "contrib/<Name>" run identifiers to lazy importer callables; the
# indirection defers heavy imports until an algorithm is actually requested.
CONTRIBUTED_ALGORITHMS = {
    "contrib/RandomAgent": _import_random_agent,
    "contrib/MADDPG": _import_maddpg,
    "contrib/AlphaZero": _import_alphazero,
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/__init__.py
|
Python
|
from ray.rllib.evaluation.episode import MultiAgentEpisode
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator
from ray.rllib.evaluation.interface import EvaluatorInterface
from ray.rllib.evaluation.policy_graph import PolicyGraph
from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph
from ray.rllib.evaluation.torch_policy_graph import TorchPolicyGraph
from ray.rllib.evaluation.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.evaluation.sample_batch_builder import (
SampleBatchBuilder, MultiAgentSampleBatchBuilder)
from ray.rllib.evaluation.sampler import SyncSampler, AsyncSampler
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.evaluation.metrics import collect_metrics
# Public names re-exported by ray.rllib.evaluation.
__all__ = [
    "EvaluatorInterface",
    "RolloutWorker",
    "PolicyGraph",
    "TFPolicyGraph",
    "TorchPolicyGraph",
    "SampleBatch",
    "MultiAgentBatch",
    "SampleBatchBuilder",
    "MultiAgentSampleBatchBuilder",
    "SyncSampler",
    "AsyncSampler",
    "compute_advantages",
    "collect_metrics",
    "MultiAgentEpisode",
    "PolicyEvaluator",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/episode.py
|
Python
|
from collections import defaultdict
import random
import numpy as np
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.utils.annotations import DeveloperAPI
@DeveloperAPI
class MultiAgentEpisode:
    """Tracks the current state of a (possibly multi-agent) episode.

    Attributes:
        new_batch_builder (func): Create a new MultiAgentSampleBatchBuilder.
        add_extra_batch (func): Return a built MultiAgentBatch to the sampler.
        batch_builder (obj): Batch builder for the current episode.
        total_reward (float): Summed reward across all agents in this episode.
        length (int): Length of this episode.
        episode_id (int): Unique id identifying this trajectory.
        agent_rewards (dict): Summed rewards broken down by agent.
        custom_metrics (dict): Dict where the you can add custom metrics.
        user_data (dict): Dict that you can use for temporary storage.

    Use case 1: Model-based rollouts in multi-agent:
        A custom compute_actions() function in a policy can inspect the
        current episode state and perform a number of rollouts based on the
        policies and state of other agents in the environment.

    Use case 2: Returning extra rollouts data.
        The model rollouts can be returned back to the sampler by calling:
        >>> batch = episode.new_batch_builder()
        >>> for each transition:
                batch.add_values(...)  # see sampler for usage
        >>> episode.extra_batches.add(batch.build_and_reset())
    """

    def __init__(self, policies, policy_mapping_fn, batch_builder_factory,
                 extra_batch_callback):
        self.new_batch_builder = batch_builder_factory
        self.add_extra_batch = extra_batch_callback
        self.batch_builder = batch_builder_factory()
        self.total_reward = 0.0
        self.length = 0
        # Bug fix: randrange() requires an integer bound; passing the float
        # 2e9 raises TypeError on Python 3.11+.
        self.episode_id = random.randrange(int(2e9))
        self.agent_rewards = defaultdict(float)
        self.custom_metrics = {}
        self.user_data = {}
        self._policies = policies
        self._policy_mapping_fn = policy_mapping_fn
        self._next_agent_index = 0
        self._agent_to_index = {}
        self._agent_to_policy = {}
        self._agent_to_rnn_state = {}
        self._agent_to_last_obs = {}
        self._agent_to_last_raw_obs = {}
        self._agent_to_last_info = {}
        self._agent_to_last_action = {}
        self._agent_to_last_pi_info = {}
        self._agent_to_prev_action = {}
        self._agent_reward_history = defaultdict(list)

    @DeveloperAPI
    def soft_reset(self):
        """Clears rewards and metrics, but retains RNN and other state.

        This is used to carry state across multiple logical episodes in the
        same env (i.e., if `soft_horizon` is set).
        """
        self.length = 0
        # Bug fix: int bound for randrange (see __init__).
        self.episode_id = random.randrange(int(2e9))
        self.total_reward = 0.0
        self.agent_rewards = defaultdict(float)
        self._agent_reward_history = defaultdict(list)

    @DeveloperAPI
    def policy_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the policy for the specified agent.

        If the agent is new, the policy mapping fn will be called to bind the
        agent to a policy for the duration of the episode.
        """
        if agent_id not in self._agent_to_policy:
            self._agent_to_policy[agent_id] = self._policy_mapping_fn(agent_id)
        return self._agent_to_policy[agent_id]

    @DeveloperAPI
    def last_observation_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last observation for the specified agent."""
        return self._agent_to_last_obs.get(agent_id)

    @DeveloperAPI
    def last_raw_obs_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last un-preprocessed obs for the specified agent."""
        return self._agent_to_last_raw_obs.get(agent_id)

    @DeveloperAPI
    def last_info_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last info for the specified agent."""
        return self._agent_to_last_info.get(agent_id)

    @DeveloperAPI
    def last_action_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last action for the specified agent, or zeros."""
        if agent_id in self._agent_to_last_action:
            return _flatten_action(self._agent_to_last_action[agent_id])
        else:
            # No action taken yet: return a zero vector of the right shape,
            # derived from a sampled action of the bound policy's space.
            policy = self._policies[self.policy_for(agent_id)]
            flat = _flatten_action(policy.action_space.sample())
            return np.zeros_like(flat)

    @DeveloperAPI
    def prev_action_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the previous action for the specified agent."""
        if agent_id in self._agent_to_prev_action:
            return _flatten_action(self._agent_to_prev_action[agent_id])
        else:
            # We're at t=0, so return all zeros.
            return np.zeros_like(self.last_action_for(agent_id))

    @DeveloperAPI
    def prev_reward_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the previous reward for the specified agent."""
        history = self._agent_reward_history[agent_id]
        if len(history) >= 2:
            return history[-2]
        else:
            # We're at t=0, so there is no previous reward, just return zero.
            return 0.0

    @DeveloperAPI
    def rnn_state_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last RNN state for the specified agent."""
        if agent_id not in self._agent_to_rnn_state:
            policy = self._policies[self.policy_for(agent_id)]
            self._agent_to_rnn_state[agent_id] = policy.get_initial_state()
        return self._agent_to_rnn_state[agent_id]

    @DeveloperAPI
    def last_pi_info_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last info object for the specified agent."""
        return self._agent_to_last_pi_info[agent_id]

    def _add_agent_rewards(self, reward_dict):
        # Accumulate per-(agent, policy) and total rewards; None rewards
        # (e.g. agents without a reward this step) are skipped.
        for agent_id, reward in reward_dict.items():
            if reward is not None:
                self.agent_rewards[agent_id,
                                   self.policy_for(agent_id)] += reward
                self.total_reward += reward
                self._agent_reward_history[agent_id].append(reward)

    def _set_rnn_state(self, agent_id, rnn_state):
        self._agent_to_rnn_state[agent_id] = rnn_state

    def _set_last_observation(self, agent_id, obs):
        self._agent_to_last_obs[agent_id] = obs

    def _set_last_raw_obs(self, agent_id, obs):
        self._agent_to_last_raw_obs[agent_id] = obs

    def _set_last_info(self, agent_id, info):
        self._agent_to_last_info[agent_id] = info

    def _set_last_action(self, agent_id, action):
        # Shift the previous "last" action into the prev slot first.
        if agent_id in self._agent_to_last_action:
            self._agent_to_prev_action[agent_id] = \
                self._agent_to_last_action[agent_id]
        self._agent_to_last_action[agent_id] = action

    def _set_last_pi_info(self, agent_id, pi_info):
        self._agent_to_last_pi_info[agent_id] = pi_info

    def _agent_index(self, agent_id):
        # Assign stable, dense indices to agents in order of first appearance.
        if agent_id not in self._agent_to_index:
            self._agent_to_index[agent_id] = self._next_agent_index
            self._next_agent_index += 1
        return self._agent_to_index[agent_id]
def _flatten_action(action):
# Concatenate tuple actions
if isinstance(action, list) or isinstance(action, tuple):
expanded = []
for a in action:
expanded.append(np.reshape(a, [-1]))
action = np.concatenate(expanded, axis=0).flatten()
return action
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/interface.py
|
Python
|
import os
from ray.rllib.utils.annotations import DeveloperAPI
@DeveloperAPI
class EvaluatorInterface:
    """Interface between policy optimizers and policy evaluation.

    See also: RolloutWorker
    """

    @DeveloperAPI
    def sample(self):
        """Return a batch of experience sampled from this evaluator.

        Subclasses must implement this.

        Returns:
            SampleBatch|MultiAgentBatch: A columnar batch of experiences
            (e.g., tensors), or a multi-agent batch.

        Examples:
            >>> print(ev.sample())
            SampleBatch({"obs": [1, 2, 3], "action": [0, 1, 0], ...})
        """
        raise NotImplementedError

    @DeveloperAPI
    def learn_on_batch(self, samples):
        """Update policies based on the given batch.

        Equivalent to apply_gradients(compute_gradients(samples)), but may
        be overridden to avoid pulling gradients into CPU memory.

        Subclasses must implement either this method or the
        compute_gradients/apply_gradients pair.

        Returns:
            info: dictionary of extra metadata from compute_gradients().

        Examples:
            >>> batch = ev.sample()
            >>> ev.learn_on_batch(samples)
        """
        # Default path: compute gradients, then apply them locally.
        gradients, info = self.compute_gradients(samples)
        self.apply_gradients(gradients)
        return info

    @DeveloperAPI
    def compute_gradients(self, samples):
        """Return a gradient computed w.r.t. the specified samples.

        Subclasses must implement either this or learn_on_batch().

        Returns:
            (grads, info): A list of gradients that can be applied on a
            compatible evaluator. In the multi-agent case, returns a dict
            of gradients keyed by policy ids. An info dictionary of
            extra metadata is also returned.

        Examples:
            >>> batch = ev.sample()
            >>> grads, info = ev2.compute_gradients(samples)
        """
        raise NotImplementedError

    @DeveloperAPI
    def apply_gradients(self, grads):
        """Apply the given gradients to this evaluator's weights.

        Subclasses must implement either this or learn_on_batch().

        Examples:
            >>> samples = ev1.sample()
            >>> grads, info = ev2.compute_gradients(samples)
            >>> ev1.apply_gradients(grads)
        """
        raise NotImplementedError

    @DeveloperAPI
    def get_weights(self):
        """Return the model weights of this Evaluator.

        Subclasses must implement this.

        Returns:
            object: weights that can be set on a compatible evaluator.
            info: dictionary of extra metadata.

        Examples:
            >>> weights = ev1.get_weights()
        """
        raise NotImplementedError

    @DeveloperAPI
    def set_weights(self, weights):
        """Set the model weights of this Evaluator.

        Subclasses must implement this.

        Examples:
            >>> weights = ev1.get_weights()
            >>> ev2.set_weights(weights)
        """
        raise NotImplementedError

    @DeveloperAPI
    def get_host(self):
        """Return the hostname of the process running this evaluator."""
        # NOTE(review): os.uname() is POSIX-only — confirm Windows is out of
        # scope for evaluators.
        return os.uname()[1]

    @DeveloperAPI
    def apply(self, func, *args):
        """Apply the given function to this evaluator instance."""
        return func(self, *args)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/metrics.py
|
Python
|
import logging
import numpy as np
import collections
import ray
from ray.rllib.evaluation.rollout_metrics import RolloutMetrics
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.offline.off_policy_estimator import OffPolicyEstimate
from ray.rllib.policy.policy import LEARNER_STATS_KEY
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.memory import ray_get_and_free
logger = logging.getLogger(__name__)
@DeveloperAPI
def get_learner_stats(grad_info):
    """Extract the optimization stats reported by the policy.

    Example:
        >>> grad_info = evaluator.learn_on_batch(samples)
        >>> print(get_stats(grad_info))
        {"vf_loss": ..., "policy_loss": ...}
    """
    # Single-agent case: stats sit directly under the well-known key.
    if LEARNER_STATS_KEY in grad_info:
        return grad_info[LEARNER_STATS_KEY]
    # Multi-agent case: one stats dict nested under each policy id.
    # (type(...) is dict, not isinstance, to match the original exactly.)
    return {
        policy_id: nested[LEARNER_STATS_KEY]
        for policy_id, nested in grad_info.items()
        if type(nested) is dict and LEARNER_STATS_KEY in nested
    }
@DeveloperAPI
def collect_metrics(local_worker=None,
                    remote_workers=None,
                    to_be_collected=None,
                    timeout_seconds=180):
    """Gathers episode metrics from RolloutWorker instances.

    Args:
        local_worker: optional local RolloutWorker to poll directly.
        remote_workers: optional list of remote worker actor handles.
        to_be_collected: pending metric futures from a previous call.
        timeout_seconds: how long to wait for remote metrics.

    Returns:
        dict: summarized metrics for the collected episodes.
    """
    # Bug fix: the defaults used to be mutable lists ([]), which are shared
    # across calls; use None sentinels instead (backward-compatible).
    episodes, to_be_collected = collect_episodes(
        local_worker,
        remote_workers or [],
        to_be_collected or [],
        timeout_seconds=timeout_seconds)
    # No smoothing at this layer: all collected episodes count as "new".
    metrics = summarize_episodes(episodes, episodes)
    return metrics
@DeveloperAPI
def collect_episodes(local_worker=None,
                     remote_workers=None,
                     to_be_collected=None,
                     timeout_seconds=180):
    """Gathers new episodes metrics tuples from the given evaluators.

    Args:
        local_worker: optional local RolloutWorker to poll directly.
        remote_workers: optional list of remote worker actor handles.
        to_be_collected: pending metric futures from a previous call.
        timeout_seconds: how long to wait for remote metrics.

    Returns:
        (episodes, to_be_collected): collected metric entries plus any
        futures that did not resolve within the timeout.
    """
    # Bug fix: the defaults used to be mutable lists ([]), which Python
    # evaluates once and shares across calls; use None sentinels instead.
    remote_workers = remote_workers or []
    to_be_collected = to_be_collected or []
    if remote_workers:
        pending = [
            a.apply.remote(lambda ev: ev.get_metrics()) for a in remote_workers
        ] + to_be_collected
        collected, to_be_collected = ray.wait(
            pending, num_returns=len(pending), timeout=timeout_seconds * 1.0)
        if pending and len(collected) == 0:
            logger.warning(
                "WARNING: collected no metrics in {} seconds".format(
                    timeout_seconds))
        metric_lists = ray_get_and_free(collected)
    else:
        metric_lists = []
    if local_worker:
        metric_lists.append(local_worker.get_metrics())
    episodes = []
    for metrics in metric_lists:
        episodes.extend(metrics)
    return episodes, to_be_collected
@DeveloperAPI
def summarize_episodes(episodes, new_episodes):
    """Summarizes a set of episode metrics tuples.
    Arguments:
        episodes: smoothed set of episodes including historical ones
        new_episodes: just the new episodes in this iteration
    """
    # Separate true rollouts from off-policy estimator outputs.
    episodes, estimates = _partition(episodes)
    new_episodes, _ = _partition(new_episodes)
    episode_rewards = []
    episode_lengths = []
    policy_rewards = collections.defaultdict(list)
    custom_metrics = collections.defaultdict(list)
    perf_stats = collections.defaultdict(list)
    for episode in episodes:
        episode_lengths.append(episode.episode_length)
        episode_rewards.append(episode.episode_reward)
        for k, v in episode.custom_metrics.items():
            custom_metrics[k].append(v)
        for k, v in episode.perf_stats.items():
            perf_stats[k].append(v)
        # Per-policy rewards are only broken out in the multi-agent case.
        for (_, policy_id), reward in episode.agent_rewards.items():
            if policy_id != DEFAULT_POLICY_ID:
                policy_rewards[policy_id].append(reward)
    if episode_rewards:
        min_reward = min(episode_rewards)
        max_reward = max(episode_rewards)
    else:
        min_reward = float("nan")
        max_reward = float("nan")
    # NOTE(review): with no episodes, np.mean([]) yields nan (and warns).
    avg_reward = np.mean(episode_rewards)
    avg_length = np.mean(episode_lengths)
    policy_reward_min = {}
    policy_reward_mean = {}
    policy_reward_max = {}
    for policy_id, rewards in policy_rewards.copy().items():
        policy_reward_min[policy_id] = np.min(rewards)
        policy_reward_mean[policy_id] = np.mean(rewards)
        policy_reward_max[policy_id] = np.max(rewards)
    # Each raw metric list k is replaced by k_mean/k_min/k_max; iterate over
    # a copy because keys are added and deleted during the loop.
    for k, v_list in custom_metrics.copy().items():
        custom_metrics[k + "_mean"] = np.mean(v_list)
        # min/max ignore nan entries; mean above does not.
        filt = [v for v in v_list if not np.isnan(v)]
        if filt:
            custom_metrics[k + "_min"] = np.min(filt)
            custom_metrics[k + "_max"] = np.max(filt)
        else:
            custom_metrics[k + "_min"] = float("nan")
            custom_metrics[k + "_max"] = float("nan")
        del custom_metrics[k]
    for k, v_list in perf_stats.copy().items():
        perf_stats[k] = np.mean(v_list)
    # Average each off-policy estimator's metrics, grouped by estimator name.
    estimators = collections.defaultdict(lambda: collections.defaultdict(list))
    for e in estimates:
        acc = estimators[e.estimator_name]
        for k, v in e.metrics.items():
            acc[k].append(v)
    for name, metrics in estimators.items():
        for k, v_list in metrics.items():
            metrics[k] = np.mean(v_list)
        estimators[name] = dict(metrics)
    return dict(
        episode_reward_max=max_reward,
        episode_reward_min=min_reward,
        episode_reward_mean=avg_reward,
        episode_len_mean=avg_length,
        episodes_this_iter=len(new_episodes),
        policy_reward_min=policy_reward_min,
        policy_reward_max=policy_reward_max,
        policy_reward_mean=policy_reward_mean,
        custom_metrics=dict(custom_metrics),
        sampler_perf=dict(perf_stats),
        off_policy_estimator=dict(estimators))
def _partition(episodes):
"""Divides metrics data into true rollouts vs off-policy estimates."""
rollouts, estimates = [], []
for e in episodes:
if isinstance(e, RolloutMetrics):
rollouts.append(e)
elif isinstance(e, OffPolicyEstimate):
estimates.append(e)
else:
raise ValueError("Unknown metric type: {}".format(e))
return rollouts, estimates
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/policy_evaluator.py
|
Python
|
from ray.rllib.utils import renamed_class
from ray.rllib.evaluation import RolloutWorker
# Backward-compatibility alias: PolicyEvaluator was renamed to RolloutWorker.
# renamed_class presumably wraps the new class to warn on use of the old
# name — confirm against ray.rllib.utils.renamed_class.
PolicyEvaluator = renamed_class(
    RolloutWorker, old_name="rllib.evaluation.PolicyEvaluator")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/policy_graph.py
|
Python
|
from ray.rllib.policy.policy import Policy
from ray.rllib.utils import renamed_class
# Backward-compatibility alias: PolicyGraph was renamed to Policy.
PolicyGraph = renamed_class(Policy, old_name="PolicyGraph")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/postprocessing.py
|
Python
|
import numpy as np
import scipy.signal
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import DeveloperAPI
def discount(x, gamma):
    """Return the reverse discounted cumulative sum of ``x`` along axis 0.

    out[t] = sum over k >= t of gamma^(k - t) * x[k], computed via an IIR
    filter over the time-reversed sequence.
    """
    reversed_x = x[::-1]
    filtered = scipy.signal.lfilter([1], [1, -gamma], reversed_x, axis=0)
    return filtered[::-1]
class Postprocessing:
    """Constant definitions for postprocessing.

    These strings are the SampleBatch column names under which
    compute_advantages() stores its outputs.
    """

    # Advantage estimates (float32 column added by compute_advantages).
    ADVANTAGES = "advantages"
    # Regression targets for the value function (float32 column).
    VALUE_TARGETS = "value_targets"
@DeveloperAPI
def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True):
    """Given a rollout, compute its value targets and the advantage.

    Args:
        rollout (SampleBatch): SampleBatch of a single trajectory
        last_r (float): Value estimation for last observation
        gamma (float): Discount factor.
        lambda_ (float): Parameter for GAE
        use_gae (bool): Using Generalized Advantage Estimation

    Returns:
        SampleBatch (SampleBatch): Object with experience from rollout and
            processed rewards.
    """
    traj_len = len(rollout[SampleBatch.ACTIONS])
    # Stack every column into a contiguous array.
    out = {key: np.stack(rollout[key]) for key in rollout}
    if use_gae:
        assert SampleBatch.VF_PREDS in rollout, "Values not found!"
        # Append the bootstrap value for the state after the last step.
        vpred_t = np.concatenate(
            [rollout[SampleBatch.VF_PREDS],
             np.array([last_r])])
        # One-step TD residuals: r_t + gamma * V(s_{t+1}) - V(s_t).
        delta_t = (
            out[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1])
        # This formula for the advantage comes
        # "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438
        out[Postprocessing.ADVANTAGES] = discount(delta_t, gamma * lambda_)
        out[Postprocessing.VALUE_TARGETS] = (
            out[Postprocessing.ADVANTAGES] +
            out[SampleBatch.VF_PREDS]).copy().astype(np.float32)
    else:
        # Plain discounted-return advantages (no baseline).
        rewards_plus_v = np.concatenate(
            [rollout[SampleBatch.REWARDS],
             np.array([last_r])])
        out[Postprocessing.ADVANTAGES] = discount(rewards_plus_v, gamma)[:-1]
        # TODO(ekl): support using a critic without GAE
        out[Postprocessing.VALUE_TARGETS] = np.zeros_like(
            out[Postprocessing.ADVANTAGES])
    out[Postprocessing.ADVANTAGES] = out[
        Postprocessing.ADVANTAGES].copy().astype(np.float32)
    assert all(val.shape[0] == traj_len for val in out.values()), \
        "Rollout stacked incorrectly!"
    return SampleBatch(out)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/rollout_metrics.py
|
Python
|
import collections
# Define this in its own file, see #5125
# Per-episode result record emitted by samplers and consumed by the
# metrics summarizer (episode length/reward, per-agent rewards, and the
# user's custom/perf metric dicts).
RolloutMetrics = collections.namedtuple("RolloutMetrics", [
    "episode_length", "episode_reward", "agent_rewards", "custom_metrics",
    "perf_stats"
])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/rollout_worker.py
|
Python
|
import random
import numpy as np
import gym
import logging
import pickle
import ray
from ray.rllib.env.atari_wrappers import wrap_deepmind, is_atari
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.env.env_context import EnvContext
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.interface import EvaluatorInterface
from ray.rllib.evaluation.sampler import AsyncSampler, SyncSampler
from ray.rllib.policy.sample_batch import MultiAgentBatch, DEFAULT_POLICY_ID
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.offline import NoopOutput, IOContext, OutputWriter, InputReader
from ray.rllib.offline.is_estimator import ImportanceSamplingEstimator
from ray.rllib.offline.wis_estimator import WeightedImportanceSamplingEstimator
from ray.rllib.models import ModelCatalog
from ray.rllib.models.preprocessors import NoPreprocessor
from ray.rllib.utils import merge_dicts
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.debug import disable_log_once_globally, log_once, \
summarize, enable_periodic_logging
from ray.rllib.utils.filter import get_filter
from ray.rllib.utils.tf_run_builder import TFRunBuilder
from ray.rllib.utils import try_import_tf, try_import_torch
# Optional deps: either may be None if the framework is not installed.
tf = try_import_tf()
torch, _ = try_import_torch()
logger = logging.getLogger(__name__)
# Handle to the current rollout worker, which will be set to the most recently
# created RolloutWorker in this process. This can be helpful to access in
# custom env or policy classes for debugging or advanced use cases.
_global_worker = None
@DeveloperAPI
def get_global_worker():
    """Return the RolloutWorker most recently created in this process.

    May be None if no worker has been constructed yet.
    """
    # Read-only access to the module-level handle; no `global` needed.
    return _global_worker
@DeveloperAPI
class RolloutWorker(EvaluatorInterface):
    """Common experience collection class.

    This class wraps a policy instance and an environment class to
    collect experiences from the environment. You can create many replicas of
    this class as Ray actors to scale RL training.

    This class supports vectorized and multi-agent policy evaluation (e.g.,
    VectorEnv, MultiAgentEnv, etc.)

    Examples:
        >>> # Create a rollout worker and using it to collect experiences.
        >>> worker = RolloutWorker(
        ...   env_creator=lambda _: gym.make("CartPole-v0"),
        ...   policy=PGTFPolicy)
        >>> print(worker.sample())
        SampleBatch({
            "obs": [[...]], "actions": [[...]], "rewards": [[...]],
            "dones": [[...]], "new_obs": [[...]]})

        >>> # Creating a multi-agent rollout worker
        >>> worker = RolloutWorker(
        ...   env_creator=lambda _: MultiAgentTrafficGrid(num_cars=25),
        ...   policies={
        ...       # Use an ensemble of two policies for car agents
        ...       "car_policy1":
        ...         (PGTFPolicy, Box(...), Discrete(...), {"gamma": 0.99}),
        ...       "car_policy2":
        ...         (PGTFPolicy, Box(...), Discrete(...), {"gamma": 0.95}),
        ...       # Use a single shared policy for all traffic lights
        ...       "traffic_light_policy":
        ...         (PGTFPolicy, Box(...), Discrete(...), {}),
        ...   },
        ...   policy_mapping_fn=lambda agent_id:
        ...     random.choice(["car_policy1", "car_policy2"])
        ...     if agent_id.startswith("car_") else "traffic_light_policy")
        >>> print(worker.sample())
        MultiAgentBatch({
            "car_policy1": SampleBatch(...),
            "car_policy2": SampleBatch(...),
            "traffic_light_policy": SampleBatch(...)})
    """

    @DeveloperAPI
    @classmethod
    def as_remote(cls,
                  num_cpus=None,
                  num_gpus=None,
                  memory=None,
                  object_store_memory=None,
                  resources=None):
        """Return a Ray remote-actor version of this class with the
        given resource requirements."""
        return ray.remote(
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            memory=memory,
            object_store_memory=object_store_memory,
            resources=resources)(cls)

    @DeveloperAPI
    def __init__(self,
                 env_creator,
                 policy,
                 policy_mapping_fn=None,
                 policies_to_train=None,
                 tf_session_creator=None,
                 batch_steps=100,
                 batch_mode="truncate_episodes",
                 episode_horizon=None,
                 preprocessor_pref="deepmind",
                 sample_async=False,
                 compress_observations=False,
                 num_envs=1,
                 observation_filter="NoFilter",
                 clip_rewards=None,
                 clip_actions=True,
                 env_config=None,
                 model_config=None,
                 policy_config=None,
                 worker_index=0,
                 monitor_path=None,
                 log_dir=None,
                 log_level=None,
                 callbacks=None,
                 input_creator=lambda ioctx: ioctx.default_sampler_input(),
                 input_evaluation=frozenset([]),
                 output_creator=lambda ioctx: NoopOutput(),
                 remote_worker_envs=False,
                 remote_env_batch_wait_ms=0,
                 soft_horizon=False,
                 no_done_at_end=False,
                 seed=None,
                 _fake_sampler=False):
        """Initialize a rollout worker.

        Arguments:
            env_creator (func): Function that returns a gym.Env given an
                EnvContext wrapped configuration.
            policy (class|dict): Either a class implementing
                Policy, or a dictionary of policy id strings to
                (Policy, obs_space, action_space, config) tuples. If a
                dict is specified, then we are in multi-agent mode and a
                policy_mapping_fn should also be set.
            policy_mapping_fn (func): A function that maps agent ids to
                policy ids in multi-agent mode. This function will be called
                each time a new agent appears in an episode, to bind that
                agent to a policy for the duration of the episode.
            policies_to_train (list): Optional whitelist of policies to train,
                or None for all policies.
            tf_session_creator (func): A function that returns a TF session.
                This is optional and only useful with TFPolicy.
            batch_steps (int): The target number of env transitions to include
                in each sample batch returned from this worker.
            batch_mode (str): One of the following batch modes:
                "truncate_episodes": Each call to sample() will return a batch
                    of at most `batch_steps * num_envs` in size. The batch
                    will be exactly `batch_steps * num_envs` in size if
                    postprocessing does not change batch sizes. Episodes may
                    be truncated in order to meet this size requirement.
                "complete_episodes": Each call to sample() will return a batch
                    of at least `batch_steps * num_envs` in size. Episodes
                    will not be truncated, but multiple episodes may be packed
                    within one batch to meet the batch size. Note that when
                    `num_envs > 1`, episode steps will be buffered until the
                    episode completes, and hence batches may contain
                    significant amounts of off-policy data.
            episode_horizon (int): Whether to stop episodes at this horizon.
            preprocessor_pref (str): Whether to prefer RLlib preprocessors
                ("rllib") or deepmind ("deepmind") when applicable.
            sample_async (bool): Whether to compute samples asynchronously in
                the background, which improves throughput but can cause
                samples to be slightly off-policy.
            compress_observations (bool): If true, compress the observations.
                They can be decompressed with rllib/utils/compression.
            num_envs (int): If more than one, will create multiple envs
                and vectorize the computation of actions. This has no effect
                if the env already implements VectorEnv.
            observation_filter (str): Name of observation filter to use.
            clip_rewards (bool): Whether to clip rewards to [-1, 1] prior to
                experience postprocessing. Setting to None means clip for
                Atari only.
            clip_actions (bool): Whether to clip action values to the range
                specified by the policy action space.
            env_config (dict): Config to pass to the env creator.
            model_config (dict): Config to use when creating the policy model.
            policy_config (dict): Config to pass to the policy. In the
                multi-agent case, this config will be merged with the
                per-policy configs specified by `policy`.
            worker_index (int): For remote workers, this should be set to a
                non-zero and unique value. This index is passed to created
                envs through EnvContext so that envs can be configured per
                worker.
            monitor_path (str): Write out episode stats and videos to this
                directory if specified.
            log_dir (str): Directory where logs can be placed.
            log_level (str): Set the root log level on creation.
            callbacks (dict): Dict of custom debug callbacks.
            input_creator (func): Function that returns an InputReader object
                for loading previous generated experiences.
            input_evaluation (list): How to evaluate the policy performance.
                This only makes sense to set when the input is reading offline
                data. The possible values include:
                  - "is": the step-wise importance sampling estimator.
                  - "wis": the weighted step-wise is estimator.
                  - "simulation": run the environment in the background, but
                    use this data for evaluation only and never for learning.
            output_creator (func): Function that returns an OutputWriter
                object for saving generated experiences.
            remote_worker_envs (bool): If using num_envs > 1, whether to
                create those new envs in remote processes instead of in the
                current process. This adds overheads, but can make sense if
                your envs are expensive to step/reset.
            remote_env_batch_wait_ms (float): Timeout that remote workers
                are waiting when polling environments. 0 (continue when at
                least one env is ready) is a reasonable default, but optimal
                value could be obtained by measuring your environment
                step / reset and model inference perf.
            soft_horizon (bool): Calculate rewards but don't reset the
                environment when the horizon is hit.
            no_done_at_end (bool): Ignore the done=True at the end of the
                episode and instead record done=False.
            seed (int): Set the seed of both np and tf to this value to
                to ensure each remote worker has unique exploration behavior.
            _fake_sampler (bool): Use a fake (inf speed) sampler for testing.
        """
        # Publish this worker as the process-wide handle (see
        # get_global_worker()).
        global _global_worker
        _global_worker = self
        policy_config = policy_config or {}
        if (tf and policy_config.get("eager")
                and not policy_config.get("no_eager_on_workers")):
            tf.enable_eager_execution()
        if log_level:
            logging.getLogger("ray.rllib").setLevel(log_level)
        if worker_index > 1:
            disable_log_once_globally()  # only need 1 worker to log
        elif log_level == "DEBUG":
            enable_periodic_logging()
        env_context = EnvContext(env_config or {}, worker_index)
        self.policy_config = policy_config
        self.callbacks = callbacks or {}
        self.worker_index = worker_index
        model_config = model_config or {}
        policy_mapping_fn = (policy_mapping_fn
                             or (lambda agent_id: DEFAULT_POLICY_ID))
        if not callable(policy_mapping_fn):
            raise ValueError("Policy mapping function not callable?")
        self.env_creator = env_creator
        self.sample_batch_size = batch_steps * num_envs
        self.batch_mode = batch_mode
        self.compress_observations = compress_observations
        self.preprocessing_enabled = True
        self.last_batch = None
        self._fake_sampler = _fake_sampler
        self.env = _validate_env(env_creator(env_context))
        # Pick an env wrapper based on env type: multi-agent/base envs are
        # never auto-wrapped; Atari gets the deepmind preprocessing stack;
        # everything else optionally gets a gym Monitor.
        if isinstance(self.env, MultiAgentEnv) or \
                isinstance(self.env, BaseEnv):

            def wrap(env):
                return env  # we can't auto-wrap these env types
        elif is_atari(self.env) and \
                not model_config.get("custom_preprocessor") and \
                preprocessor_pref == "deepmind":

            # Deepmind wrappers already handle all preprocessing
            self.preprocessing_enabled = False
            if clip_rewards is None:
                clip_rewards = True

            def wrap(env):
                env = wrap_deepmind(
                    env,
                    dim=model_config.get("dim"),
                    framestack=model_config.get("framestack"))
                if monitor_path:
                    from gym import wrappers
                    env = wrappers.Monitor(env, monitor_path, resume=True)
                return env
        else:

            def wrap(env):
                if monitor_path:
                    from gym import wrappers
                    env = wrappers.Monitor(env, monitor_path, resume=True)
                return env

        self.env = wrap(self.env)

        def make_env(vector_index):
            # Factory used by the vectorized BaseEnv to spawn extra env
            # copies, each with a distinct vector_index in its EnvContext.
            return wrap(
                env_creator(
                    env_context.copy_with_overrides(
                        vector_index=vector_index,
                        remote=remote_worker_envs)))

        self.tf_sess = None
        policy_dict = _validate_and_canonicalize(policy, self.env)
        self.policies_to_train = policies_to_train or list(policy_dict.keys())
        # set numpy and python seed
        if seed is not None:
            np.random.seed(seed)
            random.seed(seed)
            if not hasattr(self.env, "seed"):
                raise ValueError("Env doesn't support env.seed(): {}".format(
                    self.env))
            self.env.seed(seed)
            try:
                assert torch is not None
                torch.manual_seed(seed)
            except AssertionError:
                logger.info("Could not seed torch")
        # Graph-mode TF policies need a dedicated graph + session; eager or
        # non-TF policies are built directly.
        if _has_tensorflow_graph(policy_dict) and not (tf and
                                                       tf.executing_eagerly()):
            if not tf:
                raise ImportError("Could not import tensorflow")
            with tf.Graph().as_default():
                if tf_session_creator:
                    self.tf_sess = tf_session_creator()
                else:
                    self.tf_sess = tf.Session(
                        config=tf.ConfigProto(
                            gpu_options=tf.GPUOptions(allow_growth=True)))
                with self.tf_sess.as_default():
                    # set graph-level seed
                    if seed is not None:
                        tf.set_random_seed(seed)
                    self.policy_map, self.preprocessors = \
                        self._build_policy_map(policy_dict, policy_config)
            if (ray.is_initialized()
                    and ray.worker._mode() != ray.worker.LOCAL_MODE):
                if not ray.get_gpu_ids():
                    logger.debug(
                        "Creating policy evaluation worker {}".format(
                            worker_index) +
                        " on CPU (please ignore any CUDA init errors)")
                elif not tf.test.is_gpu_available():
                    raise RuntimeError(
                        "GPUs were assigned to this worker by Ray, but "
                        "TensorFlow reports GPU acceleration is disabled. "
                        "This could be due to a bad CUDA or TF installation.")
        else:
            self.policy_map, self.preprocessors = self._build_policy_map(
                policy_dict, policy_config)

        # Multi-agent mode is implied by any policy id other than the
        # default one.
        self.multiagent = set(self.policy_map.keys()) != {DEFAULT_POLICY_ID}
        if self.multiagent:
            if not ((isinstance(self.env, MultiAgentEnv)
                     or isinstance(self.env, ExternalMultiAgentEnv))
                    or isinstance(self.env, BaseEnv)):
                raise ValueError(
                    "Have multiple policies {}, but the env ".format(
                        self.policy_map) +
                    "{} is not a subclass of BaseEnv, MultiAgentEnv or "
                    "ExternalMultiAgentEnv?".format(self.env))

        # One observation filter per policy.
        self.filters = {
            policy_id: get_filter(observation_filter,
                                  policy.observation_space.shape)
            for (policy_id, policy) in self.policy_map.items()
        }
        if self.worker_index == 0:
            logger.info("Built filter map: {}".format(self.filters))

        # Always use vector env for consistency even if num_envs = 1
        self.async_env = BaseEnv.to_base_env(
            self.env,
            make_env=make_env,
            num_envs=num_envs,
            remote_envs=remote_worker_envs,
            remote_env_batch_wait_ms=remote_env_batch_wait_ms)
        self.num_envs = num_envs

        if self.batch_mode == "truncate_episodes":
            unroll_length = batch_steps
            pack_episodes = True
        elif self.batch_mode == "complete_episodes":
            unroll_length = float("inf")  # never cut episodes
            pack_episodes = False  # sampler will return 1 episode per poll
        else:
            raise ValueError("Unsupported batch mode: {}".format(
                self.batch_mode))

        self.io_context = IOContext(log_dir, policy_config, worker_index, self)
        self.reward_estimators = []
        for method in input_evaluation:
            if method == "simulation":
                logger.warning(
                    "Requested 'simulation' input evaluation method: "
                    "will discard all sampler outputs and keep only metrics.")
                sample_async = True
            elif method == "is":
                ise = ImportanceSamplingEstimator.create(self.io_context)
                self.reward_estimators.append(ise)
            elif method == "wis":
                wise = WeightedImportanceSamplingEstimator.create(
                    self.io_context)
                self.reward_estimators.append(wise)
            else:
                raise ValueError(
                    "Unknown evaluation method: {}".format(method))

        if sample_async:
            self.sampler = AsyncSampler(
                self.async_env,
                self.policy_map,
                policy_mapping_fn,
                self.preprocessors,
                self.filters,
                clip_rewards,
                unroll_length,
                self.callbacks,
                horizon=episode_horizon,
                pack=pack_episodes,
                tf_sess=self.tf_sess,
                clip_actions=clip_actions,
                blackhole_outputs="simulation" in input_evaluation,
                soft_horizon=soft_horizon,
                no_done_at_end=no_done_at_end)
            self.sampler.start()
        else:
            self.sampler = SyncSampler(
                self.async_env,
                self.policy_map,
                policy_mapping_fn,
                self.preprocessors,
                self.filters,
                clip_rewards,
                unroll_length,
                self.callbacks,
                horizon=episode_horizon,
                pack=pack_episodes,
                tf_sess=self.tf_sess,
                clip_actions=clip_actions,
                soft_horizon=soft_horizon,
                no_done_at_end=no_done_at_end)

        self.input_reader = input_creator(self.io_context)
        assert isinstance(self.input_reader, InputReader), self.input_reader
        self.output_writer = output_creator(self.io_context)
        assert isinstance(self.output_writer, OutputWriter), self.output_writer

        logger.debug(
            "Created rollout worker with env {} ({}), policies {}".format(
                self.async_env, self.env, self.policy_map))

    @override(EvaluatorInterface)
    def sample(self):
        """Evaluate the current policies and return a batch of experiences.

        Return:
            SampleBatch|MultiAgentBatch from evaluating the current policies.
        """
        if self._fake_sampler and self.last_batch is not None:
            return self.last_batch

        if log_once("sample_start"):
            logger.info("Generating sample batch of size {}".format(
                self.sample_batch_size))

        batches = [self.input_reader.next()]
        steps_so_far = batches[0].count

        # In truncate_episodes mode, never pull more than 1 batch per env.
        # This avoids over-running the target batch size.
        if self.batch_mode == "truncate_episodes":
            max_batches = self.num_envs
        else:
            max_batches = float("inf")

        while steps_so_far < self.sample_batch_size and len(
                batches) < max_batches:
            batch = self.input_reader.next()
            steps_so_far += batch.count
            batches.append(batch)
        batch = batches[0].concat_samples(batches)

        if self.callbacks.get("on_sample_end"):
            self.callbacks["on_sample_end"]({"worker": self, "samples": batch})

        # Always do writes prior to compression for consistency and to allow
        # for better compression inside the writer.
        self.output_writer.write(batch)

        # Do off-policy estimation if needed
        if self.reward_estimators:
            for sub_batch in batch.split_by_episode():
                for estimator in self.reward_estimators:
                    estimator.process(sub_batch)

        if log_once("sample_end"):
            logger.info("Completed sample batch:\n\n{}\n".format(
                summarize(batch)))

        if self.compress_observations == "bulk":
            batch.compress(bulk=True)
        elif self.compress_observations:
            batch.compress()

        if self._fake_sampler:
            self.last_batch = batch
        return batch

    @DeveloperAPI
    @ray.method(num_return_vals=2)
    def sample_with_count(self):
        """Same as sample() but returns the count as a separate future."""
        batch = self.sample()
        return batch, batch.count

    @override(EvaluatorInterface)
    def get_weights(self, policies=None):
        """Return a dict mapping policy id -> weights, for the given
        policy ids (defaults to all policies)."""
        if policies is None:
            policies = self.policy_map.keys()
        return {
            pid: policy.get_weights()
            for pid, policy in self.policy_map.items() if pid in policies
        }

    @override(EvaluatorInterface)
    def set_weights(self, weights):
        """Set policy weights from a dict mapping policy id -> weights."""
        for pid, w in weights.items():
            self.policy_map[pid].set_weights(w)

    @override(EvaluatorInterface)
    def compute_gradients(self, samples):
        """Compute (but do not apply) gradients on the given samples.

        Multi-agent batches are processed per trainable policy; graph-mode
        TF policies are batched through a single TFRunBuilder session run.
        Returns (grads, info) — in the single-agent case info additionally
        carries "batch_count".
        """
        if log_once("compute_gradients"):
            logger.info("Compute gradients on:\n\n{}\n".format(
                summarize(samples)))
        if isinstance(samples, MultiAgentBatch):
            grad_out, info_out = {}, {}
            if self.tf_sess is not None:
                builder = TFRunBuilder(self.tf_sess, "compute_gradients")
                for pid, batch in samples.policy_batches.items():
                    if pid not in self.policies_to_train:
                        continue
                    grad_out[pid], info_out[pid] = (
                        self.policy_map[pid]._build_compute_gradients(
                            builder, batch))
                grad_out = {k: builder.get(v) for k, v in grad_out.items()}
                info_out = {k: builder.get(v) for k, v in info_out.items()}
            else:
                for pid, batch in samples.policy_batches.items():
                    if pid not in self.policies_to_train:
                        continue
                    grad_out[pid], info_out[pid] = (
                        self.policy_map[pid].compute_gradients(batch))
        else:
            grad_out, info_out = (
                self.policy_map[DEFAULT_POLICY_ID].compute_gradients(samples))
            info_out["batch_count"] = samples.count
        if log_once("grad_out"):
            logger.info("Compute grad info:\n\n{}\n".format(
                summarize(info_out)))
        return grad_out, info_out

    @override(EvaluatorInterface)
    def apply_gradients(self, grads):
        """Apply the given gradients (dict of policy id -> grads, or plain
        grads in the single-agent case) to this worker's policies."""
        if log_once("apply_gradients"):
            logger.info("Apply gradients:\n\n{}\n".format(summarize(grads)))
        if isinstance(grads, dict):
            if self.tf_sess is not None:
                builder = TFRunBuilder(self.tf_sess, "apply_gradients")
                outputs = {
                    pid: self.policy_map[pid]._build_apply_gradients(
                        builder, grad)
                    for pid, grad in grads.items()
                }
                return {k: builder.get(v) for k, v in outputs.items()}
            else:
                return {
                    pid: self.policy_map[pid].apply_gradients(g)
                    for pid, g in grads.items()
                }
        else:
            return self.policy_map[DEFAULT_POLICY_ID].apply_gradients(grads)

    @override(EvaluatorInterface)
    def learn_on_batch(self, samples):
        """Update policies in-place on the given batch and return the
        per-policy learner info dict."""
        if log_once("learn_on_batch"):
            logger.info(
                "Training on concatenated sample batches:\n\n{}\n".format(
                    summarize(samples)))
        if isinstance(samples, MultiAgentBatch):
            info_out = {}
            to_fetch = {}
            if self.tf_sess is not None:
                builder = TFRunBuilder(self.tf_sess, "learn_on_batch")
            else:
                builder = None
            for pid, batch in samples.policy_batches.items():
                if pid not in self.policies_to_train:
                    continue
                policy = self.policy_map[pid]
                if builder and hasattr(policy, "_build_learn_on_batch"):
                    # Defer the session run so all policies go in one fetch.
                    to_fetch[pid] = policy._build_learn_on_batch(
                        builder, batch)
                else:
                    info_out[pid] = policy.learn_on_batch(batch)
            info_out.update({k: builder.get(v) for k, v in to_fetch.items()})
        else:
            info_out = self.policy_map[DEFAULT_POLICY_ID].learn_on_batch(
                samples)
        if log_once("learn_out"):
            logger.debug("Training out:\n\n{}\n".format(summarize(info_out)))
        return info_out

    @DeveloperAPI
    def get_metrics(self):
        """Returns a list of new RolloutMetric objects from evaluation."""
        out = self.sampler.get_metrics()
        for m in self.reward_estimators:
            out.extend(m.get_metrics())
        return out

    @DeveloperAPI
    def foreach_env(self, func):
        """Apply the given function to each underlying env instance."""
        envs = self.async_env.get_unwrapped()
        if not envs:
            # No unwrapped sub-envs available; apply to the wrapper itself.
            return [func(self.async_env)]
        else:
            return [func(e) for e in envs]

    @DeveloperAPI
    def get_policy(self, policy_id=DEFAULT_POLICY_ID):
        """Return policy for the specified id, or None.

        Arguments:
            policy_id (str): id of policy to return.
        """
        return self.policy_map.get(policy_id)

    @DeveloperAPI
    def for_policy(self, func, policy_id=DEFAULT_POLICY_ID):
        """Apply the given function to the specified policy."""
        return func(self.policy_map[policy_id])

    @DeveloperAPI
    def foreach_policy(self, func):
        """Apply the given function to each (policy, policy_id) tuple."""
        return [func(policy, pid) for pid, policy in self.policy_map.items()]

    @DeveloperAPI
    def foreach_trainable_policy(self, func):
        """
        Applies the given function to each (policy, policy_id) tuple, which
        can be found in `self.policies_to_train`.

        Args:
            func (callable): A function - taking a Policy and its ID - that is
                called on all Policies within `self.policies_to_train`.

        Returns:
            List[any]: The list of n return values of all
                `func([policy], [ID])`-calls.
        """
        return [
            func(policy, pid) for pid, policy in self.policy_map.items()
            if pid in self.policies_to_train
        ]

    @DeveloperAPI
    def sync_filters(self, new_filters):
        """Changes self's filter to given and rebases any accumulated delta.

        Args:
            new_filters (dict): Filters with new state to update local copy.
        """
        assert all(k in new_filters for k in self.filters)
        for k in self.filters:
            self.filters[k].sync(new_filters[k])

    @DeveloperAPI
    def get_filters(self, flush_after=False):
        """Returns a snapshot of filters.

        Args:
            flush_after (bool): Clears the filter buffer state.

        Returns:
            return_filters (dict): Dict for serializable filters
        """
        return_filters = {}
        for k, f in self.filters.items():
            return_filters[k] = f.as_serializable()
            if flush_after:
                f.clear_buffer()
        return return_filters

    @DeveloperAPI
    def save(self):
        """Serialize this worker's filters and policy states to bytes."""
        filters = self.get_filters(flush_after=True)
        state = {
            pid: self.policy_map[pid].get_state()
            for pid in self.policy_map
        }
        return pickle.dumps({"filters": filters, "state": state})

    @DeveloperAPI
    def restore(self, objs):
        """Restore filters and policy states from save() output bytes."""
        objs = pickle.loads(objs)
        self.sync_filters(objs["filters"])
        for pid, state in objs["state"].items():
            self.policy_map[pid].set_state(state)

    @DeveloperAPI
    def set_global_vars(self, global_vars):
        """Broadcast updated global vars (e.g. timestep) to all policies."""
        self.foreach_policy(lambda p, _: p.on_global_var_update(global_vars))

    @DeveloperAPI
    def export_policy_model(self, export_dir, policy_id=DEFAULT_POLICY_ID):
        """Export the given policy's model to export_dir."""
        self.policy_map[policy_id].export_model(export_dir)

    @DeveloperAPI
    def export_policy_checkpoint(self,
                                 export_dir,
                                 filename_prefix="model",
                                 policy_id=DEFAULT_POLICY_ID):
        """Export a checkpoint of the given policy to export_dir."""
        self.policy_map[policy_id].export_checkpoint(export_dir,
                                                     filename_prefix)

    @DeveloperAPI
    def stop(self):
        """Stop the underlying (vectorized) environment."""
        self.async_env.stop()

    def _build_policy_map(self, policy_dict, policy_config):
        """Instantiate policies and preprocessors from a canonical
        policy dict; returns (policy_map, preprocessors)."""
        policy_map = {}
        preprocessors = {}
        for name, (cls, obs_space, act_space,
                   conf) in sorted(policy_dict.items()):
            logger.debug("Creating policy for {}".format(name))
            merged_conf = merge_dicts(policy_config, conf)
            if self.preprocessing_enabled:
                preprocessor = ModelCatalog.get_preprocessor_for_space(
                    obs_space, merged_conf.get("model"))
                preprocessors[name] = preprocessor
                # Policies see the preprocessed (flattened) space.
                obs_space = preprocessor.observation_space
            else:
                preprocessors[name] = NoPreprocessor(obs_space)
            if isinstance(obs_space, gym.spaces.Dict) or \
                    isinstance(obs_space, gym.spaces.Tuple):
                raise ValueError(
                    "Found raw Tuple|Dict space as input to policy. "
                    "Please preprocess these observations with a "
                    "Tuple|DictFlatteningPreprocessor.")
            if tf and tf.executing_eagerly():
                if hasattr(cls, "as_eager"):
                    cls = cls.as_eager()
                    # NOTE(review): direct indexing assumes "eager_tracing"
                    # is always present in eager-mode configs — confirm
                    # against the trainer's default config.
                    if policy_config["eager_tracing"]:
                        cls = cls.with_tracing()
                elif not issubclass(cls, TFPolicy):
                    pass  # could be some other type of policy
                else:
                    raise ValueError("This policy does not support eager "
                                     "execution: {}".format(cls))
            if tf:
                with tf.variable_scope(name):
                    policy_map[name] = cls(obs_space, act_space, merged_conf)
            else:
                policy_map[name] = cls(obs_space, act_space, merged_conf)
        if self.worker_index == 0:
            logger.info("Built policy map: {}".format(policy_map))
            logger.info("Built preprocessor map: {}".format(preprocessors))
        return policy_map, preprocessors

    def __del__(self):
        # Signal the background sampler thread to exit, if one is running.
        if hasattr(self, "sampler") and isinstance(self.sampler, AsyncSampler):
            self.sampler.shutdown = True
def _validate_and_canonicalize(policy, env):
    """Normalize `policy` to the multi-agent dict format.

    A dict is validated and returned unchanged; a single Policy class is
    wrapped under DEFAULT_POLICY_ID using the env's declared spaces.
    """
    if isinstance(policy, dict):
        _validate_multiagent_config(policy)
        return policy
    if not issubclass(policy, Policy):
        raise ValueError("policy must be a rllib.Policy class")
    if (isinstance(env, MultiAgentEnv)
            and not hasattr(env, "observation_space")):
        raise ValueError(
            "MultiAgentEnv must have observation_space defined if run "
            "in a single-agent configuration.")
    spec = (policy, env.observation_space, env.action_space, {})
    return {DEFAULT_POLICY_ID: spec}
def _validate_multiagent_config(policy, allow_none_graph=False):
for k, v in policy.items():
if not isinstance(k, str):
raise ValueError("policy keys must be strs, got {}".format(
type(k)))
if not isinstance(v, (tuple, list)) or len(v) != 4:
raise ValueError(
"policy values must be tuples/lists of "
"(cls or None, obs_space, action_space, config), got {}".
format(v))
if allow_none_graph and v[0] is None:
pass
elif not issubclass(v[0], Policy):
raise ValueError("policy tuple value 0 must be a rllib.Policy "
"class or None, got {}".format(v[0]))
if not isinstance(v[1], gym.Space):
raise ValueError(
"policy tuple value 1 (observation_space) must be a "
"gym.Space, got {}".format(type(v[1])))
if not isinstance(v[2], gym.Space):
raise ValueError("policy tuple value 2 (action_space) must be a "
"gym.Space, got {}".format(type(v[2])))
if not isinstance(v[3], dict):
raise ValueError("policy tuple value 3 (config) must be a dict, "
"got {}".format(type(v[3])))
def _validate_env(env):
# allow this as a special case (assumed gym.Env)
if hasattr(env, "observation_space") and hasattr(env, "action_space"):
return env
allowed_types = [gym.Env, MultiAgentEnv, ExternalEnv, VectorEnv, BaseEnv]
if not any(isinstance(env, tpe) for tpe in allowed_types):
raise ValueError(
"Returned env should be an instance of gym.Env, MultiAgentEnv, "
"ExternalEnv, VectorEnv, or BaseEnv. The provided env creator "
"function returned {} ({}).".format(env, type(env)))
return env
def _has_tensorflow_graph(policy_dict):
for policy, _, _, _ in policy_dict.values():
if issubclass(policy, TFPolicy):
return True
return False
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/sample_batch.py
|
Python
|
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils import renamed_class
# Backward-compatibility aliases: both classes moved to
# ray.rllib.policy.sample_batch. renamed_class presumably warns when the
# old import path is used — confirm against ray.rllib.utils.renamed_class.
SampleBatch = renamed_class(
    SampleBatch, old_name="rllib.evaluation.SampleBatch")
MultiAgentBatch = renamed_class(
    MultiAgentBatch, old_name="rllib.evaluation.MultiAgentBatch")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/sample_batch_builder.py
|
Python
|
import collections
import logging
import numpy as np
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI
from ray.rllib.utils.debug import log_once, summarize
logger = logging.getLogger(__name__)
def to_float_array(v):
    """Convert ``v`` to an ndarray, downcasting float64 to float32.

    Non-float64 dtypes (ints, bools, float32, objects) pass through
    unchanged; the downcast only exists to save memory.
    """
    converted = np.array(v)
    is_double = converted.dtype == np.float64
    return converted.astype(np.float32) if is_double else converted
@PublicAPI
class SampleBatchBuilder:
    """Accumulates rows one at a time and emits them as a SampleBatch.

    SampleBatches hold values column-wise (as arrays); this helper lets
    callers append row dicts incrementally and materialize the columns
    later via build_and_reset().
    """

    @PublicAPI
    def __init__(self):
        self.buffers = collections.defaultdict(list)
        self.count = 0
        self.unroll_id = 0  # disambiguates unrolls within a single episode

    @PublicAPI
    def add_values(self, **values):
        """Add the given dictionary (row) of values to this batch."""
        for key, value in values.items():
            self.buffers[key].append(value)
        self.count += 1

    @PublicAPI
    def add_batch(self, batch):
        """Add the given batch of values to this batch."""
        for key, column in batch.items():
            self.buffers[key].extend(column)
        self.count += batch.count

    @PublicAPI
    def build_and_reset(self):
        """Returns a sample batch including all previously added values."""
        columns = {
            key: to_float_array(col)
            for key, col in self.buffers.items()
        }
        batch = SampleBatch(columns)
        # Tag every row with the current unroll id so downstream code can
        # tell apart multiple unrolls of the same episode.
        batch.data[SampleBatch.UNROLL_ID] = np.repeat(self.unroll_id,
                                                      batch.count)
        self.buffers.clear()
        self.count = 0
        self.unroll_id += 1
        return batch
@DeveloperAPI
class MultiAgentSampleBatchBuilder:
    """Util to build SampleBatches for each policy in a multi-agent env.

    Input data is per-agent, while output data is per-policy. There is an M:N
    mapping between agents and policies. We retain one local batch builder
    per agent. When an agent is done, then its local batch is appended into the
    corresponding policy batch for the agent's policy.
    """

    def __init__(self, policy_map, clip_rewards, postp_callback):
        """Initialize a MultiAgentSampleBatchBuilder.

        Arguments:
            policy_map (dict): Maps policy ids to policy instances.
            clip_rewards (bool): Whether to clip rewards before postprocessing.
            postp_callback: function to call on each postprocessed batch.
        """
        self.policy_map = policy_map
        self.clip_rewards = clip_rewards
        # Per-policy builders holding rows that already went through
        # postprocess_trajectory().
        self.policy_builders = {
            k: SampleBatchBuilder()
            for k in policy_map.keys()
        }
        # Per-agent builders for raw (not yet postprocessed) rows; created
        # lazily in add_values() and cleared by postprocess_batch_so_far().
        self.agent_builders = {}
        self.agent_to_policy = {}
        self.postp_callback = postp_callback
        self.count = 0  # increment this manually

    def total(self):
        """Returns summed number of steps across all policy builders."""
        return sum(p.count for p in self.policy_builders.values())

    def has_pending_data(self):
        """Returns whether there is pending unprocessed data."""
        return len(self.agent_builders) > 0

    @DeveloperAPI
    def add_values(self, agent_id, policy_id, **values):
        """Add the given dictionary (row) of values to this batch.

        Arguments:
            agent_id (obj): Unique id for the agent we are adding values for.
            policy_id (obj): Unique id for policy controlling the agent.
            values (dict): Row of values to add for this agent.
        """
        if agent_id not in self.agent_builders:
            self.agent_builders[agent_id] = SampleBatchBuilder()
            self.agent_to_policy[agent_id] = policy_id
        builder = self.agent_builders[agent_id]
        builder.add_values(**values)

    def postprocess_batch_so_far(self, episode):
        """Apply policy postprocessors to any unprocessed rows.

        This pushes the postprocessed per-agent batches onto the per-policy
        builders, clearing per-agent state.

        Arguments:
            episode: current MultiAgentEpisode object or None
        """
        # Materialize the batches so far
        pre_batches = {}
        for agent_id, builder in self.agent_builders.items():
            pre_batches[agent_id] = (
                self.policy_map[self.agent_to_policy[agent_id]],
                builder.build_and_reset())

        # Apply postprocessor
        post_batches = {}
        if self.clip_rewards:
            # NOTE: "clipping" is sign-compression to {-1, 0, 1}.
            for _, (_, pre_batch) in pre_batches.items():
                pre_batch["rewards"] = np.sign(pre_batch["rewards"])
        for agent_id, (_, pre_batch) in pre_batches.items():
            # Pass the other agents' batches along so postprocessors can
            # look at them as well.
            other_batches = pre_batches.copy()
            del other_batches[agent_id]
            policy = self.policy_map[self.agent_to_policy[agent_id]]
            # Postprocessors require a single uninterrupted trajectory:
            # no intermediate dones and a single episode id.
            if any(pre_batch["dones"][:-1]) or len(set(
                    pre_batch["eps_id"])) > 1:
                raise ValueError(
                    "Batches sent to postprocessing must only contain steps "
                    "from a single trajectory.", pre_batch)
            post_batches[agent_id] = policy.postprocess_trajectory(
                pre_batch, other_batches, episode)

        if log_once("after_post"):
            logger.info(
                "Trajectory fragment after postprocess_trajectory():\n\n{}\n".
                format(summarize(post_batches)))

        # Append into policy batches and reset
        for agent_id, post_batch in sorted(post_batches.items()):
            self.policy_builders[self.agent_to_policy[agent_id]].add_batch(
                post_batch)
            if self.postp_callback:
                self.postp_callback({
                    "episode": episode,
                    "agent_id": agent_id,
                    "pre_batch": pre_batches[agent_id],
                    "post_batch": post_batch,
                    "all_pre_batches": pre_batches,
                })

        self.agent_builders.clear()
        self.agent_to_policy.clear()

    def check_missing_dones(self):
        # Raise if the episode ended but some agent's last recorded step is
        # not marked done (indicates a missing final observation).
        for agent_id, builder in self.agent_builders.items():
            if builder.buffers["dones"][-1] is not True:
                raise ValueError(
                    "The environment terminated for all agents, but we still "
                    "don't have a last observation for "
                    "agent {} (policy {}). ".format(
                        agent_id, self.agent_to_policy[agent_id]) +
                    "Please ensure that you include the last observations "
                    "of all live agents when setting '__all__' done to True. "
                    "Alternatively, set no_done_at_end=True to allow this.")

    @DeveloperAPI
    def build_and_reset(self, episode):
        """Returns the accumulated sample batches for each policy.

        Any unprocessed rows will be first postprocessed with a policy
        postprocessor. The internal state of this builder will be reset.

        Arguments:
            episode: current MultiAgentEpisode object or None
        """
        self.postprocess_batch_so_far(episode)
        policy_batches = {}
        for policy_id, builder in self.policy_builders.items():
            if builder.count > 0:
                policy_batches[policy_id] = builder.build_and_reset()
        old_count = self.count
        self.count = 0
        return MultiAgentBatch.wrap_as_needed(policy_batches, old_count)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/sampler.py
|
Python
|
from collections import defaultdict, namedtuple
import logging
import numpy as np
import six.moves.queue as queue
import threading
import time
from ray.rllib.evaluation.episode import MultiAgentEpisode, _flatten_action
from ray.rllib.evaluation.rollout_metrics import RolloutMetrics
from ray.rllib.evaluation.sample_batch_builder import \
MultiAgentSampleBatchBuilder
from ray.rllib.policy.policy import TupleActions
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.env.base_env import BaseEnv, ASYNC_RESET_RETURN
from ray.rllib.env.atari_wrappers import get_wrapper_by_cls, MonitorEnv
from ray.rllib.offline import InputReader
from ray.rllib.utils.annotations import override
from ray.rllib.utils.debug import log_once, summarize
from ray.rllib.utils.tf_run_builder import TFRunBuilder
from ray.rllib.policy.policy import clip_action
logger = logging.getLogger(__name__)
# One pending policy-evaluation request: the filtered observation (plus
# recurrent state and previous action/reward) for a single agent in a
# single env, queued for a batched forward pass.
PolicyEvalData = namedtuple("PolicyEvalData", [
    "env_id", "agent_id", "obs", "info", "rnn_state", "prev_action",
    "prev_reward"
])
class PerfStats:
    """Sampler perf stats that will be included in rollout metrics."""

    def __init__(self):
        self.iters = 0  # number of sampler loop iterations recorded
        self.env_wait_time = 0.0  # seconds spent waiting on env polling
        self.processing_time = 0.0  # seconds spent processing observations
        self.inference_time = 0.0  # seconds spent in policy evaluation

    def get(self):
        """Return mean per-iteration timings in milliseconds.

        Guards against division by zero so that querying the stats before
        the first iteration returns zeros instead of raising.
        """
        iters = self.iters or 1
        return {
            "mean_env_wait_ms": self.env_wait_time * 1000 / iters,
            "mean_processing_ms": self.processing_time * 1000 / iters,
            "mean_inference_ms": self.inference_time * 1000 / iters
        }
class SamplerInput(InputReader):
    """Reads input experiences from an existing sampler."""

    @override(InputReader)
    def next(self):
        # One regular batch plus anything pushed via the extra-batch queue.
        data = [self.get_data()] + self.get_extra_batches()
        if len(data) == 1:
            return data[0]
        return data[0].concat_samples(data)
class SyncSampler(SamplerInput):
    """Sampler that collects rollouts synchronously on the caller's thread.

    get_data() steps the underlying _env_runner generator until it yields a
    sample batch; RolloutMetrics items are diverted to a side queue and
    retrieved via get_metrics().
    """

    def __init__(self,
                 env,
                 policies,
                 policy_mapping_fn,
                 preprocessors,
                 obs_filters,
                 clip_rewards,
                 unroll_length,
                 callbacks,
                 horizon=None,
                 pack=False,
                 tf_sess=None,
                 clip_actions=True,
                 soft_horizon=False,
                 no_done_at_end=False):
        self.base_env = BaseEnv.to_base_env(env)
        self.unroll_length = unroll_length
        self.horizon = horizon
        self.policies = policies
        self.policy_mapping_fn = policy_mapping_fn
        self.preprocessors = preprocessors
        self.obs_filters = obs_filters
        # extra_batches must exist before _env_runner captures its put().
        self.extra_batches = queue.Queue()
        self.perf_stats = PerfStats()
        # Generator yielding SampleBatches and RolloutMetrics.
        self.rollout_provider = _env_runner(
            self.base_env, self.extra_batches.put, self.policies,
            self.policy_mapping_fn, self.unroll_length, self.horizon,
            self.preprocessors, self.obs_filters, clip_rewards, clip_actions,
            pack, callbacks, tf_sess, self.perf_stats, soft_horizon,
            no_done_at_end)
        self.metrics_queue = queue.Queue()

    def get_data(self):
        """Return the next sample batch, buffering any metrics seen."""
        while True:
            item = next(self.rollout_provider)
            if isinstance(item, RolloutMetrics):
                self.metrics_queue.put(item)
            else:
                return item

    def get_metrics(self):
        """Drain and return all buffered episode metrics."""
        completed = []
        while True:
            try:
                completed.append(self.metrics_queue.get_nowait()._replace(
                    perf_stats=self.perf_stats.get()))
            except queue.Empty:
                break
        return completed

    def get_extra_batches(self):
        """Drain and return batches pushed via the extra-batch callback."""
        extra = []
        while True:
            try:
                extra.append(self.extra_batches.get_nowait())
            except queue.Empty:
                break
        return extra
class AsyncSampler(threading.Thread, SamplerInput):
    """Sampler that runs _env_runner on a background daemon thread.

    Completed batches are handed to the consumer through a bounded queue;
    exceptions raised on the sampling thread are forwarded through the same
    queue and re-raised in get_data().
    """

    def __init__(self,
                 env,
                 policies,
                 policy_mapping_fn,
                 preprocessors,
                 obs_filters,
                 clip_rewards,
                 unroll_length,
                 callbacks,
                 horizon=None,
                 pack=False,
                 tf_sess=None,
                 clip_actions=True,
                 blackhole_outputs=False,
                 soft_horizon=False,
                 no_done_at_end=False):
        # Filters are updated from the sampling thread, so they must be
        # safe for concurrent use.
        for _, f in obs_filters.items():
            assert getattr(f, "is_concurrent", False), \
                "Observation Filter must support concurrent updates."
        self.base_env = BaseEnv.to_base_env(env)
        threading.Thread.__init__(self)
        self.queue = queue.Queue(5)  # bounded: applies backpressure
        self.extra_batches = queue.Queue()
        self.metrics_queue = queue.Queue()
        self.unroll_length = unroll_length
        self.horizon = horizon
        self.policies = policies
        self.policy_mapping_fn = policy_mapping_fn
        self.preprocessors = preprocessors
        self.obs_filters = obs_filters
        self.clip_rewards = clip_rewards
        self.daemon = True  # don't block interpreter exit on this thread
        self.pack = pack
        self.tf_sess = tf_sess
        self.callbacks = callbacks
        self.clip_actions = clip_actions
        self.blackhole_outputs = blackhole_outputs
        self.soft_horizon = soft_horizon
        self.no_done_at_end = no_done_at_end
        self.perf_stats = PerfStats()
        self.shutdown = False

    def run(self):
        try:
            self._run()
        except BaseException as e:
            # Forward the error to the consumer thread via the data queue.
            self.queue.put(e)
            raise e

    def _run(self):
        if self.blackhole_outputs:
            # Discard all produced data instead of queueing it.
            queue_putter = (lambda x: None)
            extra_batches_putter = (lambda x: None)
        else:
            queue_putter = self.queue.put
            extra_batches_putter = (
                lambda x: self.extra_batches.put(x, timeout=600.0))
        rollout_provider = _env_runner(
            self.base_env, extra_batches_putter, self.policies,
            self.policy_mapping_fn, self.unroll_length, self.horizon,
            self.preprocessors, self.obs_filters, self.clip_rewards,
            self.clip_actions, self.pack, self.callbacks, self.tf_sess,
            self.perf_stats, self.soft_horizon, self.no_done_at_end)
        while not self.shutdown:
            # The timeout variable exists because apparently, if one worker
            # dies, the other workers won't die with it, unless the timeout is
            # set to some large number. This is an empirical observation.
            item = next(rollout_provider)
            if isinstance(item, RolloutMetrics):
                self.metrics_queue.put(item)
            else:
                queue_putter(item)

    def get_data(self):
        """Return the next batch from the sampling thread (blocking)."""
        if not self.is_alive():
            raise RuntimeError("Sampling thread has died")
        rollout = self.queue.get(timeout=600.0)

        # Propagate errors
        if isinstance(rollout, BaseException):
            raise rollout

        return rollout

    def get_metrics(self):
        """Drain and return all buffered episode metrics."""
        completed = []
        while True:
            try:
                completed.append(self.metrics_queue.get_nowait()._replace(
                    perf_stats=self.perf_stats.get()))
            except queue.Empty:
                break
        return completed

    def get_extra_batches(self):
        """Drain and return batches pushed via the extra-batch callback."""
        extra = []
        while True:
            try:
                extra.append(self.extra_batches.get_nowait())
            except queue.Empty:
                break
        return extra
def _env_runner(base_env, extra_batch_callback, policies, policy_mapping_fn,
                unroll_length, horizon, preprocessors, obs_filters,
                clip_rewards, clip_actions, pack, callbacks, tf_sess,
                perf_stats, soft_horizon, no_done_at_end):
    """This implements the common experience collection logic.

    Args:
        base_env (BaseEnv): env implementing BaseEnv.
        extra_batch_callback (fn): function to send extra batch data to.
        policies (dict): Map of policy ids to Policy instances.
        policy_mapping_fn (func): Function that maps agent ids to policy ids.
            This is called when an agent first enters the environment. The
            agent is then "bound" to the returned policy for the episode.
        unroll_length (int): Number of episode steps before `SampleBatch` is
            yielded. Set to infinity to yield complete episodes.
        horizon (int): Horizon of the episode.
        preprocessors (dict): Map of policy id to preprocessor for the
            observations prior to filtering.
        obs_filters (dict): Map of policy id to filter used to process
            observations for the policy.
        clip_rewards (bool): Whether to clip rewards before postprocessing.
        pack (bool): Whether to pack multiple episodes into each batch. This
            guarantees batches will be exactly `unroll_length` in size.
        clip_actions (bool): Whether to clip actions to the space range.
        callbacks (dict): User callbacks to run on episode events.
        tf_sess (Session|None): Optional tensorflow session to use for batching
            TF policy evaluations.
        perf_stats (PerfStats): Record perf stats into this object.
        soft_horizon (bool): Calculate rewards but don't reset the
            environment when the horizon is hit.
        no_done_at_end (bool): Ignore the done=True at the end of the episode
            and instead record done=False.

    Yields:
        rollout (SampleBatch): Object containing state, action, reward,
            terminal condition, and other fields as dictated by `policy`.
    """

    # Try to infer a horizon from the first underlying env's spec; any
    # failure (no spec, no unwrapped env, ...) falls back to infinity.
    try:
        if not horizon:
            horizon = (base_env.get_unwrapped()[0].spec.max_episode_steps)
    except Exception:
        logger.debug("no episode horizon specified, assuming inf")
    if not horizon:
        horizon = float("inf")

    # Pool of batch builders, which can be shared across episodes to pack
    # trajectory data.
    batch_builder_pool = []

    def get_batch_builder():
        # Reuse a pooled builder when available to avoid reallocation.
        if batch_builder_pool:
            return batch_builder_pool.pop()
        else:
            return MultiAgentSampleBatchBuilder(
                policies, clip_rewards, callbacks.get("on_postprocess_traj"))

    def new_episode():
        episode = MultiAgentEpisode(policies, policy_mapping_fn,
                                    get_batch_builder, extra_batch_callback)
        if callbacks.get("on_episode_start"):
            callbacks["on_episode_start"]({
                "env": base_env,
                "policy": policies,
                "episode": episode,
            })
        return episode

    # Episodes are created lazily per env id on first access.
    active_episodes = defaultdict(new_episode)

    while True:
        perf_stats.iters += 1
        t0 = time.time()
        # Get observations from all ready agents
        unfiltered_obs, rewards, dones, infos, off_policy_actions = \
            base_env.poll()
        perf_stats.env_wait_time += time.time() - t0

        if log_once("env_returns"):
            logger.info("Raw obs from env: {}".format(
                summarize(unfiltered_obs)))
            logger.info("Info return from env: {}".format(summarize(infos)))

        # Process observations and prepare for policy evaluation
        t1 = time.time()
        active_envs, to_eval, outputs = _process_observations(
            base_env, policies, batch_builder_pool, active_episodes,
            unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon,
            preprocessors, obs_filters, unroll_length, pack, callbacks,
            soft_horizon, no_done_at_end)
        perf_stats.processing_time += time.time() - t1
        for o in outputs:
            yield o

        # Do batched policy eval
        t2 = time.time()
        eval_results = _do_policy_eval(tf_sess, to_eval, policies,
                                       active_episodes)
        perf_stats.inference_time += time.time() - t2

        # Process results and update episode state
        t3 = time.time()
        actions_to_send = _process_policy_eval_results(
            to_eval, eval_results, active_episodes, active_envs,
            off_policy_actions, policies, clip_actions)
        perf_stats.processing_time += time.time() - t3

        # Return computed actions to ready envs. We also send to envs that have
        # taken off-policy actions; those envs are free to ignore the action.
        t4 = time.time()
        base_env.send_actions(actions_to_send)
        perf_stats.env_wait_time += time.time() - t4
def _process_observations(base_env, policies, batch_builder_pool,
                          active_episodes, unfiltered_obs, rewards, dones,
                          infos, off_policy_actions, horizon, preprocessors,
                          obs_filters, unroll_length, pack, callbacks,
                          soft_horizon, no_done_at_end):
    """Record new data from the environment and prepare for policy evaluation.

    Returns:
        active_envs: set of non-terminated env ids
        to_eval: map of policy_id to list of agent PolicyEvalData
        outputs: list of metrics and samples to return from the sampler
    """

    active_envs = set()
    to_eval = defaultdict(list)
    outputs = []

    # For each environment
    for env_id, agent_obs in unfiltered_obs.items():
        new_episode = env_id not in active_episodes
        episode = active_episodes[env_id]
        if not new_episode:
            episode.length += 1
            episode.batch_builder.count += 1
            episode._add_agent_rewards(rewards[env_id])

        if (episode.batch_builder.total() > max(1000, unroll_length * 10)
                and log_once("large_batch_warning")):
            logger.warning(
                "More than {} observations for {} env steps ".format(
                    episode.batch_builder.total(),
                    episode.batch_builder.count) + "are buffered in "
                "the sampler. If this is more than you expected, check that "
                "that you set a horizon on your environment correctly. Note "
                "that in multi-agent environments, `sample_batch_size` sets "
                "the batch size based on environment steps, not the steps of "
                "individual agents, which can result in unexpectedly large "
                "batches.")

        # Check episode termination conditions
        if dones[env_id]["__all__"] or episode.length >= horizon:
            # hit_horizon: terminated by step limit, not by the env itself.
            hit_horizon = (episode.length >= horizon
                           and not dones[env_id]["__all__"])
            all_done = True
            atari_metrics = _fetch_atari_metrics(base_env)
            if atari_metrics is not None:
                for m in atari_metrics:
                    outputs.append(
                        m._replace(custom_metrics=episode.custom_metrics))
            else:
                outputs.append(
                    RolloutMetrics(episode.length, episode.total_reward,
                                   dict(episode.agent_rewards),
                                   episode.custom_metrics, {}))
        else:
            hit_horizon = False
            all_done = False
            active_envs.add(env_id)

        # For each agent in the environment
        for agent_id, raw_obs in agent_obs.items():
            policy_id = episode.policy_for(agent_id)
            prep_obs = _get_or_raise(preprocessors,
                                     policy_id).transform(raw_obs)
            if log_once("prep_obs"):
                logger.info("Preprocessed obs: {}".format(summarize(prep_obs)))

            filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs)
            if log_once("filtered_obs"):
                logger.info("Filtered obs: {}".format(summarize(filtered_obs)))

            agent_done = bool(all_done or dones[env_id].get(agent_id))
            if not agent_done:
                # Queue this agent for a batched policy forward pass.
                to_eval[policy_id].append(
                    PolicyEvalData(env_id, agent_id, filtered_obs,
                                   infos[env_id].get(agent_id, {}),
                                   episode.rnn_state_for(agent_id),
                                   episode.last_action_for(agent_id),
                                   rewards[env_id][agent_id] or 0.0))

            last_observation = episode.last_observation_for(agent_id)
            episode._set_last_observation(agent_id, filtered_obs)
            episode._set_last_raw_obs(agent_id, raw_obs)
            episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))

            # Record transition info if applicable. The first observation of
            # an episode has no preceding transition (last_observation is
            # None), so nothing is recorded for it.
            if (last_observation is not None and infos[env_id].get(
                    agent_id, {}).get("training_enabled", True)):
                episode.batch_builder.add_values(
                    agent_id,
                    policy_id,
                    t=episode.length - 1,
                    eps_id=episode.episode_id,
                    agent_index=episode._agent_index(agent_id),
                    obs=last_observation,
                    actions=episode.last_action_for(agent_id),
                    rewards=rewards[env_id][agent_id],
                    prev_actions=episode.prev_action_for(agent_id),
                    prev_rewards=episode.prev_reward_for(agent_id),
                    dones=(False if (no_done_at_end
                                     or (hit_horizon and soft_horizon)) else
                           agent_done),
                    infos=infos[env_id].get(agent_id, {}),
                    new_obs=filtered_obs,
                    **episode.last_pi_info_for(agent_id))

        # Invoke the step callback after the step is logged to the episode
        if callbacks.get("on_episode_step"):
            callbacks["on_episode_step"]({"env": base_env, "episode": episode})

        # Cut the batch if we're not packing multiple episodes into one,
        # or if we've exceeded the requested batch size.
        if episode.batch_builder.has_pending_data():
            if dones[env_id]["__all__"] and not no_done_at_end:
                episode.batch_builder.check_missing_dones()
            if (all_done and not pack) or \
                    episode.batch_builder.count >= unroll_length:
                outputs.append(episode.batch_builder.build_and_reset(episode))
            elif all_done:
                # Make sure postprocessor stays within one episode
                episode.batch_builder.postprocess_batch_so_far(episode)

        if all_done:
            # Handle episode termination
            batch_builder_pool.append(episode.batch_builder)
            if callbacks.get("on_episode_end"):
                callbacks["on_episode_end"]({
                    "env": base_env,
                    "policy": policies,
                    "episode": episode
                })
            if hit_horizon and soft_horizon:
                # Keep the env running; reuse the current obs as the "reset".
                episode.soft_reset()
                resetted_obs = agent_obs
            else:
                del active_episodes[env_id]
                resetted_obs = base_env.try_reset(env_id)
            if resetted_obs is None:
                # Reset not supported, drop this env from the ready list
                if horizon != float("inf"):
                    raise ValueError(
                        "Setting episode horizon requires reset() support "
                        "from the environment.")
            elif resetted_obs != ASYNC_RESET_RETURN:
                # Creates a new episode if this is not async return
                # If reset is async, we will get its result in some future poll
                episode = active_episodes[env_id]
                for agent_id, raw_obs in resetted_obs.items():
                    policy_id = episode.policy_for(agent_id)
                    policy = _get_or_raise(policies, policy_id)
                    prep_obs = _get_or_raise(preprocessors,
                                             policy_id).transform(raw_obs)
                    filtered_obs = _get_or_raise(obs_filters,
                                                 policy_id)(prep_obs)
                    episode._set_last_observation(agent_id, filtered_obs)
                    to_eval[policy_id].append(
                        PolicyEvalData(
                            env_id, agent_id, filtered_obs,
                            episode.last_info_for(agent_id) or {},
                            episode.rnn_state_for(agent_id),
                            np.zeros_like(
                                _flatten_action(policy.action_space.sample())),
                            0.0))

    return active_envs, to_eval, outputs
def _do_policy_eval(tf_sess, to_eval, policies, active_episodes):
    """Call compute actions on observation batches to get next actions.

    Returns:
        eval_results: dict of policy to compute_action() outputs.
    """

    eval_results = {}

    if tf_sess:
        builder = TFRunBuilder(tf_sess, "policy_eval")
        pending_fetches = {}
    else:
        builder = None
    if log_once("compute_actions_input"):
        logger.info("Inputs to compute_actions():\n\n{}\n".format(
            summarize(to_eval)))
    for policy_id, eval_data in to_eval.items():
        rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])
        policy = _get_or_raise(policies, policy_id)
        # Fast path: batch all TF policies into a single session run — but
        # only if compute_actions was not overridden by a subclass (checked
        # via code-object identity).
        if builder and (policy.compute_actions.__code__ is
                        TFPolicy.compute_actions.__code__):
            # TODO(ekl): how can we make info batch available to TF code?
            pending_fetches[policy_id] = policy._build_compute_actions(
                builder, [t.obs for t in eval_data],
                rnn_in_cols,
                prev_action_batch=[t.prev_action for t in eval_data],
                prev_reward_batch=[t.prev_reward for t in eval_data])
        else:
            eval_results[policy_id] = policy.compute_actions(
                [t.obs for t in eval_data],
                rnn_in_cols,
                prev_action_batch=[t.prev_action for t in eval_data],
                prev_reward_batch=[t.prev_reward for t in eval_data],
                info_batch=[t.info for t in eval_data],
                episodes=[active_episodes[t.env_id] for t in eval_data])
    if builder:
        # Resolve the deferred TF fetches now.
        for k, v in pending_fetches.items():
            eval_results[k] = builder.get(v)

    if log_once("compute_actions_result"):
        logger.info("Outputs of compute_actions():\n\n{}\n".format(
            summarize(eval_results)))

    return eval_results
def _process_policy_eval_results(to_eval, eval_results, active_episodes,
                                 active_envs, off_policy_actions, policies,
                                 clip_actions):
    """Process the output of policy neural network evaluation.

    Records policy evaluation results into the given episode objects and
    returns replies to send back to agents in the env.

    Returns:
        actions_to_send: nested dict of env id -> agent id -> agent replies.
    """

    actions_to_send = defaultdict(dict)
    for env_id in active_envs:
        actions_to_send[env_id] = {}  # at minimum send empty dict

    for policy_id, eval_data in to_eval.items():
        rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])
        actions, rnn_out_cols, pi_info_cols = eval_results[policy_id]
        if len(rnn_in_cols) != len(rnn_out_cols):
            raise ValueError("Length of RNN in did not match RNN out, got: "
                             "{} vs {}".format(rnn_in_cols, rnn_out_cols))
        # Add RNN state info
        for f_i, column in enumerate(rnn_in_cols):
            pi_info_cols["state_in_{}".format(f_i)] = column
        for f_i, column in enumerate(rnn_out_cols):
            pi_info_cols["state_out_{}".format(f_i)] = column
        # Save output rows
        actions = _unbatch_tuple_actions(actions)
        policy = _get_or_raise(policies, policy_id)
        for i, action in enumerate(actions):
            env_id = eval_data[i].env_id
            agent_id = eval_data[i].agent_id
            if clip_actions:
                actions_to_send[env_id][agent_id] = clip_action(
                    action, policy.action_space)
            else:
                actions_to_send[env_id][agent_id] = action
            episode = active_episodes[env_id]
            episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols])
            episode._set_last_pi_info(
                agent_id, {k: v[i]
                           for k, v in pi_info_cols.items()})
            if env_id in off_policy_actions and \
                    agent_id in off_policy_actions[env_id]:
                # The env already took its own action; record that one
                # instead of the policy's output.
                episode._set_last_action(agent_id,
                                         off_policy_actions[env_id][agent_id])
            else:
                episode._set_last_action(agent_id, action)

    return actions_to_send
def _fetch_atari_metrics(base_env):
    """Atari games have multiple logical episodes, one per life.

    However for metrics reporting we count full episodes all lives included.
    """
    envs = base_env.get_unwrapped()
    if not envs:
        return None
    results = []
    for env in envs:
        monitor = get_wrapper_by_cls(env, MonitorEnv)
        if not monitor:
            # Not monitor-wrapped: no full-episode stats available.
            return None
        results.extend(
            RolloutMetrics(eps_len, eps_rew, {}, {}, {})
            for eps_rew, eps_len in monitor.next_episode_results())
    return results
def _unbatch_tuple_actions(action_batch):
    """Convert a TupleActions (list of batches) into a batch of lists.

    Non-TupleActions inputs pass through unchanged.
    """
    if not isinstance(action_batch, TupleActions):
        return action_batch
    batches = action_batch.batches
    return [
        [batch[row] for batch in batches]
        for row in range(len(batches[0]))
    ]
def _to_column_format(rnn_state_rows):
num_cols = len(rnn_state_rows[0])
return [[row[i] for row in rnn_state_rows] for i in range(num_cols)]
def _get_or_raise(mapping, policy_id):
if policy_id not in mapping:
raise ValueError(
"Could not find policy for agent: agent policy id `{}` not "
"in policy map keys {}.".format(policy_id, mapping.keys()))
return mapping[policy_id]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/tf_policy_graph.py
|
Python
|
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.utils import renamed_class
# Deprecated alias: TFPolicyGraph was renamed to TFPolicy (now in
# ray.rllib.policy.tf_policy); renamed_class keeps old imports working.
TFPolicyGraph = renamed_class(TFPolicy, old_name="TFPolicyGraph")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/torch_policy_graph.py
|
Python
|
from ray.rllib.policy.torch_policy import TorchPolicy
from ray.rllib.utils import renamed_class
# Deprecated alias: TorchPolicyGraph was renamed to TorchPolicy (now in
# ray.rllib.policy.torch_policy); renamed_class keeps old imports working.
TorchPolicyGraph = renamed_class(TorchPolicy, old_name="TorchPolicyGraph")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/evaluation/worker_set.py
|
Python
|
import logging
from types import FunctionType
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.evaluation.rollout_worker import RolloutWorker, \
_validate_multiagent_config
from ray.rllib.offline import NoopOutput, JsonReader, MixedInput, JsonWriter, \
ShuffledInput
from ray.rllib.utils import merge_dicts, try_import_tf
from ray.rllib.utils.memory import ray_get_and_free
tf = try_import_tf()
logger = logging.getLogger(__name__)
@DeveloperAPI
class WorkerSet:
    """Represents a set of RolloutWorkers.

    There must be one local worker copy, and zero or more remote workers.
    """

    def __init__(self,
                 env_creator,
                 policy,
                 trainer_config=None,
                 num_workers=0,
                 logdir=None,
                 _setup=True):
        """Create a new WorkerSet and initialize its workers.

        Arguments:
            env_creator (func): Function that returns env given env config.
            policy (cls): rllib.policy.Policy class.
            trainer_config (dict): Optional dict that extends the common
                config of the Trainer class.
            num_workers (int): Number of remote rollout workers to create.
            logdir (str): Optional logging directory for workers.
            _setup (bool): Whether to setup workers. This is only for testing.
        """

        if not trainer_config:
            from ray.rllib.agents.trainer import COMMON_CONFIG
            trainer_config = COMMON_CONFIG

        self._env_creator = env_creator
        self._policy = policy
        self._remote_config = trainer_config
        self._num_workers = num_workers
        self._logdir = logdir

        if _setup:
            # The local worker uses its own TF session settings.
            self._local_config = merge_dicts(
                trainer_config,
                {"tf_session_args": trainer_config["local_tf_session_args"]})

            # Always create a local worker
            self._local_worker = self._make_worker(
                RolloutWorker, env_creator, policy, 0, self._local_config)

            # Create a number of remote workers
            self._remote_workers = []
            self.add_workers(num_workers)

    def local_worker(self):
        """Return the local rollout worker."""
        return self._local_worker

    def remote_workers(self):
        """Return a list of remote rollout workers."""
        return self._remote_workers

    def add_workers(self, num_workers):
        """Create and add a number of remote workers to this worker set."""
        remote_args = {
            "num_cpus": self._remote_config["num_cpus_per_worker"],
            "num_gpus": self._remote_config["num_gpus_per_worker"],
            "memory": self._remote_config["memory_per_worker"],
            "object_store_memory": self._remote_config[
                "object_store_memory_per_worker"],
            "resources": self._remote_config["custom_resources_per_worker"],
        }
        cls = RolloutWorker.as_remote(**remote_args).remote
        # Worker index 0 is reserved for the local worker; remote workers
        # get indices 1..num_workers.
        self._remote_workers.extend([
            self._make_worker(cls, self._env_creator, self._policy, i + 1,
                              self._remote_config) for i in range(num_workers)
        ])

    def reset(self, new_remote_workers):
        """Called to change the set of remote workers."""
        self._remote_workers = new_remote_workers

    def stop(self):
        """Stop all rollout workers."""
        self.local_worker().stop()
        for w in self.remote_workers():
            w.stop.remote()
            w.__ray_terminate__.remote()

    @DeveloperAPI
    def foreach_worker(self, func):
        """Apply the given function to each worker instance."""

        local_result = [func(self.local_worker())]
        remote_results = ray_get_and_free(
            [w.apply.remote(func) for w in self.remote_workers()])
        return local_result + remote_results

    @DeveloperAPI
    def foreach_worker_with_index(self, func):
        """Apply the given function to each worker instance.

        The index will be passed as the second arg to the given function.
        """

        local_result = [func(self.local_worker(), 0)]
        remote_results = ray_get_and_free([
            w.apply.remote(func, i + 1)
            for i, w in enumerate(self.remote_workers())
        ])
        return local_result + remote_results

    @staticmethod
    def _from_existing(local_worker, remote_workers=None):
        # Wrap pre-built workers in a WorkerSet without creating new ones.
        workers = WorkerSet(None, None, {}, _setup=False)
        workers._local_worker = local_worker
        workers._remote_workers = remote_workers or []
        return workers

    def _make_worker(self, cls, env_creator, policy, worker_index, config):
        """Construct one (local or remote) RolloutWorker from the config."""

        def session_creator():
            logger.debug("Creating TF session {}".format(
                config["tf_session_args"]))
            return tf.Session(
                config=tf.ConfigProto(**config["tf_session_args"]))

        # Resolve the experience input source from config["input"].
        if isinstance(config["input"], FunctionType):
            input_creator = config["input"]
        elif config["input"] == "sampler":
            input_creator = (lambda ioctx: ioctx.default_sampler_input())
        elif isinstance(config["input"], dict):
            input_creator = (lambda ioctx: ShuffledInput(
                MixedInput(config["input"], ioctx), config[
                    "shuffle_buffer_size"]))
        else:
            input_creator = (lambda ioctx: ShuffledInput(
                JsonReader(config["input"], ioctx), config[
                    "shuffle_buffer_size"]))

        # Resolve the experience output sink from config["output"].
        if isinstance(config["output"], FunctionType):
            output_creator = config["output"]
        elif config["output"] is None:
            output_creator = (lambda ioctx: NoopOutput())
        elif config["output"] == "logdir":
            output_creator = (lambda ioctx: JsonWriter(
                ioctx.log_dir,
                ioctx,
                max_file_size=config["output_max_file_size"],
                compress_columns=config["output_compress_columns"]))
        else:
            output_creator = (lambda ioctx: JsonWriter(
                config["output"],
                ioctx,
                max_file_size=config["output_max_file_size"],
                compress_columns=config["output_compress_columns"]))

        # Input evaluation is disabled when sampling directly from the env.
        if config["input"] == "sampler":
            input_evaluation = []
        else:
            input_evaluation = config["input_evaluation"]

        # Fill in the default policy if 'None' is specified in multiagent
        if config["multiagent"]["policies"]:
            tmp = config["multiagent"]["policies"]
            _validate_multiagent_config(tmp, allow_none_graph=True)
            for k, v in tmp.items():
                if v[0] is None:
                    # NOTE: mutates the config's policies dict in place.
                    tmp[k] = (policy, v[1], v[2], v[3])
            policy = tmp

        return cls(
            env_creator,
            policy,
            policy_mapping_fn=config["multiagent"]["policy_mapping_fn"],
            policies_to_train=config["multiagent"]["policies_to_train"],
            tf_session_creator=(session_creator
                                if config["tf_session_args"] else None),
            batch_steps=config["sample_batch_size"],
            batch_mode=config["batch_mode"],
            episode_horizon=config["horizon"],
            preprocessor_pref=config["preprocessor_pref"],
            sample_async=config["sample_async"],
            compress_observations=config["compress_observations"],
            num_envs=config["num_envs_per_worker"],
            observation_filter=config["observation_filter"],
            clip_rewards=config["clip_rewards"],
            clip_actions=config["clip_actions"],
            env_config=config["env_config"],
            model_config=config["model"],
            policy_config=config,
            worker_index=worker_index,
            monitor_path=self._logdir if config["monitor"] else None,
            log_dir=self._logdir,
            log_level=config["log_level"],
            callbacks=config["callbacks"],
            input_creator=input_creator,
            input_evaluation=input_evaluation,
            output_creator=output_creator,
            remote_worker_envs=config["remote_worker_envs"],
            remote_env_batch_wait_ms=config["remote_env_batch_wait_ms"],
            soft_horizon=config["soft_horizon"],
            no_done_at_end=config["no_done_at_end"],
            # Derive a distinct seed per worker when seeding is requested.
            seed=(config["seed"] + worker_index)
            if config["seed"] is not None else None,
            _fake_sampler=config.get("_fake_sampler", False))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/autoregressive_action_dist.py
|
Python
|
"""Example of specifying an autoregressive action distribution.
In an action space with multiple components (e.g., Tuple(a1, a2)), you might
want a2 to be sampled based on the sampled value of a1, i.e.,
a2_sampled ~ P(a2 | a1_sampled, obs). Normally, a1 and a2 would be sampled
independently.
To do this, you need both a custom model that implements the autoregressive
pattern, and a custom action distribution class that leverages that model.
This examples shows both.
"""
import gym
from gym.spaces import Discrete, Tuple
import argparse
import random
import ray
from ray import tune
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_action_dist import Categorical, ActionDistribution
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.policy.policy import TupleActions
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, default="PPO") # try PG, PPO, IMPALA
parser.add_argument("--stop", type=int, default=200)
class CorrelatedActionsEnv(gym.Env):
    """Toy env rewarding the agent for emitting a correlated action pair.

    Each step the agent outputs (a1, a2). It earns +5 when a1 matches the
    previous observation and another +5 when a2 matches a1, so a perfect
    policy scores roughly 200 reward per ~20-step episode.
    """

    def __init__(self, _):
        self.observation_space = Discrete(2)
        self.action_space = Tuple([Discrete(2), Discrete(2)])

    def reset(self):
        self.t = 0
        self.last = random.choice([0, 1])
        return self.last

    def step(self, action):
        self.t += 1
        first, second = action
        # +5 for predicting the previous observation, +5 for a1 == a2.
        reward = (5 if first == self.last else 0) + (5 if first == second else 0)
        done = self.t > 20
        self.last = random.choice([0, 1])
        return self.last, reward, done, {}
class BinaryAutoregressiveOutput(ActionDistribution):
    """Action distribution P(a1, a2) = P(a1) * P(a2 | a1)"""
    # NOTE(review): declared @staticmethod yet takes ``self`` — callers pass
    # the would-be instance positionally, so this works, but it is unusual.
    @staticmethod
    def required_model_output_shape(self, model_config):
        return 16  # controls model output feature vector size
    def sample(self):
        """Draw (a1, a2) autoregressively and cache the joint log-prob."""
        # first, sample a1
        a1_dist = self._a1_distribution()
        a1 = a1_dist.sample()
        # sample a2 conditioned on a1
        a2_dist = self._a2_distribution(a1)
        a2 = a2_dist.sample()
        # Joint log-prob of the sampled pair: log P(a1) + log P(a2 | a1).
        self._action_logp = a1_dist.logp(a1) + a2_dist.logp(a2)
        # return the action tuple
        return TupleActions([a1, a2])
    def logp(self, actions):
        """Joint log-prob of given (a1, a2) pairs under the current model."""
        a1, a2 = actions[:, 0], actions[:, 1]
        # Feed a1 back through the action model to get conditional a2 logits.
        a1_vec = tf.expand_dims(tf.cast(a1, tf.float32), 1)
        a1_logits, a2_logits = self.model.action_model([self.inputs, a1_vec])
        return (
            Categorical(a1_logits).logp(a1) + Categorical(a2_logits).logp(a2))
    def sampled_action_logp(self):
        # NOTE(review): despite the name, this returns exp(logp) — the
        # *probability* of the sampled action, not its log-prob. Later RLlib
        # versions return self._action_logp directly here; confirm which
        # contract this revision's TFPolicy (ACTION_LOGP) expects.
        return tf.exp(self._action_logp)
    def entropy(self):
        # Entropy of the a1 marginal plus entropy of the a2 conditional at a
        # single sampled a1 (a one-sample estimate of the joint entropy).
        a1_dist = self._a1_distribution()
        a2_dist = self._a2_distribution(a1_dist.sample())
        return a1_dist.entropy() + a2_dist.entropy()
    def kl(self, other):
        # KL of the a1 marginals plus KL of the a2 conditionals evaluated
        # at one a1 sample drawn from this distribution.
        a1_dist = self._a1_distribution()
        a1_terms = a1_dist.kl(other._a1_distribution())
        a1 = a1_dist.sample()
        a2_terms = self._a2_distribution(a1).kl(other._a2_distribution(a1))
        return a1_terms + a2_terms
    def _a1_distribution(self):
        # The a1 head ignores the a1 input, so a zero vector is fed for it.
        BATCH = tf.shape(self.inputs)[0]
        a1_logits, _ = self.model.action_model(
            [self.inputs, tf.zeros((BATCH, 1))])
        a1_dist = Categorical(a1_logits)
        return a1_dist
    def _a2_distribution(self, a1):
        # Condition the a2 head on the float-cast a1 value.
        a1_vec = tf.expand_dims(tf.cast(a1, tf.float32), 1)
        _, a2_logits = self.model.action_model([self.inputs, a1_vec])
        a2_dist = Categorical(a2_logits)
        return a2_dist
class AutoregressiveActionsModel(TFModelV2):
    """Implements the `.action_model` branch required above."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        """Build two Keras models: an obs->context base plus an action head.

        ``base_model`` maps observations to (context, V(s)); ``action_model``
        maps (context, a1) to (a1_logits, a2_logits), which the
        autoregressive distribution queries at sample time.
        """
        super(AutoregressiveActionsModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name)
        if action_space != Tuple([Discrete(2), Discrete(2)]):
            raise ValueError(
                "This model only supports the [2, 2] action space")
        # Inputs
        obs_input = tf.keras.layers.Input(
            shape=obs_space.shape, name="obs_input")
        a1_input = tf.keras.layers.Input(shape=(1, ), name="a1_input")
        ctx_input = tf.keras.layers.Input(
            shape=(num_outputs, ), name="ctx_input")
        # Output of the model (normally 'logits', but for an autoregressive
        # dist this is more like a context/feature layer encoding the obs)
        context = tf.keras.layers.Dense(
            num_outputs,
            name="hidden",
            activation=tf.nn.tanh,
            kernel_initializer=normc_initializer(1.0))(obs_input)
        # V(s)
        value_out = tf.keras.layers.Dense(
            1,
            name="value_out",
            activation=None,
            kernel_initializer=normc_initializer(0.01))(context)
        # P(a1 | obs)
        a1_logits = tf.keras.layers.Dense(
            2,
            name="a1_logits",
            activation=None,
            kernel_initializer=normc_initializer(0.01))(ctx_input)
        # P(a2 | a1)
        # --note: typically you'd want to implement P(a2 | a1, obs) as follows:
        # a2_context = tf.keras.layers.Concatenate(axis=1)(
        #     [ctx_input, a1_input])
        a2_context = a1_input
        a2_hidden = tf.keras.layers.Dense(
            16,
            name="a2_hidden",
            activation=tf.nn.tanh,
            kernel_initializer=normc_initializer(1.0))(a2_context)
        a2_logits = tf.keras.layers.Dense(
            2,
            name="a2_logits",
            activation=None,
            kernel_initializer=normc_initializer(0.01))(a2_hidden)
        # Base layers
        self.base_model = tf.keras.Model(obs_input, [context, value_out])
        self.register_variables(self.base_model.variables)
        self.base_model.summary()
        # Autoregressive action sampler
        self.action_model = tf.keras.Model([ctx_input, a1_input],
                                           [a1_logits, a2_logits])
        self.action_model.summary()
        self.register_variables(self.action_model.variables)
    def forward(self, input_dict, state, seq_lens):
        # Returns the context vector in place of logits; the distribution
        # feeds it back through self.action_model to get the real logits.
        context, self._value_out = self.base_model(input_dict["obs"])
        return context, state
    def value_function(self):
        # Flatten the (batch, 1) value head output to (batch, ).
        return tf.reshape(self._value_out, [-1])
if __name__ == "__main__":
    # Entry point: register the custom model/distribution, then run Tune.
    ray.init()
    args = parser.parse_args()
    ModelCatalog.register_custom_model(
        "autoregressive_model", AutoregressiveActionsModel)
    ModelCatalog.register_custom_action_dist(
        "binary_autoreg_output", BinaryAutoregressiveOutput)
    run_config = {
        "env": CorrelatedActionsEnv,
        "gamma": 0.5,
        "num_gpus": 0,
        "model": {
            "custom_model": "autoregressive_model",
            "custom_action_dist": "binary_autoreg_output",
        },
    }
    tune.run(
        args.run,
        stop={"episode_reward_mean": args.stop},
        config=run_config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/batch_norm_model.py
|
Python
|
"""Example of using a custom model with batch norm."""
import argparse
import ray
from ray import tune
from ray.rllib.models import Model, ModelCatalog
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--num-iters", type=int, default=200)
parser.add_argument("--run", type=str, default="PPO")
class BatchNormModel(Model):
    """Two 256-unit tanh layers, each followed by batch normalization."""
    def _build_layers_v2(self, input_dict, num_outputs, options):
        last_layer = input_dict["obs"]
        hiddens = [256, 256]
        for i, size in enumerate(hiddens):
            label = "fc{}".format(i)
            last_layer = tf.layers.dense(
                last_layer,
                size,
                kernel_initializer=normc_initializer(1.0),
                activation=tf.nn.tanh,
                name=label)
            # Add a batch norm layer
            # "is_training" toggles between batch statistics (training) and
            # the learned moving averages (inference).
            # NOTE(review): the moving-average update ops registered under
            # tf.GraphKeys.UPDATE_OPS are not explicitly run here —
            # presumably the surrounding policy handles them; confirm.
            last_layer = tf.layers.batch_normalization(
                last_layer, training=input_dict["is_training"])
        # Linear output head; also return the last hidden features.
        output = tf.layers.dense(
            last_layer,
            num_outputs,
            kernel_initializer=normc_initializer(0.01),
            activation=None,
            name="fc_out")
        return output, last_layer
if __name__ == "__main__":
    # Entry point: register the batch-norm model and kick off training.
    args = parser.parse_args()
    ray.init()
    ModelCatalog.register_custom_model("bn_model", BatchNormModel)
    if args.run == "DDPG":
        env_name = "Pendulum-v0"
    else:
        env_name = "CartPole-v0"
    experiment_config = {
        "env": env_name,
        "model": {
            "custom_model": "bn_model",
        },
        "num_workers": 0,
    }
    tune.run(
        args.run,
        stop={"training_iteration": args.num_iters},
        config=experiment_config,
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/cartpole_lstm.py
|
Python
|
"""Partially observed variant of the CartPole gym environment.
https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
We delete the velocity component of the state, so that it can only be solved
by a LSTM policy."""
import argparse
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("--stop", type=int, default=200)
parser.add_argument("--use-prev-action-reward", action="store_true")
parser.add_argument("--run", type=str, default="PPO")
class CartPoleStatelessEnv(gym.Env):
    """CartPole variant whose observations omit both velocity components.

    Only cart position and pole angle are returned from step()/reset(), so
    the environment is partially observed and needs a memory-based (LSTM)
    policy. Physics follow the classic Gym CartPole implementation.
    """
    metadata = {
        "render.modes": ["human", "rgb_array"],
        "video.frames_per_second": 60
    }
    def __init__(self, config=None):
        # ``config`` is unused; accepted to match RLlib's env-creator
        # calling convention.
        self.gravity = 9.8
        self.masscart = 1.0
        self.masspole = 0.1
        self.total_mass = (self.masspole + self.masscart)
        self.length = 0.5  # actually half the pole's length
        self.polemass_length = (self.masspole * self.length)
        self.force_mag = 10.0
        self.tau = 0.02  # seconds between state updates
        # Angle at which to fail the episode
        self.theta_threshold_radians = 12 * 2 * math.pi / 360
        self.x_threshold = 2.4
        # Observation bounds cover only (position, angle); the velocity
        # entries of the underlying 4D state are hidden from the agent.
        high = np.array([
            self.x_threshold * 2,
            self.theta_threshold_radians * 2,
        ])
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Box(-high, high)
        self.seed()
        self.viewer = None
        self.state = None
        self.steps_beyond_done = None
    def seed(self, seed=None):
        """Seed the internal RNG; returns the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def step(self, action):
        """Advance the physics one tau-step.

        Returns (obs, reward, done, info) where obs is only (x, theta).
        """
        assert self.action_space.contains(
            action), "%r (%s) invalid" % (action, type(action))
        state = self.state
        x, x_dot, theta, theta_dot = state
        force = self.force_mag if action == 1 else -self.force_mag
        costheta = math.cos(theta)
        sintheta = math.sin(theta)
        # Standard cart-pole equations of motion.
        temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta
                ) / self.total_mass
        thetaacc = (self.gravity * sintheta - costheta * temp) / (
            self.length *
            (4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass)
        )
        xacc = (temp -
                self.polemass_length * thetaacc * costheta / self.total_mass)
        # Explicit Euler integration of the full 4D state.
        x = x + self.tau * x_dot
        x_dot = x_dot + self.tau * xacc
        theta = theta + self.tau * theta_dot
        theta_dot = theta_dot + self.tau * thetaacc
        self.state = (x, x_dot, theta, theta_dot)
        done = (x < -self.x_threshold or x > self.x_threshold
                or theta < -self.theta_threshold_radians
                or theta > self.theta_threshold_radians)
        done = bool(done)
        if not done:
            reward = 1.0
        elif self.steps_beyond_done is None:
            # Pole just fell!
            self.steps_beyond_done = 0
            reward = 1.0
        else:
            # Stepping past done: count it but stop rewarding.
            self.steps_beyond_done += 1
            reward = 0.0
        # Expose only position and angle (state indices 0 and 2).
        rv = np.r_[self.state[0], self.state[2]]
        return rv, reward, done, {}
    def reset(self):
        """Reset the 4D state uniformly in [-0.05, 0.05]; return the 2D obs."""
        self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4, ))
        self.steps_beyond_done = None
        rv = np.r_[self.state[0], self.state[2]]
        return rv
    def render(self, mode="human"):
        """Draw the cart/pole with gym's classic-control renderer."""
        screen_width = 600
        screen_height = 400
        world_width = self.x_threshold * 2
        scale = screen_width / world_width
        carty = 100  # TOP OF CART
        polewidth = 10.0
        polelen = scale * 1.0
        cartwidth = 50.0
        cartheight = 30.0
        if self.viewer is None:
            # Lazily build the viewer and static geometry on first render.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            l, r, t, b = (-cartwidth / 2, cartwidth / 2, cartheight / 2,
                          -cartheight / 2)
            axleoffset = cartheight / 4.0
            cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            self.carttrans = rendering.Transform()
            cart.add_attr(self.carttrans)
            self.viewer.add_geom(cart)
            l, r, t, b = (-polewidth / 2, polewidth / 2,
                          polelen - polewidth / 2, -polewidth / 2)
            pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            pole.set_color(.8, .6, .4)
            self.poletrans = rendering.Transform(translation=(0, axleoffset))
            pole.add_attr(self.poletrans)
            pole.add_attr(self.carttrans)
            self.viewer.add_geom(pole)
            self.axle = rendering.make_circle(polewidth / 2)
            self.axle.add_attr(self.poletrans)
            self.axle.add_attr(self.carttrans)
            self.axle.set_color(.5, .5, .8)
            self.viewer.add_geom(self.axle)
            self.track = rendering.Line((0, carty), (screen_width, carty))
            self.track.set_color(0, 0, 0)
            self.viewer.add_geom(self.track)
        if self.state is None:
            return None
        x = self.state
        cartx = x[0] * scale + screen_width / 2.0  # MIDDLE OF CART
        self.carttrans.set_translation(cartx, carty)
        self.poletrans.set_rotation(-x[2])
        return self.viewer.render(return_rgb_array=mode == "rgb_array")
    def close(self):
        # Release the rendering window if one was opened.
        if self.viewer:
            self.viewer.close()
if __name__ == "__main__":
    # Imports are local so the env definition stays importable without ray.
    import ray
    from ray import tune
    args = parser.parse_args()
    tune.register_env("cartpole_stateless", lambda _: CartPoleStatelessEnv())
    ray.init()
    # Per-algorithm hyperparameters.
    per_run_config = {
        "PPO": {
            "num_sgd_iter": 5,
            "vf_share_layers": True,
            "vf_loss_coeff": 0.0001,
        },
        "IMPALA": {
            "num_workers": 2,
            "num_gpus": 0,
            "vf_loss_coeff": 0.01,
        },
    }
    # Shared settings: the registered env plus an LSTM-enabled model.
    shared = {
        "env": "cartpole_stateless",
        "model": {
            "use_lstm": True,
            "lstm_use_prev_action_reward": args.use_prev_action_reward,
        },
    }
    tune.run(
        args.run,
        stop={"episode_reward_mean": args.stop},
        config=dict(per_run_config[args.run], **shared),
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/centralized_critic.py
|
Python
|
"""An example of customizing PPO to leverage a centralized critic.
Here the model and policy are hard-coded to implement a centralized critic
for TwoStepGame, but you can adapt this for your own use cases.
Compared to simply running `twostep_game.py --run=PPO`, this centralized
critic version reaches vf_explained_variance=1.0 more stably since it takes
into account the opponent actions as well as the policy's. Note that this is
also using two independent policies instead of weight-sharing with one.
See also: centralized_critic_2.py for a simpler approach that instead
modifies the environment.
"""
import argparse
import numpy as np
from gym.spaces import Discrete
from ray import tune
from ray.rllib.agents.ppo.ppo import PPOTrainer
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy, KLCoeffMixin, \
PPOLoss, BEHAVIOUR_LOGITS
from ray.rllib.evaluation.postprocessing import compute_advantages, \
Postprocessing
from ray.rllib.examples.twostep_game import TwoStepGame
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import LearningRateSchedule, \
EntropyCoeffSchedule, ACTION_LOGP
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.utils.tf_ops import make_tf_callable
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
OPPONENT_OBS = "opponent_obs"
OPPONENT_ACTION = "opponent_action"
parser = argparse.ArgumentParser()
parser.add_argument("--stop", type=int, default=100000)
class CentralizedCriticModel(TFModelV2):
    """Multi-agent model that implements a centralized VF."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(CentralizedCriticModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name)
        # Base of the model
        self.model = FullyConnectedNetwork(obs_space, action_space,
                                           num_outputs, model_config, name)
        self.register_variables(self.model.variables())
        # Central VF maps (obs, opp_obs, opp_act) -> vf_pred
        # NOTE(review): input widths are hard-coded for TwoStepGame
        # (6-dim obs, 2 discrete actions) — adapt for other envs.
        obs = tf.keras.layers.Input(shape=(6, ), name="obs")
        opp_obs = tf.keras.layers.Input(shape=(6, ), name="opp_obs")
        opp_act = tf.keras.layers.Input(shape=(2, ), name="opp_act")
        concat_obs = tf.keras.layers.Concatenate(axis=1)(
            [obs, opp_obs, opp_act])
        central_vf_dense = tf.keras.layers.Dense(
            16, activation=tf.nn.tanh, name="c_vf_dense")(concat_obs)
        central_vf_out = tf.keras.layers.Dense(
            1, activation=None, name="c_vf_out")(central_vf_dense)
        self.central_vf = tf.keras.Model(
            inputs=[obs, opp_obs, opp_act], outputs=central_vf_out)
        self.register_variables(self.central_vf.variables)
    def forward(self, input_dict, state, seq_lens):
        # Action computation uses only the agent's own observation.
        return self.model.forward(input_dict, state, seq_lens)
    def central_value_function(self, obs, opponent_obs, opponent_actions):
        """Centralized V(s): conditions on both agents' obs plus the
        opponent's one-hot-encoded action; returns a (batch, ) tensor."""
        return tf.reshape(
            self.central_vf(
                [obs, opponent_obs,
                 tf.one_hot(opponent_actions, 2)]), [-1])
    def value_function(self):
        return self.model.value_function()  # not used
class CentralizedValueMixin:
    """Mixin exposing the model's central value function as a TF callable.

    After init, ``policy.compute_central_vf(obs, opp_obs, opp_act)`` runs
    the centralized critic through the policy's session.
    """

    def __init__(self):
        wrap = make_tf_callable(self.get_session())
        self.compute_central_vf = wrap(self.model.central_value_function)
# Grabs the opponent obs/act and includes it in the experience train_batch,
# and computes GAE using the central vf predictions.
def centralized_critic_postprocessing(policy,
                                      sample_batch,
                                      other_agent_batches=None,
                                      episode=None):
    """Trajectory postprocessor: add opponent data, recompute advantages.

    Before the loss is initialized, the batch is filled with zeros so the
    placeholder shapes can still be inferred.
    """
    if policy.loss_initialized():
        assert sample_batch["dones"][-1], \
            "Not implemented for train_batch_mode=truncate_episodes"
        assert other_agent_batches is not None
        # Exactly one opponent is assumed (two-player game).
        [(_, opponent_batch)] = list(other_agent_batches.values())
        # also record the opponent obs and actions in the trajectory
        sample_batch[OPPONENT_OBS] = opponent_batch[SampleBatch.CUR_OBS]
        sample_batch[OPPONENT_ACTION] = opponent_batch[SampleBatch.ACTIONS]
        # overwrite default VF prediction with the central VF
        sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(
            sample_batch[SampleBatch.CUR_OBS], sample_batch[OPPONENT_OBS],
            sample_batch[OPPONENT_ACTION])
    else:
        # policy hasn't initialized yet, use zeros
        sample_batch[OPPONENT_OBS] = np.zeros_like(
            sample_batch[SampleBatch.CUR_OBS])
        sample_batch[OPPONENT_ACTION] = np.zeros_like(
            sample_batch[SampleBatch.ACTIONS])
        sample_batch[SampleBatch.VF_PREDS] = np.zeros_like(
            sample_batch[SampleBatch.ACTIONS], dtype=np.float32)
    # last_r=0.0 is valid because the episode is guaranteed done (see the
    # assert above), so there is no value to bootstrap from.
    train_batch = compute_advantages(
        sample_batch,
        0.0,
        policy.config["gamma"],
        policy.config["lambda"],
        use_gae=policy.config["use_gae"])
    return train_batch
# Copied from PPO but optimizing the central value function
def loss_with_central_critic(policy, model, dist_class, train_batch):
    """PPO loss that swaps the decentralized VF for the central critic."""
    # Attach compute_central_vf here: loss-build time is the first point
    # where both the session and the model are available on the policy.
    CentralizedValueMixin.__init__(policy)
    logits, state = model.from_batch(train_batch)
    action_dist = dist_class(logits, model)
    # Value predictions used by the loss come from the centralized critic.
    policy.central_value_out = policy.model.central_value_function(
        train_batch[SampleBatch.CUR_OBS], train_batch[OPPONENT_OBS],
        train_batch[OPPONENT_ACTION])
    policy.loss_obj = PPOLoss(
        policy.action_space,
        dist_class,
        model,
        train_batch[Postprocessing.VALUE_TARGETS],
        train_batch[Postprocessing.ADVANTAGES],
        train_batch[SampleBatch.ACTIONS],
        train_batch[BEHAVIOUR_LOGITS],
        train_batch[ACTION_LOGP],
        train_batch[SampleBatch.VF_PREDS],
        action_dist,
        policy.central_value_out,
        policy.kl_coeff,
        # No RNN here, so every timestep in the batch is valid.
        tf.ones_like(train_batch[Postprocessing.ADVANTAGES], dtype=tf.bool),
        entropy_coeff=policy.entropy_coeff,
        clip_param=policy.config["clip_param"],
        vf_clip_param=policy.config["vf_clip_param"],
        vf_loss_coeff=policy.config["vf_loss_coeff"],
        use_gae=policy.config["use_gae"],
        model_config=policy.config["model"])
    return policy.loss_obj.loss
def setup_mixins(policy, obs_space, action_space, config):
    """Initialize the PPO mixins before the loss is built (copied from PPO)."""
    # copied from PPO
    KLCoeffMixin.__init__(policy, config)
    EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
                                  config["entropy_coeff_schedule"])
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
def central_vf_stats(policy, train_batch, grads):
    """Extra grad-time stats: explained variance of the central VF."""
    ev = explained_variance(
        train_batch[Postprocessing.VALUE_TARGETS],
        policy.central_value_out)
    return {"vf_explained_var": ev}
# CCPPO: PPO policy with loss/postprocessing swapped for the
# centralized-critic variants defined above.
CCPPO = PPOTFPolicy.with_updates(
    name="CCPPO",
    postprocess_fn=centralized_critic_postprocessing,
    loss_fn=loss_with_central_critic,
    before_loss_init=setup_mixins,
    grad_stats_fn=central_vf_stats,
    mixins=[
        LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin,
        CentralizedValueMixin
    ])
# Trainer running the centralized-critic policy with PPO's execution logic.
CCTrainer = PPOTrainer.with_updates(name="CCPPOTrainer", default_policy=CCPPO)
if __name__ == "__main__":
    args = parser.parse_args()
    ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel)
    tune.run(
        CCTrainer,
        stop={
            "timesteps_total": args.stop,
            # Stop early once near-optimal return is reached.
            "episode_reward_mean": 7.99,
        },
        config={
            "env": TwoStepGame,
            # The postprocessor asserts on full episodes — don't truncate.
            "batch_mode": "complete_episodes",
            "eager": False,
            "num_workers": 0,
            "multiagent": {
                # Two independent policies (no weight sharing).
                "policies": {
                    "pol1": (None, Discrete(6), TwoStepGame.action_space, {}),
                    "pol2": (None, Discrete(6), TwoStepGame.action_space, {}),
                },
                "policy_mapping_fn": lambda x: "pol1" if x == 0 else "pol2",
            },
            "model": {
                "custom_model": "cc_model",
            },
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/centralized_critic_2.py
|
Python
|
"""An example of implementing a centralized critic by modifying the env.
The advantage of this approach is that it's very simple and you don't have to
change the algorithm at all -- just use an env wrapper and custom model.
However, it is a bit less principled in that you have to change the agent
observation spaces and the environment.
See also: centralized_critic.py for an alternative approach that instead
modifies the policy to add a centralized value function.
"""
import numpy as np
from gym.spaces import Box, Dict, Discrete
import argparse
from ray import tune
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.examples.twostep_game import TwoStepGame
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--stop", type=int, default=100000)
class CentralizedCriticModel(TFModelV2):
    """Multi-agent model that implements a centralized VF.
    It assumes the observation is a dict with 'own_obs' and 'opponent_obs', the
    former of which can be used for computing actions (i.e., decentralized
    execution), and the latter for optimization (i.e., centralized learning).
    This model has two parts:
    - An action model that looks at just 'own_obs' to compute actions
    - A value model that also looks at the 'opponent_obs' / 'opponent_action'
      to compute the value (it does this by using the 'obs_flat' tensor).
    """
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(CentralizedCriticModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name)
        # Policy branch: sees only the agent's own (one-hot) observation.
        self.action_model = FullyConnectedNetwork(
            Box(low=0, high=1, shape=(6, )),  # one-hot encoded Discrete(6)
            action_space,
            num_outputs,
            model_config,
            name + "_action")
        self.register_variables(self.action_model.variables())
        # Value branch: sees the full flattened (global) observation.
        self.value_model = FullyConnectedNetwork(obs_space, action_space, 1,
                                                 model_config, name + "_vf")
        self.register_variables(self.value_model.variables())
    def forward(self, input_dict, state, seq_lens):
        # Run the value branch on the global obs and cache its output; the
        # returned logits come from the own-obs action branch.
        self._value_out, _ = self.value_model({
            "obs": input_dict["obs_flat"]
        }, state, seq_lens)
        return self.action_model({
            "obs": input_dict["obs"]["own_obs"]
        }, state, seq_lens)
    def value_function(self):
        return tf.reshape(self._value_out, [-1])
class GlobalObsTwoStepGame(MultiAgentEnv):
    """TwoStepGame wrapper that gives every agent a global observation.

    Each agent sees its own obs plus the opponent's obs; the opponent's
    action slot is zero-filled here and written later by fill_in_actions.
    """

    action_space = Discrete(2)
    observation_space = Dict({
        "own_obs": Discrete(6),
        "opponent_obs": Discrete(6),
        "opponent_action": Discrete(2),
    })

    def __init__(self, env_config):
        self.env = TwoStepGame(env_config)

    def reset(self):
        return self.to_global_obs(self.env.reset())

    def step(self, action_dict):
        obs, rewards, dones, infos = self.env.step(action_dict)
        return self.to_global_obs(obs), rewards, dones, infos

    def to_global_obs(self, obs_dict):
        def view_for(me, other):
            # One agent's global view of the joint observation.
            return {
                "own_obs": obs_dict[me],
                "opponent_obs": obs_dict[other],
                "opponent_action": 0,  # populated by fill_in_actions
            }

        first, second = self.env.agent_1, self.env.agent_2
        return {
            first: view_for(first, second),
            second: view_for(second, first),
        }
def fill_in_actions(info):
    """Postprocess-trajectory callback that writes the opponent's one-hot
    actions into the last two observation columns.

    Leave this callback out if opponent actions are not needed.
    """
    obs_to_patch = info["post_batch"][SampleBatch.CUR_OBS]
    opponent_id = 1 if info["agent_id"] == 0 else 0
    encoder = ModelCatalog.get_preprocessor_for_space(Discrete(2))
    # One-hot encode every opponent action along the trajectory.
    _, opponent_batch = info["all_pre_batches"][opponent_id]
    encoded_actions = np.array([
        encoder.transform(act)
        for act in opponent_batch[SampleBatch.ACTIONS]
    ])
    obs_to_patch[:, -2:] = encoded_actions
if __name__ == "__main__":
    args = parser.parse_args()
    ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel)
    tune.run(
        "PPO",
        stop={
            "timesteps_total": args.stop,
            # Stop early once near-optimal return is reached.
            "episode_reward_mean": 7.99,
        },
        config={
            "env": GlobalObsTwoStepGame,
            # fill_in_actions needs whole episodes in each sample batch.
            "batch_mode": "complete_episodes",
            "callbacks": {
                "on_postprocess_traj": fill_in_actions,
            },
            "num_workers": 0,
            "multiagent": {
                "policies": {
                    "pol1": (None, GlobalObsTwoStepGame.observation_space,
                             GlobalObsTwoStepGame.action_space, {}),
                    "pol2": (None, GlobalObsTwoStepGame.observation_space,
                             GlobalObsTwoStepGame.action_space, {}),
                },
                "policy_mapping_fn": lambda x: "pol1" if x == 0 else "pol2",
            },
            "model": {
                "custom_model": "cc_model",
            },
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_env.py
|
Python
|
"""Example of a custom gym environment and model. Run this for a demo.
This example shows:
- using a custom environment
- using a custom model
- using Tune for grid search
You can visualize experiment results in ~/ray_results using TensorBoard.
"""
import numpy as np
import gym
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
from gym.spaces import Discrete, Box
import ray
from ray import tune
from ray.rllib.utils import try_import_tf
from ray.tune import grid_search
tf = try_import_tf()
class SimpleCorridor(gym.Env):
    """Corridor env: walk right until reaching the configured end position.

    Action 1 moves right, action 0 moves left (clipped at position 0); the
    episode ends with reward 1 once ``corridor_length`` is reached.
    """

    def __init__(self, config):
        self.end_pos = config["corridor_length"]
        self.cur_pos = 0
        self.action_space = Discrete(2)
        self.observation_space = Box(
            0.0, self.end_pos, shape=(1, ), dtype=np.float32)

    def reset(self):
        self.cur_pos = 0
        return [self.cur_pos]

    def step(self, action):
        assert action in [0, 1], action
        if action == 1:
            self.cur_pos += 1
        elif self.cur_pos > 0:
            self.cur_pos -= 1
        done = self.cur_pos >= self.end_pos
        reward = 1 if done else 0
        return [self.cur_pos], reward, done, {}
class CustomModel(TFModelV2):
    """Example of a custom model that just delegates to a fc-net."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(CustomModel, self).__init__(obs_space, action_space, num_outputs,
                                          model_config, name)
        # Wrap the built-in fully-connected net and expose its variables.
        self.model = FullyConnectedNetwork(obs_space, action_space,
                                           num_outputs, model_config, name)
        self.register_variables(self.model.variables())
    def forward(self, input_dict, state, seq_lens):
        # Pure pass-through to the wrapped network.
        return self.model.forward(input_dict, state, seq_lens)
    def value_function(self):
        return self.model.value_function()
if __name__ == "__main__":
    # Can also register the env creator function explicitly with:
    # register_env("corridor", lambda config: SimpleCorridor(config))
    ray.init()
    ModelCatalog.register_custom_model("my_model", CustomModel)
    tune.run(
        "PPO",
        stop={
            "timesteps_total": 10000,
        },
        config={
            "env": SimpleCorridor,  # or "corridor" if registered above
            "model": {
                "custom_model": "my_model",
            },
            "vf_share_layers": True,
            "lr": grid_search([1e-2, 1e-4, 1e-6]),  # try different lrs
            "num_workers": 1,  # parallelism
            "env_config": {
                "corridor_length": 5,
            },
        },
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_fast_model.py
|
Python
|
"""Example of using a custom image env and model.
Both the model and env are trivial (and super-fast), so they are useful
for running perf microbenchmarks.
"""
from gym.spaces import Discrete, Box
import gym
import numpy as np
import ray
from ray.rllib.models import Model, ModelCatalog
from ray.tune import run_experiments, sample_from
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class FastModel(Model):
    """Trivial constant model for perf microbenchmarks: its output is a
    single learned scalar bias broadcast over the batch."""
    def _build_layers_v2(self, input_dict, num_outputs, options):
        bias = tf.get_variable(
            dtype=tf.float32,
            name="bias",
            initializer=tf.zeros_initializer,
            shape=())
        # Broadcast the scalar to (batch, num_outputs); the observation
        # values are ignored — only the batch size is read.
        output = bias + tf.zeros([tf.shape(input_dict["obs"])[0], num_outputs])
        return output, output
class FastImageEnv(gym.Env):
    """Trivial image env for perf microbenchmarks.

    Always emits the same all-zero 84x84x4 frame with reward 1; episodes
    terminate after 1000 steps.
    """

    def __init__(self, config):
        self.zeros = np.zeros((84, 84, 4))
        self.action_space = Discrete(2)
        self.observation_space = Box(
            0.0, 1.0, shape=(84, 84, 4), dtype=np.float32)
        self.i = 0

    def reset(self):
        self.i = 0
        return self.zeros

    def step(self, action):
        self.i += 1
        done = self.i > 1000
        return self.zeros, 1, done, {}
if __name__ == "__main__":
    ray.init()
    ModelCatalog.register_custom_model("fast_model", FastModel)
    run_experiments({
        "demo": {
            "run": "IMPALA",
            "env": FastImageEnv,
            "config": {
                "compress_observations": True,
                "model": {
                    "custom_model": "fast_model"
                },
                "num_gpus": 0,
                "num_workers": 2,
                "num_envs_per_worker": 10,
                "num_data_loader_buffers": 1,
                "num_aggregation_workers": 1,
                "broadcast_interval": 50,
                "sample_batch_size": 100,
                # Scale the train batch with the number of GPUs in use.
                "train_batch_size": sample_from(
                    lambda spec: 1000 * max(1, spec.config.num_gpus)),
                # Replay a cached sample batch rather than really sampling.
                "_fake_sampler": True,
            },
        },
    })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_keras_model.py
|
Python
|
"""Example of using a custom ModelV2 Keras-style model."""
import argparse
import ray
from ray import tune
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.agents.dqn.distributional_q_model import DistributionalQModel
from ray.rllib.utils import try_import_tf
from ray.rllib.models.tf.visionnet_v2 import VisionNetwork as MyVisionNetwork
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, default="DQN") # Try PG, PPO, DQN
parser.add_argument("--stop", type=int, default=200)
parser.add_argument("--use_vision_network", action="store_true")
class MyKerasModel(TFModelV2):
    """Custom model for policy gradient algorithms."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(MyKerasModel, self).__init__(obs_space, action_space,
                                           num_outputs, model_config, name)
        self.inputs = tf.keras.layers.Input(
            shape=obs_space.shape, name="observations")
        # Single shared hidden layer feeding both policy and value heads.
        layer_1 = tf.keras.layers.Dense(
            128,
            name="my_layer1",
            activation=tf.nn.relu,
            kernel_initializer=normc_initializer(1.0))(self.inputs)
        # Policy head: raw (linear) action logits.
        layer_out = tf.keras.layers.Dense(
            num_outputs,
            name="my_out",
            activation=None,
            kernel_initializer=normc_initializer(0.01))(layer_1)
        # Value head: scalar V(s).
        value_out = tf.keras.layers.Dense(
            1,
            name="value_out",
            activation=None,
            kernel_initializer=normc_initializer(0.01))(layer_1)
        self.base_model = tf.keras.Model(self.inputs, [layer_out, value_out])
        self.register_variables(self.base_model.variables)
    def forward(self, input_dict, state, seq_lens):
        # Cache the value output for value_function(); return the logits.
        model_out, self._value_out = self.base_model(input_dict["obs"])
        return model_out, state
    def value_function(self):
        # Flatten the (batch, 1) value head output to (batch, ).
        return tf.reshape(self._value_out, [-1])
class MyKerasQModel(DistributionalQModel):
    """Custom model for DQN."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name, **kw):
        super(MyKerasQModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name, **kw)
        # Define the core model layers which will be used by the other
        # output heads of DistributionalQModel
        self.inputs = tf.keras.layers.Input(
            shape=obs_space.shape, name="observations")
        layer_1 = tf.keras.layers.Dense(
            128,
            name="my_layer1",
            activation=tf.nn.relu,
            kernel_initializer=normc_initializer(1.0))(self.inputs)
        # Unlike the PG model above, this emits a relu feature embedding;
        # the Q heads are attached by the DistributionalQModel base class.
        layer_out = tf.keras.layers.Dense(
            num_outputs,
            name="my_out",
            activation=tf.nn.relu,
            kernel_initializer=normc_initializer(1.0))(layer_1)
        self.base_model = tf.keras.Model(self.inputs, layer_out)
        self.register_variables(self.base_model.variables)
    # Implement the core forward method
    def forward(self, input_dict, state, seq_lens):
        model_out = self.base_model(input_dict["obs"])
        return model_out, state
if __name__ == "__main__":
    ray.init()
    args = parser.parse_args()
    # Pick the model classes and env once, up front.
    if args.use_vision_network:
        pg_model, q_model, env_name = (
            MyVisionNetwork, MyVisionNetwork, "BreakoutNoFrameskip-v4")
    else:
        pg_model, q_model, env_name = (
            MyKerasModel, MyKerasQModel, "CartPole-v0")
    ModelCatalog.register_custom_model("keras_model", pg_model)
    ModelCatalog.register_custom_model("keras_q_model", q_model)
    # DQN needs the Q-model variant; everything else uses the PG model.
    model_name = "keras_q_model" if args.run == "DQN" else "keras_model"
    tune.run(
        args.run,
        stop={"episode_reward_mean": args.stop},
        config={
            "env": env_name,
            "num_gpus": 0,
            "model": {
                "custom_model": model_name,
            },
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_keras_rnn_model.py
|
Python
|
"""Example of using a custom RNN keras model."""
import gym
from gym.spaces import Discrete
import numpy as np
import random
import argparse
import ray
from ray import tune
from ray.tune.registry import register_env
from ray.rllib.models import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.recurrent_tf_modelv2 import RecurrentTFModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, default="PPO")
parser.add_argument("--env", type=str, default="RepeatAfterMeEnv")
parser.add_argument("--stop", type=int, default=90)
class MyKerasRNN(RecurrentTFModelV2):
    """Example of using the Keras functional API to define a RNN model.

    Observations flow through a dense layer into an LSTM; the LSTM output
    feeds separate policy-logits and value-function heads.
    """
    def __init__(self,
                 obs_space,
                 action_space,
                 num_outputs,
                 model_config,
                 name,
                 hiddens_size=256,
                 cell_size=64):
        super(MyKerasRNN, self).__init__(obs_space, action_space, num_outputs,
                                         model_config, name)
        self.cell_size = cell_size
        # Define input layers: [batch, time, obs] observations plus the
        # LSTM h/c states and the per-sequence length tensor.
        input_layer = tf.keras.layers.Input(
            shape=(None, obs_space.shape[0]), name="inputs")
        state_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")
        state_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")
        seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)
        # Preprocess observation with a hidden layer and send to LSTM cell
        dense1 = tf.keras.layers.Dense(
            hiddens_size, activation=tf.nn.relu, name="dense1")(input_layer)
        lstm_out, state_h, state_c = tf.keras.layers.LSTM(
            cell_size, return_sequences=True, return_state=True, name="lstm")(
                inputs=dense1,
                # Mask out padded timesteps beyond each sequence's length.
                mask=tf.sequence_mask(seq_in),
                initial_state=[state_in_h, state_in_c])
        # Postprocess LSTM output with another hidden layer and compute values
        logits = tf.keras.layers.Dense(
            self.num_outputs,
            activation=tf.keras.activations.linear,
            name="logits")(lstm_out)
        values = tf.keras.layers.Dense(
            1, activation=None, name="values")(lstm_out)
        # Create the RNN model
        self.rnn_model = tf.keras.Model(
            inputs=[input_layer, seq_in, state_in_h, state_in_c],
            outputs=[logits, values, state_h, state_c])
        self.register_variables(self.rnn_model.variables)
        self.rnn_model.summary()
    @override(RecurrentTFModelV2)
    def forward_rnn(self, inputs, state, seq_lens):
        # Run the padded batch through the Keras model; cache the value-head
        # output so value_function() can return it later.
        model_out, self._value_out, h, c = self.rnn_model([inputs, seq_lens] +
                                                          state)
        return model_out, [h, c]
    @override(ModelV2)
    def get_initial_state(self):
        # Zero-initialized h and c states for a fresh sequence.
        return [
            np.zeros(self.cell_size, np.float32),
            np.zeros(self.cell_size, np.float32),
        ]
    @override(ModelV2)
    def value_function(self):
        # Flatten the [batch, time, 1] value output to a 1-D tensor.
        return tf.reshape(self._value_out, [-1])
class RepeatInitialEnv(gym.Env):
    """Simple env in which the policy learns to repeat the initial observation
    seen at timestep 0.

    After reset the agent sees the secret token exactly once; every later
    observation is 0, so the token must be memorized to act correctly.
    """

    def __init__(self):
        self.observation_space = Discrete(2)
        self.action_space = Discrete(2)
        self.token = None
        self.num_steps = 0

    def reset(self):
        """Draw a fresh token; it is the only informative observation."""
        self.num_steps = 0
        self.token = random.choice([0, 1])
        return self.token

    def step(self, action):
        """Reward +1 for matching the hidden token, -1 otherwise."""
        reward = 1 if action == self.token else -1
        self.num_steps += 1
        # Fixed horizon: the episode ends after step 101.
        return 0, reward, self.num_steps > 100, {}
class RepeatAfterMeEnv(gym.Env):
    """Simple env in which the policy learns to repeat a previous observation
    token after a given delay."""

    def __init__(self, config):
        self.observation_space = Discrete(2)
        self.action_space = Discrete(2)
        self.delay = config["repeat_delay"]
        assert self.delay >= 1, "delay must be at least 1"
        self.history = []

    def reset(self):
        # Pre-fill the history with zeros so the first `delay` targets exist.
        self.history = [0] * self.delay
        return self._next_obs()

    def step(self, action):
        # The correct action is the token observed `delay` steps ago.
        target = self.history[-(1 + self.delay)]
        reward = 1 if action == target else -1
        done = len(self.history) > 100
        return self._next_obs(), reward, done, {}

    def _next_obs(self):
        """Emit a fresh random token and record it in the history."""
        token = random.choice([0, 1])
        self.history.append(token)
        return token
# Script entry point: register the RNN model and the two example envs,
# then launch training via Tune.
if __name__ == "__main__":
    ray.init()
    args = parser.parse_args()
    ModelCatalog.register_custom_model("rnn", MyKerasRNN)
    register_env("RepeatAfterMeEnv", lambda c: RepeatAfterMeEnv(c))
    register_env("RepeatInitialEnv", lambda _: RepeatInitialEnv())
    tune.run(
        args.run,
        stop={"episode_reward_mean": args.stop},
        config={
            "env": args.env,
            "env_config": {
                "repeat_delay": 2,
            },
            "gamma": 0.9,
            # Sample with many parallel envs in the driver process only.
            "num_workers": 0,
            "num_envs_per_worker": 20,
            "entropy_coeff": 0.001,
            "num_sgd_iter": 5,
            "vf_loss_coeff": 1e-5,
            "model": {
                "custom_model": "rnn",
                # Truncate backprop-through-time sequences at 20 steps.
                "max_seq_len": 20,
            },
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_loss.py
|
Python
|
"""Example of using custom_loss() with an imitation learning loss.
The default input file is too small to learn a good policy, but you can
generate new experiences for IL training as follows:
To generate experiences:
$ ./train.py --run=PG --config='{"output": "/tmp/cartpole"}' --env=CartPole-v0
To train on experiences with joint PG + IL loss:
$ python custom_loss.py --input-files=/tmp/cartpole
"""
import argparse
import os
import ray
from ray import tune
from ray.rllib.models import Model, ModelCatalog
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork
from ray.rllib.models.model import restore_original_dimensions
from ray.rllib.offline import JsonReader
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--iters", type=int, default=200)
parser.add_argument(
"--input-files",
type=str,
default=os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../tests/data/cartpole_small"))
class CustomLossModel(Model):
    """Custom model that adds an imitation loss on top of the policy loss."""
    def _build_layers_v2(self, input_dict, num_outputs, options):
        # Keep a handle on the obs tensor and build the fcnet inside a
        # reusable variable scope so custom_loss() can rebuild a
        # weight-sharing copy of the same graph.
        self.obs_in = input_dict["obs"]
        with tf.variable_scope("shared", reuse=tf.AUTO_REUSE):
            self.fcnet = FullyConnectedNetwork(input_dict, self.obs_space,
                                               self.action_space, num_outputs,
                                               options)
        return self.fcnet.outputs, self.fcnet.last_layer
    def custom_loss(self, policy_loss, loss_inputs):
        # create a new input reader per worker
        reader = JsonReader(self.options["custom_options"]["input_files"])
        input_ops = reader.tf_input_ops()
        # define a secondary loss by building a graph copy with weight sharing
        obs = tf.cast(input_ops["obs"], tf.float32)
        logits, _ = self._build_layers_v2({
            "obs": restore_original_dimensions(obs, self.obs_space)
        }, self.num_outputs, self.options)
        # You can also add self-supervised losses easily by referencing tensors
        # created during _build_layers_v2(). For example, an autoencoder-style
        # loss can be added as follows:
        # ae_loss = squared_diff(
        #     loss_inputs["obs"], Decoder(self.fcnet.last_layer))
        print("FYI: You can also use these tensors: {}, ".format(loss_inputs))
        # compute the IL loss: negative log-likelihood of the demonstrated
        # actions under the current policy, weighted 10x vs. the RL loss.
        action_dist = Categorical(logits, self.options)
        self.policy_loss = policy_loss
        self.imitation_loss = tf.reduce_mean(
            -action_dist.logp(input_ops["actions"]))
        return policy_loss + 10 * self.imitation_loss
    def custom_stats(self):
        # Surface both loss terms in the training results.
        return {
            "policy_loss": self.policy_loss,
            "imitation_loss": self.imitation_loss,
        }
# Script entry point: register the model and train PG with the joint
# policy-gradient + imitation loss.
if __name__ == "__main__":
    ray.init()
    args = parser.parse_args()
    ModelCatalog.register_custom_model("custom_loss", CustomLossModel)
    tune.run(
        "PG",
        stop={
            "training_iteration": args.iters,
        },
        config={
            "env": "CartPole-v0",
            "num_workers": 0,
            "model": {
                "custom_model": "custom_loss",
                # Offline experience files consumed by the imitation loss.
                "custom_options": {
                    "input_files": args.input_files,
                },
            },
        },
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_metrics_and_callbacks.py
|
Python
|
"""Example of using RLlib's debug callbacks.
Here we use callbacks to track the average CartPole pole angle magnitude as a
custom metric.
"""
import argparse
import numpy as np
import ray
from ray import tune
def on_episode_start(info):
    """Initialize per-episode storage for pole-angle samples."""
    ep = info["episode"]
    print("episode {} started".format(ep.episode_id))
    ep.user_data["pole_angles"] = []
def on_episode_step(info):
    """Track the absolute pole angle (obs index 2) at every step."""
    ep = info["episode"]
    angle = abs(ep.last_observation_for()[2])
    # Sanity check: with no observation filter, raw and filtered obs match.
    assert angle == abs(ep.last_raw_obs_for()[2])
    ep.user_data["pole_angles"].append(angle)
def on_episode_end(info):
    """Record the episode's mean absolute pole angle as a custom metric."""
    ep = info["episode"]
    mean_angle = np.mean(ep.user_data["pole_angles"])
    print("episode {} ended with length {} and pole angles {}".format(
        ep.episode_id, ep.length, mean_angle))
    ep.custom_metrics["pole_angle"] = mean_angle
def on_sample_end(info):
    """Log the size of each sample batch returned by a rollout worker."""
    batch = info["samples"]
    print("returned sample batch of size {}".format(batch.count))
def on_train_result(info):
    """Log per-iteration training progress and tag the result dict."""
    result = info["result"]
    print("trainer.train() result: {} -> {} episodes".format(
        info["trainer"], result["episodes_this_iter"]))
    # you can mutate the result dict to add new fields to return
    result["callback_ok"] = True
def on_postprocess_traj(info):
    """Count how many postprocessed batches each episode contributed."""
    metrics = info["episode"].custom_metrics
    print("postprocessed {} steps".format(info["post_batch"].count))
    # Lazily initialize the counter, then bump it.
    metrics.setdefault("num_batches", 0)
    metrics["num_batches"] += 1
# Script entry point: train PG with all callbacks wired in, then verify
# that the expected custom metrics were produced.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-iters", type=int, default=2000)
    args = parser.parse_args()
    ray.init()
    trials = tune.run(
        "PG",
        stop={
            "training_iteration": args.num_iters,
        },
        config={
            "env": "CartPole-v0",
            "callbacks": {
                "on_episode_start": on_episode_start,
                "on_episode_step": on_episode_step,
                "on_episode_end": on_episode_end,
                "on_sample_end": on_sample_end,
                "on_train_result": on_train_result,
                "on_postprocess_traj": on_postprocess_traj,
            },
        },
        return_trials=True)
    # verify custom metrics for integration tests
    # (RLlib expands each custom metric into _mean/_min/_max variants.)
    custom_metrics = trials[0].last_result["custom_metrics"]
    print(custom_metrics)
    assert "pole_angle_mean" in custom_metrics
    assert "pole_angle_min" in custom_metrics
    assert "pole_angle_max" in custom_metrics
    assert "num_batches_mean" in custom_metrics
    assert "callback_ok" in trials[0].last_result
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_tf_policy.py
|
Python
|
import argparse
import ray
from ray import tune
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.evaluation.postprocessing import discount
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--iters", type=int, default=200)
def policy_gradient_loss(policy, model, dist_class, train_batch):
    """Vanilla policy-gradient loss: -mean(logp(a) * return)."""
    logits, _ = model.from_batch(train_batch)
    dist = dist_class(logits, model)
    logp = dist.logp(train_batch["actions"])
    return -tf.reduce_mean(logp * train_batch["returns"])
def calculate_advantages(policy,
                         sample_batch,
                         other_agent_batches=None,
                         episode=None):
    """Postprocess a sampled trajectory by attaching discounted returns.

    Adds a "returns" column with gamma=0.99 discounted cumulative rewards,
    which policy_gradient_loss() above consumes.
    """
    sample_batch["returns"] = discount(sample_batch["rewards"], 0.99)
    return sample_batch
# Assemble the policy and trainer from the template builders.
# <class 'ray.rllib.policy.tf_policy_template.MyTFPolicy'>
MyTFPolicy = build_tf_policy(
    name="MyTFPolicy",
    loss_fn=policy_gradient_loss,
    postprocess_fn=calculate_advantages,
)
# <class 'ray.rllib.agents.trainer_template.MyCustomTrainer'>
MyTrainer = build_trainer(
    name="MyCustomTrainer",
    default_policy=MyTFPolicy,
)
# Script entry point: run the custom trainer on CartPole via Tune.
if __name__ == "__main__":
    ray.init()
    args = parser.parse_args()
    tune.run(
        MyTrainer,
        stop={"training_iteration": args.iters},
        config={
            "env": "CartPole-v0",
            "num_workers": 2,
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_torch_policy.py
|
Python
|
import argparse
import ray
from ray import tune
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy_template import build_torch_policy
parser = argparse.ArgumentParser()
parser.add_argument("--iters", type=int, default=200)
def policy_gradient_loss(policy, model, dist_class, train_batch):
    """Vanilla policy-gradient loss: -sum(logp(a_t) * r_t) over the batch.

    NOTE(review): this weights log-probs by raw per-step rewards — no
    discounting or return computation is done here.
    """
    logits, _ = model({SampleBatch.CUR_OBS: train_batch[SampleBatch.CUR_OBS]})
    action_dist = dist_class(logits, model)
    log_probs = action_dist.logp(train_batch[SampleBatch.ACTIONS])
    return -train_batch[SampleBatch.REWARDS].dot(log_probs)
# Assemble the Torch policy and trainer from the template builders.
# <class 'ray.rllib.policy.torch_policy_template.MyTorchPolicy'>
MyTorchPolicy = build_torch_policy(
    name="MyTorchPolicy", loss_fn=policy_gradient_loss)
# <class 'ray.rllib.agents.trainer_template.MyCustomTrainer'>
MyTrainer = build_trainer(
    name="MyCustomTrainer",
    default_policy=MyTorchPolicy,
)
# Script entry point: run the custom trainer on CartPole via Tune.
if __name__ == "__main__":
    ray.init()
    args = parser.parse_args()
    tune.run(
        MyTrainer,
        stop={"training_iteration": args.iters},
        config={
            "env": "CartPole-v0",
            "num_workers": 2,
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/custom_train_fn.py
|
Python
|
"""Example of a custom training workflow. Run this for a demo.
This example shows:
- using Tune trainable functions to implement custom training workflows
You can visualize experiment results in ~/ray_results using TensorBoard.
"""
import ray
from ray import tune
from ray.rllib.agents.ppo import PPOTrainer
def my_train_fn(config, reporter):
    """Custom two-phase training workflow run as a Tune trainable.

    Phase 1 trains with the initial (high) LR, saves a checkpoint; phase 2
    restores it and continues with a lower LR, offsetting timesteps so the
    reported timeline keeps moving forward.

    NOTE(review): the comments below say "100 iterations" but both loops
    run 10 — confirm which was intended.
    """
    # Train for 100 iterations with high LR
    agent1 = PPOTrainer(env="CartPole-v0", config=config)
    for _ in range(10):
        result = agent1.train()
        result["phase"] = 1
        reporter(**result)
        phase1_time = result["timesteps_total"]
    state = agent1.save()
    agent1.stop()
    # Train for 100 iterations with low LR
    config["lr"] = 0.0001
    agent2 = PPOTrainer(env="CartPole-v0", config=config)
    agent2.restore(state)
    for _ in range(10):
        result = agent2.train()
        result["phase"] = 2
        result["timesteps_total"] += phase1_time  # keep time moving forward
        reporter(**result)
    agent2.stop()
# Script entry point: run the custom train function under Tune, reserving
# the same resources a regular PPO trial would request.
if __name__ == "__main__":
    ray.init()
    config = {
        "lr": 0.01,
        "num_workers": 0,
    }
    resources = PPOTrainer.default_resource_request(config).to_json()
    tune.run(my_train_fn, resources_per_trial=resources, config=config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/dmlab_watermaze.py
|
Python
|
from deepmind_lab import dmenv_module
from ray.rllib import env
class Watermaze(env.DMEnv):
    """DeepMind Lab "rooms_watermaze" level wrapped as an RLlib env."""
    def __init__(self, env_config):
        # RGBD: observations carry color plus depth channels.
        lab = dmenv_module.Lab(
            "contributed/dmlab30/rooms_watermaze",
            ["RGBD"],
            config=env_config,
        )
        super(Watermaze, self).__init__(lab)
# Demo: build the env and take two no-op steps (all controls zeroed).
# NOTE(review): this rebinds the name `env`, shadowing the `ray.rllib`
# `env` module imported above — consider renaming the variable.
env = Watermaze({"width": "320", "height": "160"})
print(env.action_space)
for i in range(2):
    print(
        env.step({
            "CROUCH": 0.,
            "FIRE": 0.,
            "JUMP": 0.,
            "LOOK_DOWN_UP_PIXELS_PER_FRAME": 0.,
            "LOOK_LEFT_RIGHT_PIXELS_PER_FRAME": 0.,
            "MOVE_BACK_FORWARD": 0.,
            "STRAFE_LEFT_RIGHT": 0.
        }))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/eager_execution.py
|
Python
|
import argparse
import random
import ray
from ray import tune
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.models import Model, ModelCatalog
from ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--iters", type=int, default=200)
class EagerModel(Model):
    """Example of using embedded eager execution in a custom model.
    This shows how to use tf.py_function() to execute a snippet of TF code
    in eager mode. Here the `self.forward_eager` method just prints out
    the intermediate tensor for debug purposes, but you can in general
    perform any TF eager operation in tf.py_function().
    """
    def _build_layers_v2(self, input_dict, num_outputs, options):
        self.fcnet = FullyConnectedNetwork(input_dict, self.obs_space,
                                           self.action_space, num_outputs,
                                           options)
        # Wrap the eager debug hook; the control dependency forces it to
        # actually run even though its output is otherwise unused.
        feature_out = tf.py_function(self.forward_eager,
                                     [self.fcnet.last_layer], tf.float32)
        with tf.control_dependencies([feature_out]):
            return tf.identity(self.fcnet.outputs), feature_out
    def forward_eager(self, feature_layer):
        # Runs eagerly inside tf.py_function; prints ~1% of the time to
        # keep log volume down.
        assert tf.executing_eagerly()
        if random.random() > 0.99:
            print("Eagerly printing the feature layer mean value",
                  tf.reduce_mean(feature_layer))
        return feature_layer
def policy_gradient_loss(policy, model, dist_class, train_batch):
    """Example of using embedded eager execution in a custom loss.
    Here `compute_penalty` prints the actions and rewards for debugging, and
    also computes a (dummy) penalty term to add to the loss.
    """
    def compute_penalty(actions, rewards):
        # Runs eagerly inside tf.py_function; prints ~10% of the time.
        assert tf.executing_eagerly()
        penalty = tf.reduce_mean(tf.cast(actions, tf.float32))
        if random.random() > 0.9:
            print("The eagerly computed penalty is", penalty, actions, rewards)
        return penalty
    logits, _ = model.from_batch(train_batch)
    action_dist = dist_class(logits, model)
    actions = train_batch[SampleBatch.ACTIONS]
    rewards = train_batch[SampleBatch.REWARDS]
    penalty = tf.py_function(
        compute_penalty, [actions, rewards], Tout=tf.float32)
    # Dummy penalty plus the standard policy-gradient term.
    return penalty - tf.reduce_mean(action_dist.logp(actions) * rewards)
# Assemble the policy and trainer from the template builders.
# <class 'ray.rllib.policy.tf_policy_template.MyTFPolicy'>
MyTFPolicy = build_tf_policy(
    name="MyTFPolicy",
    loss_fn=policy_gradient_loss,
)
# <class 'ray.rllib.agents.trainer_template.MyCustomTrainer'>
MyTrainer = build_trainer(
    name="MyCustomTrainer",
    default_policy=MyTFPolicy,
)
# Script entry point: register the eager model and train on CartPole.
if __name__ == "__main__":
    ray.init()
    args = parser.parse_args()
    ModelCatalog.register_custom_model("eager_model", EagerModel)
    tune.run(
        MyTrainer,
        stop={"training_iteration": args.iters},
        config={
            "env": "CartPole-v0",
            "num_workers": 0,
            "model": {
                "custom_model": "eager_model"
            },
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/export/cartpole_dqn_export.py
|
Python
|
#!/usr/bin/env python
import os
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
ray.init(num_cpus=10)
def train_and_export(algo_name, num_steps, model_dir, ckpt_dir, prefix):
    """Train `algo_name` on CartPole for `num_steps` iterations, then
    export both a TF checkpoint (for fine-tuning) and a SavedModel (for
    online serving)."""
    cls = get_agent_class(algo_name)
    alg = cls(config={}, env="CartPole-v0")
    for _ in range(num_steps):
        alg.train()
    # Export tensorflow checkpoint for fine-tuning
    alg.export_policy_checkpoint(ckpt_dir, filename_prefix=prefix)
    # Export tensorflow SavedModel for online serving
    alg.export_policy_model(model_dir)
def restore_saved_model(export_dir):
    """Load the exported SavedModel into a fresh graph/session and print
    its default serving signature for inspection."""
    signature_key = \
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    g = tf.Graph()
    with g.as_default():
        with tf.Session(graph=g) as sess:
            meta_graph_def = \
                tf.saved_model.load(sess,
                                    [tf.saved_model.tag_constants.SERVING],
                                    export_dir)
            print("Model restored!")
            print("Signature Def Information:")
            print(meta_graph_def.signature_def[signature_key])
            print("You can inspect the model using TensorFlow SavedModel CLI.")
            print("https://www.tensorflow.org/guide/saved_model")
def restore_checkpoint(export_dir, prefix):
    """Restore the exported TF checkpoint (graph from the .meta file plus
    variable values) and print every trainable variable."""
    sess = tf.Session()
    meta_file = "%s.meta" % prefix
    saver = tf.train.import_meta_graph(os.path.join(export_dir, meta_file))
    saver.restore(sess, os.path.join(export_dir, prefix))
    print("Checkpoint restored!")
    print("Variables Information:")
    for v in tf.trainable_variables():
        value = sess.run(v)
        print(v.name, value)
# Script entry point: briefly train DQN, export both formats, then verify
# each export can be restored.
if __name__ == "__main__":
    algo = "DQN"
    model_dir = "/tmp/model_export_dir"
    ckpt_dir = "/tmp/ckpt_export_dir"
    prefix = "model.ckpt"
    num_steps = 3
    train_and_export(algo, num_steps, model_dir, ckpt_dir, prefix)
    restore_saved_model(model_dir)
    restore_checkpoint(ckpt_dir, prefix)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/hierarchical_training.py
|
Python
|
"""Example of hierarchical training using the multi-agent API.
The example env is that of a "windy maze". The agent observes the current wind
direction and can either choose to stand still, or move in that direction.
You can try out the env directly with:
$ python hierarchical_training.py --flat
A simple hierarchical formulation involves a high-level agent that issues goals
(i.e., go north / south / east / west), and a low-level agent that executes
these goals over a number of time-steps. This can be implemented as a
multi-agent environment with a top-level agent and low-level agents spawned
for each higher-level action. The lower level agent is rewarded for moving
in the right direction.
You can try this formulation with:
$ python hierarchical_training.py # gets ~100 rew after ~100k timesteps
Note that the hierarchical formulation actually converges slightly slower than
using --flat in this example.
"""
import argparse
import random
import gym
from gym.spaces import Box, Discrete, Tuple
import logging
import ray
from ray import tune
from ray.tune import function
from ray.rllib.env import MultiAgentEnv
parser = argparse.ArgumentParser()
parser.add_argument("--flat", action="store_true")
# Agent has to traverse the maze from the starting position S -> F
# Observation space [x_pos, y_pos, wind_direction]
# Action space: stay still OR move in current wind direction
MAP_DATA = """
#########
#S #
####### #
# #
# #
####### #
#F #
#########"""
logger = logging.getLogger(__name__)
class WindyMazeEnv(gym.Env):
    """Single-agent windy maze.

    The agent observes its (x, y) position plus the current wind direction
    and chooses each step whether to stand still or move with the wind.
    Reaching F from S yields reward 100; episodes cap at 200 steps.
    """
    def __init__(self, env_config):
        # Parse the ASCII MAP_DATA into a list of row strings.
        self.map = [m for m in MAP_DATA.split("\n") if m]
        self.x_dim = len(self.map)
        self.y_dim = len(self.map[0])
        logger.info("Loaded map {} {}".format(self.x_dim, self.y_dim))
        # Locate the start (S) and finish (F) cells.
        for x in range(self.x_dim):
            for y in range(self.y_dim):
                if self.map[x][y] == "S":
                    self.start_pos = (x, y)
                elif self.map[x][y] == "F":
                    self.end_pos = (x, y)
        logger.info("Start pos {} end pos {}".format(self.start_pos,
                                                     self.end_pos))
        self.observation_space = Tuple([
            Box(0, 100, shape=(2, )),  # (x, y)
            Discrete(4),  # wind direction (N, E, S, W)
        ])
        self.action_space = Discrete(2)  # whether to move or not
    def reset(self):
        # Randomize the wind and return the initial (pos, wind) observation.
        self.wind_direction = random.choice([0, 1, 2, 3])
        self.pos = self.start_pos
        self.num_steps = 0
        return [[self.pos[0], self.pos[1]], self.wind_direction]
    def step(self, action):
        # action == 1 means "move with the wind"; 0 means stand still.
        if action == 1:
            self.pos = self._get_new_pos(self.pos, self.wind_direction)
        self.num_steps += 1
        self.wind_direction = random.choice([0, 1, 2, 3])
        at_goal = self.pos == self.end_pos
        done = at_goal or self.num_steps >= 200
        return ([[self.pos[0], self.pos[1]], self.wind_direction],
                100 * int(at_goal), done, {})
    def _get_new_pos(self, pos, direction):
        # Compute the cell one step in `direction` (0=N, 1=E, 2=S, 3=W);
        # moves into walls ('#') or off the map leave the position unchanged.
        if direction == 0:
            new_pos = (pos[0] - 1, pos[1])
        elif direction == 1:
            new_pos = (pos[0], pos[1] + 1)
        elif direction == 2:
            new_pos = (pos[0] + 1, pos[1])
        elif direction == 3:
            new_pos = (pos[0], pos[1] - 1)
        if (new_pos[0] >= 0 and new_pos[0] < self.x_dim and new_pos[1] >= 0
                and new_pos[1] < self.y_dim
                and self.map[new_pos[0]][new_pos[1]] != "#"):
            return new_pos
        else:
            return pos  # did not move
class HierarchicalWindyMazeEnv(MultiAgentEnv):
    """Hierarchical wrapper around WindyMazeEnv.

    A single high-level agent issues goal directions; each goal spawns a
    fresh low-level agent that gets up to 25 steps to execute it and is
    rewarded for moving toward the commanded goal square.
    """

    def __init__(self, env_config):
        self.flat_env = WindyMazeEnv(env_config)

    def reset(self):
        self.cur_obs = self.flat_env.reset()
        self.current_goal = None
        self.steps_remaining_at_level = None
        self.num_high_level_steps = 0
        # current low level agent id. This must be unique for each high level
        # step since agent ids cannot be reused.
        self.low_level_agent_id = "low_level_{}".format(
            self.num_high_level_steps)
        return {
            "high_level_agent": self.cur_obs,
        }

    def step(self, action_dict):
        # Exactly one agent acts per env step: either the high-level agent
        # (choosing a goal) or the currently active low-level agent.
        assert len(action_dict) == 1, action_dict
        if "high_level_agent" in action_dict:
            return self._high_level_step(action_dict["high_level_agent"])
        else:
            return self._low_level_step(list(action_dict.values())[0])

    def _high_level_step(self, action):
        # Bug fix: the original message lacked the "{}" placeholder, so
        # .format(action) was a no-op and the goal was never logged.
        logger.debug("High level agent sets goal {}".format(action))
        self.current_goal = action
        self.steps_remaining_at_level = 25
        self.num_high_level_steps += 1
        self.low_level_agent_id = "low_level_{}".format(
            self.num_high_level_steps)
        obs = {self.low_level_agent_id: [self.cur_obs, self.current_goal]}
        rew = {self.low_level_agent_id: 0}
        done = {"__all__": False}
        return obs, rew, done, {}

    def _low_level_step(self, action):
        logger.debug("Low level agent step {}".format(action))
        self.steps_remaining_at_level -= 1
        cur_pos = tuple(self.cur_obs[0])
        goal_pos = self.flat_env._get_new_pos(cur_pos, self.current_goal)
        # Step in the actual env
        f_obs, f_rew, f_done, _ = self.flat_env.step(action)
        new_pos = tuple(f_obs[0])
        self.cur_obs = f_obs
        # Calculate low-level agent observation and reward: +1 for moving
        # onto the commanded goal square, -1 for moving anywhere else,
        # 0 for standing still.
        obs = {self.low_level_agent_id: [f_obs, self.current_goal]}
        if new_pos != cur_pos:
            if new_pos == goal_pos:
                rew = {self.low_level_agent_id: 1}
            else:
                rew = {self.low_level_agent_id: -1}
        else:
            rew = {self.low_level_agent_id: 0}
        # Handle env termination & transitions back to higher level
        done = {"__all__": False}
        if f_done:
            done["__all__"] = True
            logger.debug("high level final reward {}".format(f_rew))
            rew["high_level_agent"] = f_rew
            obs["high_level_agent"] = f_obs
        elif self.steps_remaining_at_level == 0:
            # Goal budget exhausted: retire this low-level agent and hand
            # control (zero reward, fresh obs) back to the high-level agent.
            done[self.low_level_agent_id] = True
            rew["high_level_agent"] = 0
            obs["high_level_agent"] = f_obs
        return obs, rew, done, {}
# Script entry point: run either the flat env or the hierarchical
# formulation with separate high-/low-level policies.
if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()
    if args.flat:
        tune.run(
            "PPO",
            config={
                "env": WindyMazeEnv,
                "num_workers": 0,
            },
        )
    else:
        # Instantiate once just to read off the obs/action spaces.
        maze = WindyMazeEnv(None)
        def policy_mapping_fn(agent_id):
            # Low-level agents are named "low_level_<n>"; everything else
            # is the single high-level agent.
            if agent_id.startswith("low_level_"):
                return "low_level_policy"
            else:
                return "high_level_policy"
        tune.run(
            "PPO",
            config={
                "env": HierarchicalWindyMazeEnv,
                "num_workers": 0,
                "log_level": "INFO",
                "entropy_coeff": 0.01,
                "multiagent": {
                    "policies": {
                        # High level: sees the flat obs, picks one of 4 goals.
                        "high_level_policy": (None, maze.observation_space,
                                              Discrete(4), {
                                                  "gamma": 0.9
                                              }),
                        # Low level: sees (flat obs, goal), acts in the maze.
                        "low_level_policy": (None,
                                             Tuple([
                                                 maze.observation_space,
                                                 Discrete(4)
                                             ]), maze.action_space, {
                                                 "gamma": 0.0
                                             }),
                    },
                    "policy_mapping_fn": function(policy_mapping_fn),
                },
            },
        )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/multiagent_cartpole.py
|
Python
|
"""Simple example of setting up a multi-agent policy mapping.
Control the number of agents and policies via --num-agents and --num-policies.
This works with hundreds of agents and policies, but note that initializing
many TF policies will take some time.
Also, TF evals might slow down with large numbers of policies. To debug TF
execution, set the TF_TIMELINE_DIR environment variable.
"""
import argparse
import gym
import random
import ray
from ray import tune
from ray.rllib.models import Model, ModelCatalog
from ray.rllib.tests.test_multi_agent_env import MultiCartpole
from ray.tune.registry import register_env
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--num-agents", type=int, default=4)
parser.add_argument("--num-policies", type=int, default=2)
parser.add_argument("--num-iters", type=int, default=20)
parser.add_argument("--simple", action="store_true")
class CustomModel1(Model):
    """Policy model whose first layers live in a global 'shared' scope."""
    def _build_layers_v2(self, input_dict, num_outputs, options):
        # Example of (optional) weight sharing between two different policies.
        # Here, we share the variables defined in the 'shared' variable scope
        # by entering it explicitly with tf.AUTO_REUSE. This creates the
        # variables for the 'fc1' layer in a global scope called 'shared'
        # outside of the policy's normal variable scope.
        with tf.variable_scope(
                tf.VariableScope(tf.AUTO_REUSE, "shared"),
                reuse=tf.AUTO_REUSE,
                auxiliary_name_scope=False):
            last_layer = tf.layers.dense(
                input_dict["obs"], 64, activation=tf.nn.relu, name="fc1")
        last_layer = tf.layers.dense(
            last_layer, 64, activation=tf.nn.relu, name="fc2")
        output = tf.layers.dense(
            last_layer, num_outputs, activation=None, name="fc_out")
        return output, last_layer
class CustomModel2(Model):
    """Same architecture as CustomModel1; reuses the shared 'fc1' weights."""
    def _build_layers_v2(self, input_dict, num_outputs, options):
        # Weights shared with CustomModel1
        with tf.variable_scope(
                tf.VariableScope(tf.AUTO_REUSE, "shared"),
                reuse=tf.AUTO_REUSE,
                auxiliary_name_scope=False):
            last_layer = tf.layers.dense(
                input_dict["obs"], 64, activation=tf.nn.relu, name="fc1")
        last_layer = tf.layers.dense(
            last_layer, 64, activation=tf.nn.relu, name="fc2")
        output = tf.layers.dense(
            last_layer, num_outputs, activation=None, name="fc_out")
        return output, last_layer
# Script entry point: build an ensemble of policies (alternating custom
# models) and train PPO with random agent-to-policy assignment.
if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()
    # Simple environment with `num_agents` independent cartpole entities
    register_env("multi_cartpole", lambda _: MultiCartpole(args.num_agents))
    ModelCatalog.register_custom_model("model1", CustomModel1)
    ModelCatalog.register_custom_model("model2", CustomModel2)
    single_env = gym.make("CartPole-v0")
    obs_space = single_env.observation_space
    act_space = single_env.action_space
    # Each policy can have a different configuration (including custom model)
    def gen_policy(i):
        config = {
            "model": {
                "custom_model": ["model1", "model2"][i % 2],
            },
            "gamma": random.choice([0.95, 0.99]),
        }
        return (None, obs_space, act_space, config)
    # Setup PPO with an ensemble of `num_policies` different policies
    policies = {
        "policy_{}".format(i): gen_policy(i)
        for i in range(args.num_policies)
    }
    policy_ids = list(policies.keys())
    tune.run(
        "PPO",
        stop={"training_iteration": args.num_iters},
        config={
            "env": "multi_cartpole",
            "log_level": "DEBUG",
            "simple_optimizer": args.simple,
            "num_sgd_iter": 10,
            "multiagent": {
                "policies": policies,
                # Each agent is (re)assigned a random policy per episode.
                "policy_mapping_fn": (
                    lambda agent_id: random.choice(policy_ids)),
            },
        },
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/multiagent_custom_policy.py
|
Python
|
"""Example of running a custom hand-coded policy alongside trainable policies.
This example has two policies:
(1) a simple PG policy
(2) a hand-coded policy that acts at random in the env (doesn't learn)
In the console output, you can see the PG policy does much better than random:
Result for PG_multi_cartpole_0:
...
policy_reward_mean:
pg_policy: 185.23
random: 21.255
...
"""
import argparse
import gym
import ray
from ray import tune
from ray.rllib.policy import Policy
from ray.rllib.tests.test_multi_agent_env import MultiCartpole
from ray.tune.registry import register_env
parser = argparse.ArgumentParser()
parser.add_argument("--num-iters", type=int, default=20)
class RandomPolicy(Policy):
    """Hand-coded policy that returns random actions (it never learns)."""

    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        """Sample one uniformly random action per observation in the batch."""
        actions = []
        for _ in obs_batch:
            actions.append(self.action_space.sample())
        # No RNN state outs, no extra action info.
        return actions, [], {}

    def learn_on_batch(self, samples):
        """This policy is not trainable; updates are a no-op."""
        return {}
# Script entry point: train PG alongside the fixed random policy so the
# learning curve comparison is visible in the results.
if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()
    # Simple environment with 4 independent cartpole entities
    register_env("multi_cartpole", lambda _: MultiCartpole(4))
    single_env = gym.make("CartPole-v0")
    obs_space = single_env.observation_space
    act_space = single_env.action_space
    tune.run(
        "PG",
        stop={"training_iteration": args.num_iters},
        config={
            "env": "multi_cartpole",
            "multiagent": {
                "policies": {
                    "pg_policy": (None, obs_space, act_space, {}),
                    "random": (RandomPolicy, obs_space, act_space, {}),
                },
                # Even-numbered agents learn (PG); odd ones act randomly.
                "policy_mapping_fn": (
                    lambda agent_id: ["pg_policy", "random"][agent_id % 2]),
            },
        },
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/multiagent_two_trainers.py
|
Python
|
"""Example of using two different training methods at once in multi-agent.
Here we create a number of CartPole agents, some of which are trained with
DQN, and some of which are trained with PPO. We periodically sync weights
between the two trainers (note that no such syncing is needed when using just
a single training method).
For a simpler example, see also: multiagent_cartpole.py
"""
import argparse
import gym
import ray
from ray.rllib.agents.dqn.dqn import DQNTrainer
from ray.rllib.agents.dqn.dqn_policy import DQNTFPolicy
from ray.rllib.agents.ppo.ppo import PPOTrainer
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from ray.rllib.tests.test_multi_agent_env import MultiCartpole
from ray.tune.logger import pretty_print
from ray.tune.registry import register_env
parser = argparse.ArgumentParser()
parser.add_argument("--num-iters", type=int, default=20)
# Script entry point: train PPO and DQN trainers side by side on the same
# multi-agent env, syncing each other's policy weights every iteration.
if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()
    # Simple environment with 4 independent cartpole entities
    register_env("multi_cartpole", lambda _: MultiCartpole(4))
    single_env = gym.make("CartPole-v0")
    obs_space = single_env.observation_space
    act_space = single_env.action_space
    # You can also have multiple policies per trainer, but here we just
    # show one each for PPO and DQN.
    policies = {
        "ppo_policy": (PPOTFPolicy, obs_space, act_space, {}),
        "dqn_policy": (DQNTFPolicy, obs_space, act_space, {}),
    }
    def policy_mapping_fn(agent_id):
        # Even-numbered agents go to PPO, odd-numbered to DQN.
        if agent_id % 2 == 0:
            return "ppo_policy"
        else:
            return "dqn_policy"
    ppo_trainer = PPOTrainer(
        env="multi_cartpole",
        config={
            "multiagent": {
                "policies": policies,
                "policy_mapping_fn": policy_mapping_fn,
                # Each trainer optimizes only its own policy.
                "policies_to_train": ["ppo_policy"],
            },
            # disable filters, otherwise we would need to synchronize those
            # as well to the DQN agent
            "observation_filter": "NoFilter",
        })
    dqn_trainer = DQNTrainer(
        env="multi_cartpole",
        config={
            "multiagent": {
                "policies": policies,
                "policy_mapping_fn": policy_mapping_fn,
                "policies_to_train": ["dqn_policy"],
            },
            "gamma": 0.95,
            "n_step": 3,
        })
    # disable DQN exploration when used by the PPO trainer
    ppo_trainer.workers.foreach_worker(
        lambda ev: ev.for_policy(
            lambda pi: pi.set_epsilon(0.0), policy_id="dqn_policy"))
    # You should see both the printed X and Y approach 200 as this trains:
    # info:
    #   policy_reward_mean:
    #     dqn_policy: X
    #     ppo_policy: Y
    for i in range(args.num_iters):
        print("== Iteration", i, "==")
        # improve the DQN policy
        print("-- DQN --")
        print(pretty_print(dqn_trainer.train()))
        # improve the PPO policy
        print("-- PPO --")
        print(pretty_print(ppo_trainer.train()))
        # swap weights to synchronize
        dqn_trainer.set_weights(ppo_trainer.get_weights(["ppo_policy"]))
        ppo_trainer.set_weights(dqn_trainer.get_weights(["dqn_policy"]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/parametric_action_cartpole.py
|
Python
|
"""Example of handling variable length and/or parametric action spaces.
This is a toy example of the action-embedding based approach for handling large
discrete action spaces (potentially infinite in size), similar to this:
https://neuro.cs.ut.ee/the-use-of-embeddings-in-openai-five/
This currently works with RLlib's policy gradient style algorithms
(e.g., PG, PPO, IMPALA, A2C) and also DQN.
Note that since the model outputs now include "-inf" tf.float32.min
values, not all algorithm options are supported at the moment. For example,
algorithms might crash if they don't properly ignore the -inf action scores.
Working configurations are given below.
"""
import argparse
import random
import numpy as np
import gym
from gym.spaces import Box, Discrete, Dict
import ray
from ray import tune
from ray.rllib.agents.dqn.distributional_q_model import DistributionalQModel
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.tune.registry import register_env
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--stop", type=int, default=200)
parser.add_argument("--run", type=str, default="PPO")
class ParametricActionCartpole(gym.Env):
    """Parametric action version of CartPole.
    In this env there are only ever two valid actions, but we pretend there are
    actually up to `max_avail_actions` actions that can be taken, and the two
    valid actions are randomly hidden among this set.
    At each step, we emit a dict of:
        - the actual cart observation
        - a mask of valid actions (e.g., [0, 0, 1, 0, 0, 1] for 6 max avail)
        - the list of action embeddings (w/ zeroes for invalid actions) (e.g.,
            [[0, 0],
             [0, 0],
             [-0.2322, -0.2569],
             [0, 0],
             [0, 0],
             [0.7878, 1.2297]] for max_avail_actions=6)
    In a real environment, the actions embeddings would be larger than two
    units of course, and also there would be a variable number of valid actions
    per step instead of always [LEFT, RIGHT].
    """
    def __init__(self, max_avail_actions):
        # Use simple random 2-unit action embeddings for [LEFT, RIGHT]
        # (drawn once per env instance; fixed for the env's lifetime).
        self.left_action_embed = np.random.randn(2)
        self.right_action_embed = np.random.randn(2)
        self.action_space = Discrete(max_avail_actions)
        self.wrapped = gym.make("CartPole-v0")
        self.observation_space = Dict({
            "action_mask": Box(0, 1, shape=(max_avail_actions, )),
            "avail_actions": Box(-10, 10, shape=(max_avail_actions, 2)),
            "cart": self.wrapped.observation_space,
        })
    def update_avail_actions(self):
        # Re-hide the two valid actions at two fresh random slots; every
        # other slot gets a zero embedding and a zero mask entry.
        self.action_assignments = np.array([[0., 0.]] * self.action_space.n)
        self.action_mask = np.array([0.] * self.action_space.n)
        self.left_idx, self.right_idx = random.sample(
            range(self.action_space.n), 2)
        self.action_assignments[self.left_idx] = self.left_action_embed
        self.action_assignments[self.right_idx] = self.right_action_embed
        self.action_mask[self.left_idx] = 1
        self.action_mask[self.right_idx] = 1
    def reset(self):
        self.update_avail_actions()
        return {
            "action_mask": self.action_mask,
            "avail_actions": self.action_assignments,
            "cart": self.wrapped.reset(),
        }
    def step(self, action):
        # Map the chosen slot index back to the wrapped env's 0/1 action;
        # any action outside the two valid slots is a caller/model bug.
        if action == self.left_idx:
            actual_action = 0
        elif action == self.right_idx:
            actual_action = 1
        else:
            raise ValueError(
                "Chosen action was not one of the non-zero action embeddings",
                action, self.action_assignments, self.action_mask,
                self.left_idx, self.right_idx)
        orig_obs, rew, done, info = self.wrapped.step(actual_action)
        self.update_avail_actions()
        obs = {
            "action_mask": self.action_mask,
            "avail_actions": self.action_assignments,
            "cart": orig_obs,
        }
        return obs, rew, done, info
class ParametricActionsModel(DistributionalQModel, TFModelV2):
    """Parametric action model that handles the dot product and masking.
    This assumes the outputs are logits for a single Categorical action dist.
    Getting this to work with a more complex output (e.g., if the action space
    is a tuple of several distributions) is also possible but left as an
    exercise to the reader.
    """
    def __init__(self,
                 obs_space,
                 action_space,
                 num_outputs,
                 model_config,
                 name,
                 true_obs_shape=(4, ),
                 action_embed_size=2,
                 **kw):
        super(ParametricActionsModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name, **kw)
        # Inner network consumes only the "cart" part of the dict obs and
        # outputs an action_embed_size-dim embedding (not logits).
        self.action_embed_model = FullyConnectedNetwork(
            Box(-1, 1, shape=true_obs_shape), action_space, action_embed_size,
            model_config, name + "_action_embed")
        self.register_variables(self.action_embed_model.variables())
    def forward(self, input_dict, state, seq_lens):
        """Score each available action by embedding dot-product, then mask."""
        # Extract the available actions tensor from the observation.
        avail_actions = input_dict["obs"]["avail_actions"]
        action_mask = input_dict["obs"]["action_mask"]
        # Compute the predicted action embedding
        action_embed, _ = self.action_embed_model({
            "obs": input_dict["obs"]["cart"]
        })
        # Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
        # avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
        intent_vector = tf.expand_dims(action_embed, 1)
        # Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
        action_logits = tf.reduce_sum(avail_actions * intent_vector, axis=2)
        # Mask out invalid actions (use tf.float32.min for stability)
        # log(0) = -inf is clamped to tf.float32.min so downstream ops
        # don't produce NaNs; log(1) = 0 leaves valid logits unchanged.
        inf_mask = tf.maximum(tf.log(action_mask), tf.float32.min)
        return action_logits + inf_mask, state
    def value_function(self):
        """Delegate the value head to the inner embedding network."""
        return self.action_embed_model.value_function()
if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()
    ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
    # 10 action slots, of which only 2 are ever valid per step.
    register_env("pa_cartpole", lambda _: ParametricActionCartpole(10))
    if args.run == "DQN":
        cfg = {
            # TODO(ekl) we need to set these to prevent the masked values
            # from being further processed in DistributionalQModel, which
            # would mess up the masking. It is possible to support these if we
            # defined a a custom DistributionalQModel that is aware of masking.
            "hiddens": [],
            "dueling": False,
        }
    else:
        cfg = {}
    tune.run(
        args.run,
        stop={
            "episode_reward_mean": args.stop,
        },
        config=dict({
            "env": "pa_cartpole",
            "model": {
                "custom_model": "pa_model",
            },
            "num_workers": 0,
        }, **cfg),
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/rock_paper_scissors_multiagent.py
|
Python
|
"""A simple multi-agent env with two agents playing rock paper scissors.
This demonstrates running the following policies in competition:
(1) heuristic policy of repeating the same move
(2) heuristic policy of beating the last opponent move
(3) LSTM/feedforward PG policies
(4) LSTM policy with custom entropy loss
"""
import random
from gym.spaces import Discrete
from ray import tune
from ray.rllib.agents.pg.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
ROCK = 0
PAPER = 1
SCISSORS = 2
class RockPaperScissorsEnv(MultiAgentEnv):
    """Two-player rock-paper-scissors environment.

    Each player observes only the opponent's previous move; an episode
    lasts exactly 10 simultaneous moves.
    """
    def __init__(self, _):
        self.action_space = Discrete(3)
        self.observation_space = Discrete(3)
        self.player1 = "player1"
        self.player2 = "player2"
        self.last_move = None
        self.num_moves = 0
    def reset(self):
        self.last_move = (0, 0)
        self.num_moves = 0
        return self._opponent_obs()
    def step(self, action_dict):
        first = action_dict[self.player1]
        second = action_dict[self.player2]
        self.last_move = (first, second)
        obs = self._opponent_obs()
        # Moves form a cycle mod 3: each move beats the one directly
        # "below" it (ROCK beats SCISSORS, PAPER beats ROCK, ...), so the
        # winner is determined by (first - second) % 3.
        margin = (first - second) % 3
        if margin == 0:
            outcome = 0      # tie
        elif margin == 1:
            outcome = 1      # player1 wins
        else:
            outcome = -1     # player2 wins
        rew = {
            self.player1: outcome,
            self.player2: -outcome,
        }
        self.num_moves += 1
        done = {
            "__all__": self.num_moves >= 10,
        }
        return obs, rew, done, {}
    def _opponent_obs(self):
        # Each player sees the other player's most recent move.
        return {
            self.player1: self.last_move[1],
            self.player2: self.last_move[0],
        }
class AlwaysSameHeuristic(Policy):
    """Pick a random move and stick with it for the entire episode."""
    def get_initial_state(self):
        # The fixed move is carried in the (length-1) RNN state so it
        # persists across steps of the episode.
        return [random.choice([ROCK, PAPER, SCISSORS])]
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        # Emit the stored move and pass the state through unchanged.
        return list(state_batches[0]), state_batches, {}
    def learn_on_batch(self, samples):
        # Heuristic policy: nothing to learn.
        pass
    def get_weights(self):
        # No weights to export.
        pass
    def set_weights(self, weights):
        # No weights to set.
        pass
class BeatLastHeuristic(Policy):
    """Heuristic that counters the opponent's previous move."""
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        # Each observation is a one-hot encoding of the opponent's last
        # move; respond with the move that beats it.
        def counter_move(one_hot):
            for move, beats in ((ROCK, PAPER), (PAPER, SCISSORS),
                                (SCISSORS, ROCK)):
                if one_hot[move] == 1:
                    return beats
        return [counter_move(obs) for obs in obs_batch], [], {}
    def learn_on_batch(self, samples):
        # Heuristic policy: nothing to learn.
        pass
    def get_weights(self):
        # No weights to export.
        pass
    def set_weights(self, weights):
        # No weights to set.
        pass
def run_same_policy():
    """Trivial setup: one shared PG policy controls both players."""
    experiment_config = {"env": RockPaperScissorsEnv}
    tune.run("PG", config=experiment_config)
def run_heuristic_vs_learned(use_lstm=False, trainer="PG"):
    """Run heuristic policies vs a learned agent.
    The learned agent should eventually reach a reward of ~5 with
    use_lstm=False, and ~7 with use_lstm=True. The reason the LSTM policy
    can perform better is since it can distinguish between the always_same vs
    beat_last heuristics.
    """
    def select_policy(agent_id):
        # player1 always learns; player2 is a randomly chosen heuristic
        # (re-sampled per episode).
        if agent_id == "player1":
            return "learned"
        else:
            return random.choice(["always_same", "beat_last"])
    tune.run(
        trainer,
        stop={"timesteps_total": 400000},
        config={
            "env": RockPaperScissorsEnv,
            "gamma": 0.9,
            "num_workers": 4,
            "num_envs_per_worker": 4,
            "sample_batch_size": 10,
            "train_batch_size": 200,
            "multiagent": {
                # Only the learned policy is optimized; heuristics are fixed.
                "policies_to_train": ["learned"],
                "policies": {
                    "always_same": (AlwaysSameHeuristic, Discrete(3),
                                    Discrete(3), {}),
                    "beat_last": (BeatLastHeuristic, Discrete(3), Discrete(3),
                                  {}),
                    "learned": (None, Discrete(3), Discrete(3), {
                        "model": {
                            "use_lstm": use_lstm
                        }
                    }),
                },
                "policy_mapping_fn": select_policy,
            },
        })
def run_with_custom_entropy_loss():
    """Example of customizing the loss function of an existing policy.
    This performs about the same as the default loss does."""
    def entropy_policy_gradient_loss(policy, model, dist_class, train_batch):
        # Standard PG loss plus an entropy bonus (coefficient 0.1).
        logits, _ = model.from_batch(train_batch)
        action_dist = dist_class(logits, model)
        return (-0.1 * action_dist.entropy() - tf.reduce_mean(
            action_dist.logp(train_batch["actions"]) *
            train_batch["advantages"]))
    # Derive a new policy class from PGTFPolicy with only the loss swapped,
    # then a new trainer class that uses it.
    EntropyPolicy = PGTFPolicy.with_updates(
        loss_fn=entropy_policy_gradient_loss)
    EntropyLossPG = PGTrainer.with_updates(
        name="EntropyPG", get_policy_class=lambda _: EntropyPolicy)
    run_heuristic_vs_learned(use_lstm=True, trainer=EntropyLossPG)
if __name__ == "__main__":
    # Uncomment exactly one experiment entry point to run it:
    # run_same_policy()
    # run_with_custom_entropy_loss()
    run_heuristic_vs_learned(use_lstm=False)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/rollout_worker_custom_workflow.py
|
Python
|
"""Example of using rollout worker classes directly to implement training.
Instead of using the built-in Trainer classes provided by RLlib, here we define
a custom Policy class and manually coordinate distributed sample
collection and policy optimization.
"""
import argparse
import gym
import ray
from ray import tune
from ray.rllib.policy import Policy
from ray.rllib.evaluation import RolloutWorker, SampleBatch
from ray.rllib.evaluation.metrics import collect_metrics
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--num-iters", type=int, default=20)
parser.add_argument("--num-workers", type=int, default=2)
class CustomPolicy(Policy):
    """Minimal from-scratch policy used to demo the RolloutWorker API.

    A real policy would typically extend TFPolicy or TorchPolicy instead.
    """
    def __init__(self, observation_space, action_space, config):
        Policy.__init__(self, observation_space, action_space, config)
        # Single example parameter, mutated remotely by the driver.
        self.w = 1.0
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        # Ignore the observations entirely and act uniformly at random.
        random_actions = [self.action_space.sample() for _ in obs_batch]
        return random_actions, [], {}
    def learn_on_batch(self, samples):
        # A real implementation would update parameters from `samples` here.
        return {}
    def update_some_value(self, w):
        # Demonstrates invoking arbitrary methods on (remote) policies.
        self.w = w
    def get_weights(self):
        return {"w": self.w}
    def set_weights(self, weights):
        self.w = weights["w"]
def training_workflow(config, reporter):
    """Hand-rolled training loop: broadcast weights, sample, learn, report.

    Runs config["num_iters"] iterations; each gathers two sample batches
    (T1, T2) from the remote rollout workers and applies them locally.
    """
    # Setup policy and policy evaluation actors
    env = gym.make("CartPole-v0")
    policy = CustomPolicy(env.observation_space, env.action_space, {})
    workers = [
        RolloutWorker.as_remote().remote(lambda c: gym.make("CartPole-v0"),
                                         CustomPolicy)
        for _ in range(config["num_workers"])
    ]
    for _ in range(config["num_iters"]):
        # Broadcast weights to the policy evaluation workers
        # (ray.put once so the object is shared, not re-serialized per worker)
        weights = ray.put({"default_policy": policy.get_weights()})
        for w in workers:
            w.set_weights.remote(weights)
        # Gather a batch of samples
        T1 = SampleBatch.concat_samples(
            ray.get([w.sample.remote() for w in workers]))
        # Update the remote policy replicas and gather another batch of samples
        new_value = policy.w * 2.0
        for w in workers:
            w.for_policy.remote(lambda p: p.update_some_value(new_value))
        # Gather another batch of samples
        T2 = SampleBatch.concat_samples(
            ray.get([w.sample.remote() for w in workers]))
        # Improve the policy using the T1 batch
        policy.learn_on_batch(T1)
        # Do some arbitrary updates based on the T2 batch
        policy.update_some_value(sum(T2["rewards"]))
        reporter(**collect_metrics(remote_workers=workers))
if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()
    # Run the custom workflow as a single Tune "trial"; extra_cpu reserves
    # capacity for the rollout worker actors the workflow spawns itself.
    tune.run(
        training_workflow,
        resources_per_trial={
            "gpu": 1 if args.gpu else 0,
            "cpu": 1,
            "extra_cpu": args.num_workers,
        },
        config={
            "num_workers": args.num_workers,
            "num_iters": args.num_iters,
        },
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/saving_experiences.py
|
Python
|
"""Simple example of writing experiences to a file using JsonWriter."""
# __sphinx_doc_begin__
import gym
import numpy as np
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder
from ray.rllib.offline.json_writer import JsonWriter
if __name__ == "__main__":
    batch_builder = SampleBatchBuilder()  # or MultiAgentSampleBatchBuilder
    writer = JsonWriter("/tmp/demo-out")
    # You normally wouldn't want to manually create sample batches if a
    # simulator is available, but let's do it anyways for example purposes:
    env = gym.make("CartPole-v0")
    # RLlib uses preprocessors to implement transforms such as one-hot encoding
    # and flattening of tuple and dict observations. For CartPole a no-op
    # preprocessor is used, but this may be relevant for more complex envs.
    prep = get_preprocessor(env.observation_space)(env.observation_space)
    print("The preprocessor is", prep)
    # Roll out 100 random-action episodes, writing one batch per episode.
    for eps_id in range(100):
        obs = env.reset()
        prev_action = np.zeros_like(env.action_space.sample())
        prev_reward = 0
        done = False
        t = 0
        while not done:
            action = env.action_space.sample()
            new_obs, rew, done, info = env.step(action)
            batch_builder.add_values(
                t=t,
                eps_id=eps_id,
                agent_index=0,
                obs=prep.transform(obs),
                actions=action,
                action_prob=1.0,  # put the true action probability here
                rewards=rew,
                prev_actions=prev_action,
                prev_rewards=prev_reward,
                dones=done,
                infos=info,
                new_obs=prep.transform(new_obs))
            obs = new_obs
            prev_action = action
            prev_reward = rew
            t += 1
        writer.write(batch_builder.build_and_reset())
# __sphinx_doc_end__
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/serving/cartpole_client.py
|
Python
|
"""Example of querying a policy server. Copy this file for your use case.
To try this out, in two separate shells run:
$ python cartpole_server.py
$ python cartpole_client.py
"""
import argparse
import gym
from ray.rllib.utils.policy_client import PolicyClient
parser = argparse.ArgumentParser()
parser.add_argument(
"--no-train", action="store_true", help="Whether to disable training.")
parser.add_argument(
"--off-policy",
action="store_true",
help="Whether to take random instead of on-policy actions.")
parser.add_argument(
"--stop-at-reward",
type=int,
default=9999,
help="Stop once the specified reward is reached.")
if __name__ == "__main__":
    args = parser.parse_args()
    env = gym.make("CartPole-v0")
    client = PolicyClient("http://localhost:9900")
    eid = client.start_episode(training_enabled=not args.no_train)
    obs = env.reset()
    rewards = 0
    # Infinite serve loop: episodes are reported to the server and
    # restarted until --stop-at-reward is reached.
    while True:
        if args.off_policy:
            # Take a random action locally and just inform the server.
            action = env.action_space.sample()
            client.log_action(eid, obs, action)
        else:
            # Ask the server's current policy for the action.
            action = client.get_action(eid, obs)
        obs, reward, done, info = env.step(action)
        rewards += reward
        client.log_returns(eid, reward, info=info)
        if done:
            print("Total reward:", rewards)
            if rewards >= args.stop_at_reward:
                print("Target reward achieved, exiting")
                exit(0)
            rewards = 0
            client.end_episode(eid, obs)
            obs = env.reset()
            eid = client.start_episode(training_enabled=not args.no_train)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/serving/cartpole_server.py
|
Python
|
"""Example of running a policy server. Copy this file for your use case.
To try this out, in two separate shells run:
$ python cartpole_server.py
$ python cartpole_client.py
"""
import os
from gym import spaces
import numpy as np
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.utils.policy_server import PolicyServer
from ray.tune.logger import pretty_print
from ray.tune.registry import register_env
SERVER_ADDRESS = "localhost"
SERVER_PORT = 9900
CHECKPOINT_FILE = "last_checkpoint.out"
class CartpoleServing(ExternalEnv):
    """ExternalEnv that serves CartPole decisions over a policy server."""
    def __init__(self):
        # CartPole spaces: 2 discrete actions, 4-dim continuous observation.
        cartpole_actions = spaces.Discrete(2)
        cartpole_obs = spaces.Box(
            low=-10, high=10, shape=(4, ), dtype=np.float32)
        ExternalEnv.__init__(self, cartpole_actions, cartpole_obs)
    def run(self):
        # Blocks forever, handling client requests.
        print("Starting policy server at {}:{}".format(SERVER_ADDRESS,
                                                       SERVER_PORT))
        PolicyServer(self, SERVER_ADDRESS, SERVER_PORT).serve_forever()
if __name__ == "__main__":
    ray.init()
    register_env("srv", lambda _: CartpoleServing())
    # We use DQN since it supports off-policy actions, but you can choose and
    # configure any agent.
    dqn = DQNTrainer(
        env="srv",
        config={
            # Use a single process to avoid needing to set up a load balancer
            "num_workers": 0,
            # Configure the agent to run short iterations for debugging
            "exploration_fraction": 0.01,
            "learning_starts": 100,
            "timesteps_per_iteration": 200,
        })
    # Attempt to restore from checkpoint if possible.
    if os.path.exists(CHECKPOINT_FILE):
        # Use a context manager so the file handle is closed promptly
        # (the previous bare open(...).read() leaked the handle).
        with open(CHECKPOINT_FILE) as f:
            checkpoint_path = f.read()
        print("Restoring from checkpoint path", checkpoint_path)
        dqn.restore(checkpoint_path)
    # Serving and training loop: train forever, persisting the latest
    # checkpoint path after every iteration so restarts can resume.
    while True:
        print(pretty_print(dqn.train()))
        checkpoint_path = dqn.save()
        print("Last checkpoint", checkpoint_path)
        with open(CHECKPOINT_FILE, "w") as f:
            f.write(checkpoint_path)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/serving/test.sh
|
Shell
|
#!/bin/bash
# Smoke test for the policy server/client example: start the server in the
# background, wait until its port answers, then run the client to a target
# reward and shut the server down.
# Kill any leftover server from a previous run.
pkill -f cartpole_server.py
# Filter the noisy per-request "200" log lines from the server output.
(python cartpole_server.py 2>&1 | grep -v 200) &
# NOTE: $! is the pid of the background subshell (the pipeline), not the
# python process itself; killing it tears the pipeline down.
pid=$!
while ! curl localhost:9900; do
    sleep 1
done
python cartpole_client.py --stop-at-reward=100
kill $pid
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/examples/twostep_game.py
|
Python
|
"""The two-step game from QMIX: https://arxiv.org/pdf/1803.11485.pdf
Configurations you can try:
- normal policy gradients (PG)
- contrib/MADDPG
- QMIX
- APEX_QMIX
See also: centralized_critic.py for centralized critic PPO on this game.
"""
import argparse
from gym.spaces import Tuple, MultiDiscrete, Dict, Discrete
import numpy as np
import ray
from ray import tune
from ray.tune import register_env, grid_search
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.agents.qmix.qmix_policy import ENV_STATE
parser = argparse.ArgumentParser()
parser.add_argument("--stop", type=int, default=50000)
parser.add_argument("--run", type=str, default="PG")
class TwoStepGame(MultiAgentEnv):
    """Cooperative two-agent, two-step matrix game from the QMIX paper.

    State 0: agent 1's action selects which payoff matrix (state 1 or 2)
    is played next. State 1 always pays 7; state 2 pays 8 only if both
    agents pick action 1, else 0/1. Reward is split evenly between agents.
    """
    action_space = Discrete(2)
    def __init__(self, env_config):
        self.state = None
        self.agent_1 = 0
        self.agent_2 = 1
        # MADDPG emits action logits instead of actual discrete actions
        self.actions_are_logits = env_config.get("actions_are_logits", False)
        self.one_hot_state_encoding = env_config.get("one_hot_state_encoding",
                                                     False)
        self.with_state = env_config.get("separate_state_space", False)
        if not self.one_hot_state_encoding:
            self.observation_space = Discrete(6)
            self.with_state = False
        else:
            # Each agent gets the full state (one-hot encoding of which of the
            # three states are active) as input with the receiving agent's
            # ID (1 or 2) concatenated onto the end.
            if self.with_state:
                self.observation_space = Dict({
                    "obs": MultiDiscrete([2, 2, 2, 3]),
                    ENV_STATE: MultiDiscrete([2, 2, 2])
                })
            else:
                self.observation_space = MultiDiscrete([2, 2, 2, 3])
    def reset(self):
        self.state = np.array([1, 0, 0])
        return self._obs()
    def step(self, action_dict):
        if self.actions_are_logits:
            # Sample a discrete action from the provided logits.
            action_dict = {
                k: np.random.choice([0, 1], p=v)
                for k, v in action_dict.items()
            }
        # Index [0] extracts the scalar position of the single active state
        # bit (consistent with agent_1_obs/agent_2_obs below). Comparing the
        # raw length-1 ndarray in `if` relied on deprecated array truthiness.
        state_index = np.flatnonzero(self.state)[0]
        if state_index == 0:
            # Agent 1 alone decides which second-stage matrix is played.
            action = action_dict[self.agent_1]
            assert action in [0, 1], action
            if action == 0:
                self.state = np.array([0, 1, 0])
            else:
                self.state = np.array([0, 0, 1])
            global_rew = 0
            done = False
        elif state_index == 1:
            # Safe branch: fixed payoff regardless of actions.
            global_rew = 7
            done = True
        else:
            # Risky branch: high payoff only on mutual cooperation.
            if action_dict[self.agent_1] == 0 and \
                    action_dict[self.agent_2] == 0:
                global_rew = 0
            elif action_dict[self.agent_1] == 1 and \
                    action_dict[self.agent_2] == 1:
                global_rew = 8
            else:
                global_rew = 1
            done = True
        # The team reward is shared equally.
        rewards = {
            self.agent_1: global_rew / 2.0,
            self.agent_2: global_rew / 2.0
        }
        obs = self._obs()
        dones = {"__all__": done}
        infos = {}
        return obs, rewards, dones, infos
    def _obs(self):
        if self.with_state:
            return {
                self.agent_1: {
                    "obs": self.agent_1_obs(),
                    ENV_STATE: self.state
                },
                self.agent_2: {
                    "obs": self.agent_2_obs(),
                    ENV_STATE: self.state
                }
            }
        else:
            return {
                self.agent_1: self.agent_1_obs(),
                self.agent_2: self.agent_2_obs()
            }
    def agent_1_obs(self):
        if self.one_hot_state_encoding:
            return np.concatenate([self.state, [1]])
        else:
            return np.flatnonzero(self.state)[0]
    def agent_2_obs(self):
        if self.one_hot_state_encoding:
            return np.concatenate([self.state, [2]])
        else:
            # Offset by 3 so the two agents' Discrete(6) obs never collide.
            return np.flatnonzero(self.state)[0] + 3
if __name__ == "__main__":
    args = parser.parse_args()
    # QMIX/APEX_QMIX need the two agents grouped into a single "super agent"
    # with Tuple obs/action spaces.
    grouping = {
        "group_1": [0, 1],
    }
    obs_space = Tuple([
        Dict({
            "obs": MultiDiscrete([2, 2, 2, 3]),
            ENV_STATE: MultiDiscrete([2, 2, 2])
        }),
        Dict({
            "obs": MultiDiscrete([2, 2, 2, 3]),
            ENV_STATE: MultiDiscrete([2, 2, 2])
        }),
    ])
    act_space = Tuple([
        TwoStepGame.action_space,
        TwoStepGame.action_space,
    ])
    register_env(
        "grouped_twostep",
        lambda config: TwoStepGame(config).with_agent_groups(
            grouping, obs_space=obs_space, act_space=act_space))
    if args.run == "contrib/MADDPG":
        # NOTE(review): obs_space_dict / act_space_dict are currently unused.
        obs_space_dict = {
            "agent_1": Discrete(6),
            "agent_2": Discrete(6),
        }
        act_space_dict = {
            "agent_1": TwoStepGame.action_space,
            "agent_2": TwoStepGame.action_space,
        }
        config = {
            "learning_starts": 100,
            "env_config": {
                # MADDPG outputs logits; the env samples actions from them.
                "actions_are_logits": True,
            },
            "multiagent": {
                "policies": {
                    "pol1": (None, Discrete(6), TwoStepGame.action_space, {
                        "agent_id": 0,
                    }),
                    "pol2": (None, Discrete(6), TwoStepGame.action_space, {
                        "agent_id": 1,
                    }),
                },
                "policy_mapping_fn": lambda x: "pol1" if x == 0 else "pol2",
            },
        }
        group = False
    elif args.run == "QMIX":
        config = {
            "sample_batch_size": 4,
            "train_batch_size": 32,
            "exploration_fraction": .4,
            "exploration_final_eps": 0.0,
            "num_workers": 0,
            # Compare no mixing vs QMIX vs VDN mixing networks.
            "mixer": grid_search([None, "qmix", "vdn"]),
            "env_config": {
                "separate_state_space": True,
                "one_hot_state_encoding": True
            },
        }
        group = True
    elif args.run == "APEX_QMIX":
        config = {
            "num_gpus": 0,
            "num_workers": 2,
            "optimizer": {
                "num_replay_buffer_shards": 1,
            },
            "min_iter_time_s": 3,
            "buffer_size": 1000,
            "learning_starts": 1000,
            "train_batch_size": 128,
            "sample_batch_size": 32,
            "target_network_update_freq": 500,
            "timesteps_per_iteration": 1000,
            "env_config": {
                "separate_state_space": True,
                "one_hot_state_encoding": True
            },
        }
        group = True
    else:
        # e.g., plain PG / contrib algorithms on the ungrouped env.
        config = {}
        group = False
    ray.init()
    tune.run(
        args.run,
        stop={
            "timesteps_total": args.stop,
        },
        config=dict(config, **{
            "env": "grouped_twostep" if group else TwoStepGame,
        }),
    )
)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/__init__.py
|
Python
|
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.catalog import ModelCatalog, MODEL_DEFAULTS
from ray.rllib.models.model import Model
from ray.rllib.models.preprocessors import Preprocessor
from ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork
from ray.rllib.models.tf.visionnet_v1 import VisionNetwork
__all__ = [
"ActionDistribution",
"ModelCatalog",
"Model",
"Preprocessor",
"MODEL_DEFAULTS",
"FullyConnectedNetwork", # legacy
"VisionNetwork", # legacy
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/action_dist.py
|
Python
|
from ray.rllib.utils.annotations import DeveloperAPI
@DeveloperAPI
class ActionDistribution:
    """The policy action distribution of an agent.

    Abstract base class: concrete distributions must implement sample(),
    sampled_action_logp(), logp(), kl() and entropy().

    Attributes:
        inputs (Tensors): input vector to compute samples from.
        model (ModelV2): reference to model producing the inputs.
    """
    @DeveloperAPI
    def __init__(self, inputs, model):
        """Initialize the action dist.
        Arguments:
            inputs (Tensors): input vector to compute samples from.
            model (ModelV2): reference to model producing the inputs. This
                is mainly useful if you want to use model variables to compute
                action outputs (i.e., for auto-regressive action distributions,
                see examples/autoregressive_action_dist.py).
        """
        self.inputs = inputs
        self.model = model
    @DeveloperAPI
    def sample(self):
        """Draw a sample from the action distribution."""
        raise NotImplementedError
    @DeveloperAPI
    def sampled_action_logp(self):
        """Returns the log probability of the last sampled action."""
        raise NotImplementedError
    @DeveloperAPI
    def logp(self, x):
        """The log-likelihood of the action distribution."""
        raise NotImplementedError
    @DeveloperAPI
    def kl(self, other):
        """The KL-divergence between two action distributions."""
        raise NotImplementedError
    @DeveloperAPI
    def entropy(self):
        """The entropy of the action distribution."""
        raise NotImplementedError
    # Default implementations: subclasses with factored (e.g. MultiDiscrete)
    # action spaces may override these to return per-component arrays.
    def multi_kl(self, other):
        """The KL-divergence between two action distributions.
        This differs from kl() in that it can return an array for
        MultiDiscrete. TODO(ekl) consider removing this.
        """
        return self.kl(other)
    def multi_entropy(self):
        """The entropy of the action distribution.
        This differs from entropy() in that it can return an array for
        MultiDiscrete. TODO(ekl) consider removing this.
        """
        return self.entropy()
    @DeveloperAPI
    @staticmethod
    def required_model_output_shape(action_space, model_config):
        """Returns the required shape of an input parameter tensor for a
        particular action space and an optional dict of distribution-specific
        options.
        Args:
            action_space (gym.Space): The action space this distribution will
                be used for, whose shape attributes will be used to determine
                the required shape of the input parameter tensor.
            model_config (dict): Model's config dict (as defined in catalog.py)
        Returns:
            model_output_shape (int or np.ndarray of ints): size of the
                required input vector (minus leading batch dimension).
        """
        raise NotImplementedError
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/catalog.py
|
Python
|
import gym
import logging
import numpy as np
from functools import partial
from ray.tune.registry import RLLIB_MODEL, RLLIB_PREPROCESSOR, \
RLLIB_ACTION_DIST, _global_registry
from ray.rllib.models.extra_spaces import Simplex
from ray.rllib.models.torch.torch_action_dist import (TorchCategorical,
TorchDiagGaussian)
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork as FCNetV2
from ray.rllib.models.tf.visionnet_v2 import VisionNetwork as VisionNetV2
from ray.rllib.models.tf.tf_action_dist import (
Categorical, MultiCategorical, Deterministic, DiagGaussian,
MultiActionDistribution, Dirichlet)
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork
from ray.rllib.models.tf.lstm_v1 import LSTM
from ray.rllib.models.tf.modelv1_compat import make_v1_wrapper
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.visionnet_v1 import VisionNetwork
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.annotations import DeveloperAPI, PublicAPI
from ray.rllib.utils.error import UnsupportedSpaceException
tf = try_import_tf()
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
# Fallback model options used when no model config is supplied
# (see e.g. `config = config or MODEL_DEFAULTS` in ModelCatalog below).
MODEL_DEFAULTS = {
    # === Built-in options ===
    # Filter config. List of [out_channels, kernel, stride] for each filter
    "conv_filters": None,
    # Nonlinearity for built-in convnet
    "conv_activation": "relu",
    # Nonlinearity for fully connected net (tanh, relu)
    "fcnet_activation": "tanh",
    # Number of hidden layers for fully connected net
    "fcnet_hiddens": [256, 256],
    # For control envs, documented in ray.rllib.models.Model
    "free_log_std": False,
    # Whether to skip the final linear layer used to resize the hidden layer
    # outputs to size `num_outputs`. If True, then the last hidden layer
    # should already match num_outputs.
    "no_final_linear": False,
    # Whether layers should be shared for the value function.
    "vf_share_layers": True,
    # == LSTM ==
    # Whether to wrap the model with a LSTM
    "use_lstm": False,
    # Max seq len for training the LSTM, defaults to 20
    "max_seq_len": 20,
    # Size of the LSTM cell
    "lstm_cell_size": 256,
    # Whether to feed a_{t-1}, r_{t-1} to LSTM
    "lstm_use_prev_action_reward": False,
    # When using modelv1 models with a modelv2 algorithm, you may have to
    # define the state shape here (e.g., [256, 256]).
    "state_shape": None,
    # == Atari ==
    # Whether to enable framestack for Atari envs
    "framestack": True,
    # Final resized frame dimension
    "dim": 84,
    # (deprecated) Converts ATARI frame to 1 Channel Grayscale image
    "grayscale": False,
    # (deprecated) Changes frame to range from [-1, 1] if true
    "zero_mean": True,
    # === Options for custom models ===
    # Name of a custom model to use
    "custom_model": None,
    # Name of a custom action distribution to use
    "custom_action_dist": None,
    # Extra options to pass to the custom classes
    "custom_options": {},
    # Custom preprocessors are deprecated. Please use a wrapper class around
    # your environment instead to preprocess observations.
    "custom_preprocessor": None,
}
# __sphinx_doc_end__
# yapf: enable
@PublicAPI
class ModelCatalog:
    """Registry of models, preprocessors, and action distributions for envs.

    Examples:
        >>> prep = ModelCatalog.get_preprocessor(env)
        >>> observation = prep.transform(raw_observation)

        >>> dist_class, dist_dim = ModelCatalog.get_action_dist(
                env.action_space, {})
        >>> model = ModelCatalog.get_model(inputs, dist_dim, options)
        >>> dist = dist_class(model.outputs, model)
        >>> action = dist.sample()
    """

    @staticmethod
    @DeveloperAPI
    def get_action_dist(action_space, config, dist_type=None, torch=False):
        """Returns action distribution class and size for the given action space.

        Args:
            action_space (Space): Action space of the target gym env.
            config (dict): Optional model config.
            dist_type (str): Optional identifier of the action distribution.
            torch (bool): Optional whether to return PyTorch distribution.

        Returns:
            dist_class (ActionDistribution): Python class of the distribution.
            dist_dim (int): The size of the input vector to the distribution.

        Raises:
            UnsupportedSpaceException: For multi-dimensional Box spaces.
            NotImplementedError: For unsupported spaces or (space, dist_type,
                torch) combinations.
        """
        config = config or MODEL_DEFAULTS
        # `dist` stays None if no branch below selects a distribution; we
        # raise an explicit error for that case at the end. (Previously the
        # "Unsupported args" raise was unreachable dead code after the
        # return, and unsupported combos surfaced as a NameError instead.)
        dist = None
        if config.get("custom_action_dist"):
            action_dist_name = config["custom_action_dist"]
            logger.debug(
                "Using custom action distribution {}".format(action_dist_name))
            dist = _global_registry.get(RLLIB_ACTION_DIST, action_dist_name)
        elif isinstance(action_space, gym.spaces.Box):
            if len(action_space.shape) > 1:
                raise UnsupportedSpaceException(
                    "Action space has multiple dimensions "
                    "{}. ".format(action_space.shape) +
                    "Consider reshaping this into a single dimension, "
                    "using a custom action distribution, "
                    "using a Tuple action space, or the multi-agent API.")
            if dist_type is None:
                dist = TorchDiagGaussian if torch else DiagGaussian
            elif dist_type == "deterministic":
                dist = Deterministic
        elif isinstance(action_space, gym.spaces.Discrete):
            dist = TorchCategorical if torch else Categorical
        elif isinstance(action_space, gym.spaces.Tuple):
            if torch:
                raise NotImplementedError("Tuple action spaces not supported "
                                          "for Pytorch.")
            # Recursively build one child distribution per sub-space; their
            # flat input vectors are concatenated, hence sum(input_lens).
            child_dist = []
            input_lens = []
            for action in action_space.spaces:
                dist, action_size = ModelCatalog.get_action_dist(
                    action, config)
                child_dist.append(dist)
                input_lens.append(action_size)
            return partial(
                MultiActionDistribution,
                child_distributions=child_dist,
                action_space=action_space,
                input_lens=input_lens), sum(input_lens)
        elif isinstance(action_space, Simplex):
            if torch:
                raise NotImplementedError("Simplex action spaces not "
                                          "supported for Pytorch.")
            dist = Dirichlet
        elif isinstance(action_space, gym.spaces.MultiDiscrete):
            if torch:
                raise NotImplementedError("MultiDiscrete action spaces not "
                                          "supported for Pytorch.")
            # One Categorical head per sub-action; logits are concatenated.
            return partial(MultiCategorical, input_lens=action_space.nvec), \
                int(sum(action_space.nvec))
        elif isinstance(action_space, gym.spaces.Dict):
            raise NotImplementedError(
                "Dict action spaces are not supported, consider using "
                "gym.spaces.Tuple instead")
        if dist is None:
            raise NotImplementedError("Unsupported args: {} {}".format(
                action_space, dist_type))
        return dist, dist.required_model_output_shape(action_space, config)

    @staticmethod
    @DeveloperAPI
    def get_action_shape(action_space):
        """Returns action tensor dtype and shape for the action space.

        Args:
            action_space (Space): Action space of the target gym env.

        Returns:
            (dtype, shape): Dtype and shape of the actions tensor.

        Raises:
            NotImplementedError: For Dict or otherwise unsupported spaces.
        """
        if isinstance(action_space, gym.spaces.Discrete):
            return (tf.int64, (None, ))
        elif isinstance(action_space, (gym.spaces.Box, Simplex)):
            return (tf.float32, (None, ) + action_space.shape)
        elif isinstance(action_space, gym.spaces.MultiDiscrete):
            return (tf.as_dtype(action_space.dtype),
                    (None, ) + action_space.shape)
        elif isinstance(action_space, gym.spaces.Tuple):
            # Tuple actions are flattened into a single vector; it can only
            # stay integer-typed if every sub-space is Discrete.
            size = 0
            all_discrete = True
            for i in range(len(action_space.spaces)):
                if isinstance(action_space.spaces[i], gym.spaces.Discrete):
                    size += 1
                else:
                    all_discrete = False
                    size += np.prod(action_space.spaces[i].shape)
            return (tf.int64 if all_discrete else tf.float32, (None, size))
        elif isinstance(action_space, gym.spaces.Dict):
            raise NotImplementedError(
                "Dict action spaces are not supported, consider using "
                "gym.spaces.Tuple instead")
        else:
            raise NotImplementedError("action space {}"
                                      " not supported".format(action_space))

    @staticmethod
    @DeveloperAPI
    def get_action_placeholder(action_space):
        """Returns an action placeholder consistent with the action space

        Args:
            action_space (Space): Action space of the target gym env.

        Returns:
            action_placeholder (Tensor): A placeholder for the actions
        """
        dtype, shape = ModelCatalog.get_action_shape(action_space)
        return tf.placeholder(dtype, shape=shape, name="action")

    @staticmethod
    @DeveloperAPI
    def get_model_v2(obs_space,
                     action_space,
                     num_outputs,
                     model_config,
                     framework,
                     name="default_model",
                     model_interface=None,
                     default_model=None,
                     **model_kwargs):
        """Returns a suitable model compatible with given spaces and output.

        Args:
            obs_space (Space): Observation space of the target gym env. This
                may have an `original_space` attribute that specifies how to
                unflatten the tensor into a ragged tensor.
            action_space (Space): Action space of the target gym env.
            num_outputs (int): The size of the output vector of the model.
            model_config (dict): The "model" sub-config (see MODEL_DEFAULTS).
            framework (str): Either "tf" or "torch".
            name (str): Name (scope) for the model.
            model_interface (cls): Interface required for the model
            default_model (cls): Override the default class for the model. This
                only has an effect when not using a custom model
            model_kwargs (dict): args to pass to the ModelV2 constructor

        Returns:
            model (ModelV2): Model to use for the policy.
        """
        if model_config.get("custom_model"):
            model_cls = _global_registry.get(RLLIB_MODEL,
                                             model_config["custom_model"])
            if issubclass(model_cls, ModelV2):
                if framework == "tf":
                    logger.info("Wrapping {} as {}".format(
                        model_cls, model_interface))
                    model_cls = ModelCatalog._wrap_if_needed(
                        model_cls, model_interface)
                    created = set()

                    # Track and warn if vars were created but not registered
                    # via model.register_variables().
                    def track_var_creation(next_creator, **kw):
                        v = next_creator(**kw)
                        created.add(v)
                        return v

                    with tf.variable_creator_scope(track_var_creation):
                        instance = model_cls(obs_space, action_space,
                                             num_outputs, model_config, name,
                                             **model_kwargs)
                    registered = set(instance.variables())
                    not_registered = set()
                    for var in created:
                        if var not in registered:
                            not_registered.add(var)
                    if not_registered:
                        raise ValueError(
                            "It looks like variables {} were created as part "
                            "of {} but does not appear in model.variables() "
                            "({}). Did you forget to call "
                            "model.register_variables() on the variables in "
                            "question?".format(not_registered, instance,
                                               registered))
                else:
                    # no variable tracking
                    instance = model_cls(obs_space, action_space, num_outputs,
                                         model_config, name, **model_kwargs)
                return instance
            elif tf.executing_eagerly():
                raise ValueError(
                    "Eager execution requires a TFModelV2 model to be "
                    "used, however you specified a custom model {}".format(
                        model_cls))

        if framework == "tf":
            v2_class = None
            # try to get a default v2 model
            if not model_config.get("custom_model"):
                v2_class = default_model or ModelCatalog._get_v2_model(
                    obs_space, model_config)
            # fallback to a default v1 model
            if v2_class is None:
                if tf.executing_eagerly():
                    raise ValueError(
                        "Eager execution requires a TFModelV2 model to be "
                        "used, however there is no default V2 model for this "
                        "observation space: {}, use_lstm={}".format(
                            obs_space, model_config.get("use_lstm")))
                v2_class = make_v1_wrapper(ModelCatalog.get_model)
            # wrap in the requested interface
            wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
            return wrapper(obs_space, action_space, num_outputs, model_config,
                           name, **model_kwargs)
        elif framework == "torch":
            if default_model:
                return default_model(obs_space, action_space, num_outputs,
                                     model_config, name)
            return ModelCatalog._get_default_torch_model_v2(
                obs_space, action_space, num_outputs, model_config, name)
        else:
            raise NotImplementedError(
                "Framework must be 'tf' or 'torch': {}".format(framework))

    @staticmethod
    @DeveloperAPI
    def get_preprocessor(env, options=None):
        """Returns a suitable preprocessor for the given env.

        This is a wrapper for get_preprocessor_for_space().
        """
        return ModelCatalog.get_preprocessor_for_space(env.observation_space,
                                                       options)

    @staticmethod
    @DeveloperAPI
    def get_preprocessor_for_space(observation_space, options=None):
        """Returns a suitable preprocessor for the given observation space.

        Args:
            observation_space (Space): The input observation space.
            options (dict): Options to pass to the preprocessor.

        Returns:
            preprocessor (Preprocessor): Preprocessor for the observations.

        Raises:
            Exception: If `options` contains keys not in MODEL_DEFAULTS.
        """
        options = options or MODEL_DEFAULTS
        # Reject unknown keys early so config typos surface immediately.
        for k in options.keys():
            if k not in MODEL_DEFAULTS:
                raise Exception("Unknown config key `{}`, all keys: {}".format(
                    k, list(MODEL_DEFAULTS)))

        if options.get("custom_preprocessor"):
            preprocessor = options["custom_preprocessor"]
            logger.info("Using custom preprocessor {}".format(preprocessor))
            logger.warning(
                "DeprecationWarning: Custom preprocessors are deprecated, "
                "since they sometimes conflict with the built-in "
                "preprocessors for handling complex observation spaces. "
                "Please use wrapper classes around your environment "
                "instead of preprocessors.")
            prep = _global_registry.get(RLLIB_PREPROCESSOR, preprocessor)(
                observation_space, options)
        else:
            cls = get_preprocessor(observation_space)
            prep = cls(observation_space, options)

        logger.debug("Created preprocessor {}: {} -> {}".format(
            prep, observation_space, prep.shape))
        return prep

    @staticmethod
    @PublicAPI
    def register_custom_preprocessor(preprocessor_name, preprocessor_class):
        """Register a custom preprocessor class by name.

        The preprocessor can be later used by specifying
        {"custom_preprocessor": preprocesor_name} in the model config.

        Args:
            preprocessor_name (str): Name to register the preprocessor under.
            preprocessor_class (type): Python class of the preprocessor.
        """
        _global_registry.register(RLLIB_PREPROCESSOR, preprocessor_name,
                                  preprocessor_class)

    @staticmethod
    @PublicAPI
    def register_custom_model(model_name, model_class):
        """Register a custom model class by name.

        The model can be later used by specifying {"custom_model": model_name}
        in the model config.

        Args:
            model_name (str): Name to register the model under.
            model_class (type): Python class of the model.
        """
        _global_registry.register(RLLIB_MODEL, model_name, model_class)

    @staticmethod
    @PublicAPI
    def register_custom_action_dist(action_dist_name, action_dist_class):
        """Register a custom action distribution class by name.

        The distribution can be later used by specifying
        {"custom_action_dist": action_dist_name} in the model config.

        Args:
            action_dist_name (str): Name to register the action
                distribution under.
            action_dist_class (type): Python class of the action distribution.
        """
        _global_registry.register(RLLIB_ACTION_DIST, action_dist_name,
                                  action_dist_class)

    @staticmethod
    def _wrap_if_needed(model_cls, model_interface):
        """Return model_cls, subclassed to also satisfy model_interface."""
        assert issubclass(model_cls, TFModelV2), model_cls

        if not model_interface or issubclass(model_cls, model_interface):
            return model_cls

        # Create a mixin subclass on the fly; MRO puts the interface first.
        class wrapper(model_interface, model_cls):
            pass

        name = "{}_as_{}".format(model_cls.__name__, model_interface.__name__)
        wrapper.__name__ = name
        wrapper.__qualname__ = name

        return wrapper

    @staticmethod
    def _get_default_torch_model_v2(obs_space, action_space, num_outputs,
                                    model_config, name):
        """Pick the built-in torch model (vision vs FC) for the obs space."""
        # Deferred imports to avoid importing torch models when unused.
        from ray.rllib.models.torch.fcnet import (FullyConnectedNetwork as
                                                  PyTorchFCNet)
        from ray.rllib.models.torch.visionnet import (VisionNetwork as
                                                      PyTorchVisionNet)

        model_config = model_config or MODEL_DEFAULTS

        if model_config.get("use_lstm"):
            raise NotImplementedError(
                "LSTM auto-wrapping not implemented for torch")

        if isinstance(obs_space, gym.spaces.Discrete):
            obs_rank = 1
        else:
            obs_rank = len(obs_space.shape)

        # Rank > 2 observations (e.g. images) get the conv net.
        if obs_rank > 2:
            return PyTorchVisionNet(obs_space, action_space, num_outputs,
                                    model_config, name)

        return PyTorchFCNet(obs_space, action_space, num_outputs, model_config,
                            name)

    @staticmethod
    def get_model(input_dict,
                  obs_space,
                  action_space,
                  num_outputs,
                  options,
                  state_in=None,
                  seq_lens=None):
        """Deprecated: use get_model_v2() instead."""
        assert isinstance(input_dict, dict)
        options = options or MODEL_DEFAULTS
        model = ModelCatalog._get_model(input_dict, obs_space, action_space,
                                        num_outputs, options, state_in,
                                        seq_lens)

        if options.get("use_lstm"):
            # Feed the base model's feature layer into an LSTM wrapper.
            copy = dict(input_dict)
            copy["obs"] = model.last_layer
            feature_space = gym.spaces.Box(
                -1, 1, shape=(model.last_layer.shape[1], ))
            model = LSTM(copy, feature_space, action_space, num_outputs,
                         options, state_in, seq_lens)

        logger.debug(
            "Created model {}: ({} of {}, {}, {}, {}) -> {}, {}".format(
                model, input_dict, obs_space, action_space, state_in, seq_lens,
                model.outputs, model.state_out))

        model._validate_output_shape()
        return model

    @staticmethod
    def _get_model(input_dict, obs_space, action_space, num_outputs, options,
                   state_in, seq_lens):
        """Build the (deprecated) v1 model: custom, vision, or FC net."""
        if options.get("custom_model"):
            model = options["custom_model"]
            logger.debug("Using custom model {}".format(model))
            return _global_registry.get(RLLIB_MODEL, model)(
                input_dict,
                obs_space,
                action_space,
                num_outputs,
                options,
                state_in=state_in,
                seq_lens=seq_lens)

        obs_rank = len(input_dict["obs"].shape) - 1  # drops batch dim

        if obs_rank > 2:
            return VisionNetwork(input_dict, obs_space, action_space,
                                 num_outputs, options)

        return FullyConnectedNetwork(input_dict, obs_space, action_space,
                                     num_outputs, options)

    @staticmethod
    def _get_v2_model(obs_space, options):
        """Return the default ModelV2 class for obs_space, or None."""
        options = options or MODEL_DEFAULTS
        obs_rank = len(obs_space.shape)

        if options.get("use_lstm"):
            return None  # TODO: default LSTM v2 not implemented

        if obs_rank > 2:
            return VisionNetV2

        return FCNetV2

    @staticmethod
    def get_torch_model(obs_space,
                        num_outputs,
                        options=None,
                        default_model_cls=None):
        raise DeprecationWarning("Please use get_model_v2() instead.")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/extra_spaces.py
|
Python
|
import numpy as np
import gym
class Simplex(gym.Space):
    """Represents a d - 1 dimensional Simplex in R^d.

    That is, all coordinates are in [0, 1] and sum to 1.
    The dimension d of the simplex is assumed to be shape[-1].

    Additionally one can specify the underlying distribution of
    the simplex as a Dirichlet distribution by providing concentration
    parameters. By default, sampling is uniform, i.e. concentration is
    all 1s.

    Example usage:
    self.action_space = spaces.Simplex(shape=(3, 4))
        --> 3 independent 4d Dirichlet with uniform concentration
    """

    def __init__(self, shape, concentration=None, dtype=np.float32):
        assert type(shape) in [tuple, list]
        self.shape = shape
        self.dtype = dtype
        self.dim = shape[-1]

        if concentration is not None:
            assert concentration.shape == shape[:-1]
            # Bug fix: the provided concentration was previously validated
            # but never stored, leaving self.concentration undefined and
            # crashing sample()/__repr__/__eq__.
            self.concentration = concentration
        else:
            self.concentration = [1] * self.dim

        super().__init__(shape, dtype)
        self.np_random = np.random.RandomState()

    def seed(self, seed):
        """Seed the RNG used by sample()."""
        self.np_random.seed(seed)

    def sample(self):
        # Bug fix: use the seeded per-space RNG instead of the global
        # np.random module, so that seed() actually affects sampling.
        return self.np_random.dirichlet(
            self.concentration, size=self.shape[:-1]).astype(self.dtype)

    def contains(self, x):
        """Check shape and that each last-axis slice sums to 1.

        NOTE(review): does not verify coordinates lie in [0, 1]; negative
        values summing to 1 would pass — presumably acceptable upstream.
        """
        return x.shape == self.shape and np.allclose(
            np.sum(x, axis=-1), np.ones_like(x[..., 0]))

    def to_jsonable(self, sample_n):
        return np.array(sample_n).tolist()

    def from_jsonable(self, sample_n):
        return [np.asarray(sample) for sample in sample_n]

    def __repr__(self):
        return "Simplex({}; {})".format(self.shape, self.concentration)

    def __eq__(self, other):
        return np.allclose(self.concentration,
                           other.concentration) and self.shape == other.shape
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/model.py
|
Python
|
from collections import OrderedDict
import logging
import gym
from ray.rllib.models.tf.misc import linear, normc_initializer
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI
from ray.rllib.utils import try_import_tf, try_import_torch
tf = try_import_tf()
torch, _ = try_import_torch()
logger = logging.getLogger(__name__)
class Model:
    """This class is deprecated, please use TFModelV2 instead.

    Legacy TF1 graph-mode model: the constructor eagerly builds the graph by
    calling _build_layers_v2() (preferred) or _build_layers() (fallback).
    """

    def __init__(self,
                 input_dict,
                 obs_space,
                 action_space,
                 num_outputs,
                 options,
                 state_in=None,
                 seq_lens=None):
        assert isinstance(input_dict, dict), input_dict

        # Default attribute values for the non-RNN case
        self.state_init = []
        self.state_in = state_in or []
        self.state_out = []
        self.obs_space = obs_space
        self.action_space = action_space
        self.num_outputs = num_outputs
        self.options = options
        # Capture the TF scope/session active at construction time.
        self.scope = tf.get_variable_scope()
        self.session = tf.get_default_session()
        self.input_dict = input_dict
        if seq_lens is not None:
            self.seq_lens = seq_lens
        else:
            self.seq_lens = tf.placeholder(
                dtype=tf.int32, shape=[None], name="seq_lens")

        # Remember the requested output size before free_log_std halves it;
        # _validate_output_shape() checks against this original value.
        self._num_outputs = num_outputs
        if options.get("free_log_std"):
            # Half the outputs are means; the stddevs come from a free
            # variable appended below, so the net only emits num_outputs // 2.
            assert num_outputs % 2 == 0
            num_outputs = num_outputs // 2

        ok = True
        try:
            restored = input_dict.copy()
            # Unflatten Dict/Tuple observations back to their original shape
            # before handing them to the user-defined layers.
            restored["obs"] = restore_original_dimensions(
                input_dict["obs"], obs_space)
            self.outputs, self.last_layer = self._build_layers_v2(
                restored, num_outputs, options)
        except NotImplementedError:
            ok = False
        # In TF 1.14, you cannot construct variable scopes in exception
        # handlers so we have to set the OK flag and check it here:
        if not ok:
            # Fall back to the older _build_layers() API on the flat obs.
            self.outputs, self.last_layer = self._build_layers(
                input_dict["obs"], num_outputs, options)

        if options.get("free_log_std", False):
            # Append a state-independent log-stddev variable to the outputs;
            # `0.0 * self.outputs` broadcasts log_std to the batch dimension.
            log_std = tf.get_variable(
                name="log_std",
                shape=[num_outputs],
                initializer=tf.zeros_initializer)
            self.outputs = tf.concat(
                [self.outputs, 0.0 * self.outputs + log_std], 1)

    def _build_layers(self, inputs, num_outputs, options):
        """Builds and returns the output and last layer of the network.

        Deprecated: use _build_layers_v2 instead, which has better support
        for dict and tuple spaces.
        """
        raise NotImplementedError

    @PublicAPI
    def _build_layers_v2(self, input_dict, num_outputs, options):
        """Define the layers of a custom model.

        Arguments:
            input_dict (dict): Dictionary of input tensors, including "obs",
                "prev_action", "prev_reward", "is_training".
            num_outputs (int): Output tensor must be of size
                [BATCH_SIZE, num_outputs].
            options (dict): Model options.

        Returns:
            (outputs, feature_layer): Tensors of size [BATCH_SIZE, num_outputs]
                and [BATCH_SIZE, desired_feature_size].

        When using dict or tuple observation spaces, you can access
        the nested sub-observation batches here as well:

        Examples:
            >>> print(input_dict)
            {'prev_actions': <tf.Tensor shape=(?,) dtype=int64>,
             'prev_rewards': <tf.Tensor shape=(?,) dtype=float32>,
             'is_training': <tf.Tensor shape=(), dtype=bool>,
             'obs': OrderedDict([
                ('sensors', OrderedDict([
                    ('front_cam', [
                        <tf.Tensor shape=(?, 10, 10, 3) dtype=float32>,
                        <tf.Tensor shape=(?, 10, 10, 3) dtype=float32>]),
                    ('position', <tf.Tensor shape=(?, 3) dtype=float32>),
                    ('velocity', <tf.Tensor shape=(?, 3) dtype=float32>)]))])}
        """
        raise NotImplementedError

    @PublicAPI
    def value_function(self):
        """Builds the value function output.

        This method can be overridden to customize the implementation of the
        value function (e.g., not sharing hidden layers).

        Returns:
            Tensor of size [BATCH_SIZE] for the value function.
        """
        # Default: a linear head on the shared feature layer.
        return tf.reshape(
            linear(self.last_layer, 1, "value", normc_initializer(1.0)), [-1])

    @PublicAPI
    def custom_loss(self, policy_loss, loss_inputs):
        """Override to customize the loss function used to optimize this model.

        This can be used to incorporate self-supervised losses (by defining
        a loss over existing input and output tensors of this model), and
        supervised losses (by defining losses over a variable-sharing copy of
        this model's layers).

        You can find an runnable example in examples/custom_loss.py.

        Arguments:
            policy_loss (Tensor): scalar policy loss from the policy.
            loss_inputs (dict): map of input placeholders for rollout data.

        Returns:
            Scalar tensor for the customized loss for this model.
        """
        if self.loss() is not None:
            raise DeprecationWarning(
                "self.loss() is deprecated, use self.custom_loss() instead.")
        return policy_loss

    @PublicAPI
    def custom_stats(self):
        """Override to return custom metrics from your model.

        The stats will be reported as part of the learner stats, i.e.,
            info:
                learner:
                    model:
                        key1: metric1
                        key2: metric2

        Returns:
            Dict of string keys to scalar tensors.
        """
        return {}

    def loss(self):
        """Deprecated: use self.custom_loss()."""
        return None

    @classmethod
    def get_initial_state(cls, obs_space, action_space, num_outputs, options):
        raise NotImplementedError(
            "In order to use recurrent models with ModelV2, you should define "
            "the get_initial_state @classmethod on your custom model class.")

    def _validate_output_shape(self):
        """Checks that the model has the correct number of outputs."""
        try:
            out = tf.convert_to_tensor(self.outputs)
            shape = out.shape.as_list()
        except Exception:
            raise ValueError("Output is not a tensor: {}".format(self.outputs))
        else:
            # Outputs must be [batch, num_outputs] with the ORIGINAL
            # (pre-free_log_std-halving) output count.
            if len(shape) != 2 or shape[1] != self._num_outputs:
                raise ValueError(
                    "Expected output shape of [None, {}], got {}".format(
                        self._num_outputs, shape))
@DeveloperAPI
def flatten(obs, framework):
    """Flatten `obs` to rank 2 (batch, features) with the given framework.

    Arguments:
        obs: The tensor to flatten (batch dimension is preserved).
        framework (str): Either "tf" or "torch".

    Raises:
        NotImplementedError: For any other framework string.
    """
    if framework == "tf":
        return tf.layers.flatten(obs)
    if framework == "torch":
        assert torch is not None
        return torch.flatten(obs, start_dim=1)
    raise NotImplementedError("flatten", framework)
@DeveloperAPI
def restore_original_dimensions(obs, obs_space, tensorlib=tf):
    """Unpacks Dict and Tuple space observations into their original form.

    This is needed since we flatten Dict and Tuple observations in transit.
    Before sending them to the model though, we should unflatten them into
    Dicts or Tuples of tensors.

    Arguments:
        obs: The flattened observation tensor.
        obs_space: The flattened obs space. If this has the `original_space`
            attribute, we will unflatten the tensor to that shape.
        tensorlib: The library used to unflatten (reshape) the array/tensor;
            may also be the string "tf" or "torch".

    Returns:
        single tensor or dict / tuple of tensors matching the original
        observation space.
    """
    # Spaces without an `original_space` were never flattened; pass through.
    if not hasattr(obs_space, "original_space"):
        return obs
    # Allow the framework to be given as a string alias.
    if tensorlib == "tf":
        tensorlib = tf
    elif tensorlib == "torch":
        assert torch is not None
        tensorlib = torch
    return _unpack_obs(obs, obs_space.original_space, tensorlib=tensorlib)
# Cache of preprocessors, for if the user is calling unpack obs often.
# Keyed by id(space); bounded at 999 entries below.
# NOTE(review): id() keys can collide if a space is garbage-collected and a
# new one reuses its address — presumably spaces live for the whole run.
_cache = {}


def _unpack_obs(obs, space, tensorlib=tf):
    """Unpack a flattened Dict or Tuple observation array/tensor.

    Recursively slices the flat [batch, size] tensor into the per-sub-space
    segments and reshapes each back to its original shape.

    Arguments:
        obs: The flattened observation tensor ([batch, total_size]).
        space: The original space prior to flattening.
        tensorlib: The library used to unflatten (reshape) the array/tensor.

    Returns:
        For Dict spaces an OrderedDict, for Tuple spaces a list, otherwise
        `obs` unchanged.

    Raises:
        ValueError: If obs is not rank 2 or its width does not match the
            flattened size of `space`.
    """
    if (isinstance(space, gym.spaces.Dict)
            or isinstance(space, gym.spaces.Tuple)):
        if id(space) in _cache:
            prep = _cache[id(space)]
        else:
            prep = get_preprocessor(space)(space)
            # Make an attempt to cache the result, if enough space left.
            if len(_cache) < 999:
                _cache[id(space)] = prep
        # The flat obs must be [batch, flattened_space_size].
        if len(obs.shape) != 2 or obs.shape[1] != prep.shape[0]:
            raise ValueError(
                "Expected flattened obs shape of [None, {}], got {}".format(
                    prep.shape[0], obs.shape))
        assert len(prep.preprocessors) == len(space.spaces), \
            (len(prep.preprocessors) == len(space.spaces))
        # Walk the flat vector, carving out each sub-space's segment in order.
        offset = 0
        if isinstance(space, gym.spaces.Tuple):
            u = []
            for p, v in zip(prep.preprocessors, space.spaces):
                obs_slice = obs[:, offset:offset + p.size]
                offset += p.size
                u.append(
                    _unpack_obs(
                        tensorlib.reshape(obs_slice, [-1] + list(p.shape)),
                        v,
                        tensorlib=tensorlib))
        else:
            u = OrderedDict()
            for p, (k, v) in zip(prep.preprocessors, space.spaces.items()):
                obs_slice = obs[:, offset:offset + p.size]
                offset += p.size
                u[k] = _unpack_obs(
                    tensorlib.reshape(obs_slice, [-1] + list(p.shape)),
                    v,
                    tensorlib=tensorlib)
        return u
    else:
        # Primitive space: nothing to unpack.
        return obs
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/modelv2.py
|
Python
|
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models.model import restore_original_dimensions, flatten
from ray.rllib.utils.annotations import PublicAPI
@PublicAPI
class ModelV2:
    """Defines a Keras-style abstract network model for use with RLlib.

    Custom models should extend either TFModelV2 or TorchModelV2 instead of
    this class directly.

    Data flow:
        obs -> forward() -> model_out
               value_function() -> V(s)

    Attributes:
        obs_space (Space): observation space of the target gym env. This
            may have an `original_space` attribute that specifies how to
            unflatten the tensor into a ragged tensor.
        action_space (Space): action space of the target gym env
        num_outputs (int): number of output units of the model
        model_config (dict): config for the model, documented in ModelCatalog
        name (str): name (scope) for the model
        framework (str): either "tf" or "torch"
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name, framework):
        """Initialize the model.

        This method should create any variables used by the model.
        """
        self.obs_space = obs_space
        self.action_space = action_space
        self.num_outputs = num_outputs
        self.model_config = model_config
        self.name = name or "default_model"
        self.framework = framework
        # Cached output of the most recent __call__ (see last_output()).
        self._last_output = None

    def get_initial_state(self):
        """Get the initial recurrent state values for the model.

        Returns:
            list of np.array objects, if any
        """
        return []

    def forward(self, input_dict, state, seq_lens):
        """Call the model with the given input tensors and state.

        Any complex observations (dicts, tuples, etc.) will be unpacked by
        __call__ before being passed to forward(). To access the flattened
        observation tensor, refer to input_dict["obs_flat"].

        This method can be called any number of times. In eager execution,
        each call to forward() will eagerly evaluate the model. In symbolic
        execution, each call to forward creates a computation graph that
        operates over the variables of this model (i.e., shares weights).

        Custom models should override this instead of __call__.

        Arguments:
            input_dict (dict): dictionary of input tensors, including "obs",
                "obs_flat", "prev_action", "prev_reward", "is_training"
            state (list): list of state tensors with sizes matching those
                returned by get_initial_state + the batch dimension
            seq_lens (Tensor): 1d tensor holding input sequence lengths

        Returns:
            (outputs, state): The model output tensor of size
                [BATCH, num_outputs]
        """
        raise NotImplementedError

    def value_function(self):
        """Return the value function estimate for the most recent forward pass.

        Returns:
            value estimate tensor of shape [BATCH].
        """
        raise NotImplementedError

    def custom_loss(self, policy_loss, loss_inputs):
        """Override to customize the loss function used to optimize this model.

        This can be used to incorporate self-supervised losses (by defining
        a loss over existing input and output tensors of this model), and
        supervised losses (by defining losses over a variable-sharing copy of
        this model's layers).

        You can find an runnable example in examples/custom_loss.py.

        Arguments:
            policy_loss (Tensor): scalar policy loss from the policy.
            loss_inputs (dict): map of input placeholders for rollout data.

        Returns:
            Scalar tensor for the customized loss for this model.
        """
        return policy_loss

    def metrics(self):
        """Override to return custom metrics from your model.

        The stats will be reported as part of the learner stats, i.e.,
            info:
                learner:
                    model:
                        key1: metric1
                        key2: metric2

        Returns:
            Dict of string keys to scalar tensors.
        """
        return {}

    def __call__(self, input_dict, state=None, seq_lens=None):
        """Call the model with the given input tensors and state.

        This is the method used by RLlib to execute the forward pass. It calls
        forward() internally after unpacking nested observation tensors.

        Custom models should override forward() instead of __call__.

        Arguments:
            input_dict (dict): dictionary of input tensors, including "obs",
                "prev_action", "prev_reward", "is_training"
            state (list): list of state tensors with sizes matching those
                returned by get_initial_state + the batch dimension
            seq_lens (Tensor): 1d tensor holding input sequence lengths

        Returns:
            (outputs, state): The model output tensor of size
                [BATCH, output_spec.size] or a list of tensors corresponding to
                output_spec.shape_list, and a list of state tensors of
                [BATCH, state_size_i].
        """
        # Unflatten Dict/Tuple observations; keep the flat version available
        # under "obs_flat" for models that want it.
        restored = input_dict.copy()
        restored["obs"] = restore_original_dimensions(
            input_dict["obs"], self.obs_space, self.framework)
        if len(input_dict["obs"].shape) > 2:
            restored["obs_flat"] = flatten(input_dict["obs"], self.framework)
        else:
            restored["obs_flat"] = input_dict["obs"]
        with self.context():
            res = self.forward(restored, state or [], seq_lens)
        if ((not isinstance(res, list) and not isinstance(res, tuple))
                or len(res) != 2):
            raise ValueError(
                "forward() must return a tuple of (output, state) tensors, "
                "got {}".format(res))
        outputs, state = res

        # Validate the output has the declared [batch, num_outputs] shape.
        try:
            shape = outputs.shape
        except AttributeError:
            raise ValueError("Output is not a tensor: {}".format(outputs))
        else:
            if len(shape) != 2 or shape[1] != self.num_outputs:
                raise ValueError(
                    "Expected output shape of [None, {}], got {}".format(
                        self.num_outputs, shape))
        if not isinstance(state, list):
            raise ValueError("State output is not a list: {}".format(state))

        self._last_output = outputs
        return outputs, state

    def from_batch(self, train_batch, is_training=True):
        """Convenience function that calls this model with a tensor batch.

        All this does is unpack the tensor batch to call this model with the
        right input dict, state, and seq len arguments.
        """
        input_dict = {
            "obs": train_batch[SampleBatch.CUR_OBS],
            "is_training": is_training,
        }
        if SampleBatch.PREV_ACTIONS in train_batch:
            input_dict["prev_actions"] = train_batch[SampleBatch.PREV_ACTIONS]
        if SampleBatch.PREV_REWARDS in train_batch:
            input_dict["prev_rewards"] = train_batch[SampleBatch.PREV_REWARDS]
        # Collect recurrent state inputs state_in_0, state_in_1, ... in order.
        states = []
        i = 0
        while "state_in_{}".format(i) in train_batch:
            states.append(train_batch["state_in_{}".format(i)])
            i += 1
        return self.__call__(input_dict, states, train_batch.get("seq_lens"))

    def last_output(self):
        """Returns the last output returned from calling the model."""
        return self._last_output

    def context(self):
        """Returns a contextmanager for the current forward pass."""
        return NullContextManager()
class NullContextManager:
    """A no-op context manager: entering and exiting have no side effects."""

    def __init__(self):
        pass

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/preprocessors.py
|
Python
|
from collections import OrderedDict
import cv2
import logging
import numpy as np
import gym
from ray.rllib.utils.annotations import override, PublicAPI
ATARI_OBS_SHAPE = (210, 160, 3)
ATARI_RAM_OBS_SHAPE = (128, )
VALIDATION_INTERVAL = 100
logger = logging.getLogger(__name__)
@PublicAPI
class Preprocessor:
    """Defines an abstract observation preprocessor function.

    Attributes:
        shape (obj): Shape of the preprocessed output.
    """

    @PublicAPI
    def __init__(self, obs_space, options=None):
        legacy_patch_shapes(obs_space)
        self._obs_space = obs_space
        if not options:
            # Imported lazily to avoid a circular import with catalog.py.
            from ray.rllib.models.catalog import MODEL_DEFAULTS
            self._options = MODEL_DEFAULTS.copy()
        else:
            self._options = options
        self.shape = self._init_shape(obs_space, self._options)
        # Total number of elements in a flattened observation.
        self._size = int(np.product(self.shape))
        # Call counter used to rate-limit check_shape() validation.
        self._i = 0

    @PublicAPI
    def _init_shape(self, obs_space, options):
        """Returns the shape after preprocessing."""
        raise NotImplementedError

    @PublicAPI
    def transform(self, observation):
        """Returns the preprocessed observation."""
        raise NotImplementedError

    def write(self, observation, array, offset):
        """Alternative to transform for more efficient flattening."""
        array[offset:offset + self._size] = self.transform(observation)

    def check_shape(self, observation):
        """Checks the shape of the given observation.

        Only validates every VALIDATION_INTERVAL-th call to keep the
        per-step overhead low.
        """
        if self._i % VALIDATION_INTERVAL == 0:
            # Allow plain Python lists for Box spaces by converting first.
            if type(observation) is list and isinstance(
                    self._obs_space, gym.spaces.Box):
                observation = np.array(observation)
            try:
                if not self._obs_space.contains(observation):
                    raise ValueError(
                        "Observation outside expected value range",
                        self._obs_space, observation)
            except AttributeError:
                raise ValueError(
                    "Observation for a Box/MultiBinary/MultiDiscrete space "
                    "should be an np.array, not a Python list.", observation)
        self._i += 1

    @property
    @PublicAPI
    def size(self):
        # Flattened element count of the preprocessed observation.
        return self._size

    @property
    @PublicAPI
    def observation_space(self):
        obs_space = gym.spaces.Box(-1., 1., self.shape, dtype=np.float32)
        # Stash the unwrapped space so that we can unwrap dict and tuple spaces
        # automatically in model.py
        if (isinstance(self, TupleFlatteningPreprocessor)
                or isinstance(self, DictFlatteningPreprocessor)):
            obs_space.original_space = self._obs_space
        return obs_space
class GenericPixelPreprocessor(Preprocessor):
    """Generic image preprocessor.

    Note: for Atari games, use config {"preprocessor_pref": "deepmind"}
    instead for deepmind-style Atari preprocessing.
    """

    @override(Preprocessor)
    def _init_shape(self, obs_space, options):
        # Output is a square image of side `dim`, 1 channel if grayscale
        # else 3.
        self._grayscale = options.get("grayscale")
        self._zero_mean = options.get("zero_mean")
        self._dim = options.get("dim")
        if self._grayscale:
            shape = (self._dim, self._dim, 1)
        else:
            shape = (self._dim, self._dim, 3)
        return shape

    @override(Preprocessor)
    def transform(self, observation):
        """Downsamples images from (210, 160, 3) by the configured factor."""
        self.check_shape(observation)
        # Crop 25 rows from top and bottom (assumes Atari-sized frames;
        # this class is only selected for ATARI_OBS_SHAPE observations).
        scaled = observation[25:-25, :, :]
        if self._dim < 84:
            scaled = cv2.resize(scaled, (84, 84))
        # OpenAI: Resize by half, then down to 42x42 (essentially mipmapping).
        # If we resize directly we lose pixels that, when mapped to 42x42,
        # aren't close enough to the pixel boundary.
        scaled = cv2.resize(scaled, (self._dim, self._dim))
        if self._grayscale:
            # Average the color channels, then restore the channel axis so
            # the output keeps rank 3 as declared by _init_shape.
            scaled = scaled.mean(2)
            scaled = scaled.astype(np.float32)
            # Rescale needed for maintaining 1 channel
            scaled = np.reshape(scaled, [self._dim, self._dim, 1])
        if self._zero_mean:
            # Map [0, 255] to roughly [-1, 1].
            scaled = (scaled - 128) / 128
        else:
            # Map [0, 255] to [0, 1].
            scaled *= 1.0 / 255.0
        return scaled
class AtariRamPreprocessor(Preprocessor):
    """Rescales 128-byte Atari RAM observations around zero."""

    @override(Preprocessor)
    def _init_shape(self, obs_space, options):
        # Atari RAM is always a flat vector of 128 bytes.
        return (128, )

    @override(Preprocessor)
    def transform(self, observation):
        self.check_shape(observation)
        centered = observation - 128
        return centered / 128
class OneHotPreprocessor(Preprocessor):
    """Encodes Discrete observations as one-hot vectors."""

    @override(Preprocessor)
    def _init_shape(self, obs_space, options):
        # One slot per discrete value.
        return (self._obs_space.n, )

    @override(Preprocessor)
    def transform(self, observation):
        self.check_shape(observation)
        one_hot = np.zeros(self._obs_space.n)
        one_hot[observation] = 1
        return one_hot

    @override(Preprocessor)
    def write(self, observation, array, offset):
        # Only the hot index needs writing; the target buffer is
        # expected to be zero-initialized by the caller.
        array[offset + observation] = 1
class NoPreprocessor(Preprocessor):
    """Identity preprocessor: passes observations through unchanged."""

    @override(Preprocessor)
    def _init_shape(self, obs_space, options):
        return self._obs_space.shape

    @override(Preprocessor)
    def transform(self, observation):
        self.check_shape(observation)
        return observation

    @override(Preprocessor)
    def write(self, observation, array, offset):
        # Flatten into the destination buffer without copying the source.
        flat = np.array(observation, copy=False).ravel()
        array[offset:offset + self._size] = flat

    @property
    @override(Preprocessor)
    def observation_space(self):
        # Unlike other preprocessors, report the original space as-is.
        return self._obs_space
class TupleFlatteningPreprocessor(Preprocessor):
    """Preprocesses each tuple element, then flattens it all into a vector.

    RLlib models will unpack the flattened output before _build_layers_v2().
    """

    @override(Preprocessor)
    def _init_shape(self, obs_space, options):
        """Builds one sub-preprocessor per tuple element.

        Returns the concatenated flattened size as a 1-tuple.
        """
        assert isinstance(self._obs_space, gym.spaces.Tuple)
        size = 0
        self.preprocessors = []
        for i in range(len(self._obs_space.spaces)):
            space = self._obs_space.spaces[i]
            logger.debug("Creating sub-preprocessor for {}".format(space))
            preprocessor = get_preprocessor(space)(space, self._options)
            self.preprocessors.append(preprocessor)
            size += preprocessor.size
        return (size, )

    @override(Preprocessor)
    def transform(self, observation):
        self.check_shape(observation)
        # Allocate as float32 so the output dtype matches the declared
        # observation_space (Box(..., dtype=np.float32)); np.zeros would
        # otherwise default to float64.
        array = np.zeros(self.shape, dtype=np.float32)
        self.write(observation, array, 0)
        return array

    @override(Preprocessor)
    def write(self, observation, array, offset):
        # Each sub-preprocessor writes its slice; offsets advance by size.
        assert len(observation) == len(self.preprocessors), observation
        for o, p in zip(observation, self.preprocessors):
            p.write(o, array, offset)
            offset += p.size
class DictFlatteningPreprocessor(Preprocessor):
    """Preprocesses each dict value, then flattens it all into a vector.

    RLlib models will unpack the flattened output before _build_layers_v2().
    """

    @override(Preprocessor)
    def _init_shape(self, obs_space, options):
        """Builds one sub-preprocessor per dict value.

        Returns the concatenated flattened size as a 1-tuple.
        """
        assert isinstance(self._obs_space, gym.spaces.Dict)
        size = 0
        self.preprocessors = []
        for space in self._obs_space.spaces.values():
            logger.debug("Creating sub-preprocessor for {}".format(space))
            preprocessor = get_preprocessor(space)(space, self._options)
            self.preprocessors.append(preprocessor)
            size += preprocessor.size
        return (size, )

    @override(Preprocessor)
    def transform(self, observation):
        self.check_shape(observation)
        # Allocate as float32 so the output dtype matches the declared
        # observation_space (Box(..., dtype=np.float32)); np.zeros would
        # otherwise default to float64.
        array = np.zeros(self.shape, dtype=np.float32)
        self.write(observation, array, 0)
        return array

    @override(Preprocessor)
    def write(self, observation, array, offset):
        # Sort keys so value order matches the (sorted) Dict space order
        # used when the sub-preprocessors were created.
        if not isinstance(observation, OrderedDict):
            observation = OrderedDict(sorted(observation.items()))
        assert len(observation) == len(self.preprocessors), \
            (len(observation), len(self.preprocessors))
        for o, p in zip(observation.values(), self.preprocessors):
            p.write(o, array, offset)
            offset += p.size
@PublicAPI
def get_preprocessor(space):
    """Returns an appropriate preprocessor class for the given space."""
    legacy_patch_shapes(space)
    obs_shape = space.shape
    # NOTE: check order matters — Discrete first, then the Atari shape
    # special cases, then the structured-space flatteners.
    if isinstance(space, gym.spaces.Discrete):
        return OneHotPreprocessor
    if obs_shape == ATARI_OBS_SHAPE:
        return GenericPixelPreprocessor
    if obs_shape == ATARI_RAM_OBS_SHAPE:
        return AtariRamPreprocessor
    if isinstance(space, gym.spaces.Tuple):
        return TupleFlatteningPreprocessor
    if isinstance(space, gym.spaces.Dict):
        return DictFlatteningPreprocessor
    return NoPreprocessor
def legacy_patch_shapes(space):
    """Assigns shapes to spaces that don't have shapes.

    This is only needed for older gym versions that don't set shapes properly
    for Tuple and Discrete spaces.
    """
    if not hasattr(space, "shape"):
        if isinstance(space, gym.spaces.Discrete):
            space.shape = ()
        elif isinstance(space, gym.spaces.Tuple):
            # Recursively patch the children, then record their shapes.
            space.shape = tuple(
                legacy_patch_shapes(sub) for sub in space.spaces)
    return space.shape
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/fcnet_v1.py
|
Python
|
from ray.rllib.models.model import Model
from ray.rllib.models.tf.misc import normc_initializer, get_activation_fn
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
# Deprecated: see as an alternative models/tf/fcnet_v2.py
class FullyConnectedNetwork(Model):
    """Generic fully connected network."""

    @override(Model)
    def _build_layers(self, inputs, num_outputs, options):
        """Process the flattened inputs.

        Note that dict inputs will be flattened into a vector. To define a
        model that processes the components separately, use _build_layers_v2().

        Returns:
            (output, last_layer): the [BATCH, num_outputs] output tensor and
                the hidden layer feeding it.
        """
        hiddens = options.get("fcnet_hiddens")
        activation = get_activation_fn(options.get("fcnet_activation"))
        # Flatten any image-like input down to [BATCH, features].
        if len(inputs.shape) > 2:
            inputs = tf.layers.flatten(inputs)
        with tf.name_scope("fc_net"):
            i = 1
            last_layer = inputs
            for size in hiddens:
                # skip final linear layer
                if options.get("no_final_linear") and i == len(hiddens):
                    # The last hidden layer doubles as the output: it is
                    # resized to num_outputs and keeps the nonlinearity.
                    output = tf.layers.dense(
                        last_layer,
                        num_outputs,
                        kernel_initializer=normc_initializer(1.0),
                        activation=activation,
                        name="fc_out")
                    return output, output
                label = "fc{}".format(i)
                last_layer = tf.layers.dense(
                    last_layer,
                    size,
                    kernel_initializer=normc_initializer(1.0),
                    activation=activation,
                    name=label)
                i += 1
            # Final linear projection; the small (0.01) init keeps the
            # initial outputs near zero.
            output = tf.layers.dense(
                last_layer,
                num_outputs,
                kernel_initializer=normc_initializer(0.01),
                activation=None,
                name="fc_out")
            return output, last_layer
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/fcnet_v2.py
|
Python
|
import numpy as np
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.misc import normc_initializer, get_activation_fn
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class FullyConnectedNetwork(TFModelV2):
    """Generic fully connected network implemented in ModelV2 API."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(FullyConnectedNetwork, self).__init__(
            obs_space, action_space, num_outputs, model_config, name)

        activation = get_activation_fn(model_config.get("fcnet_activation"))
        hiddens = model_config.get("fcnet_hiddens")
        no_final_linear = model_config.get("no_final_linear")
        vf_share_layers = model_config.get("vf_share_layers")

        # we are using obs_flat, so take the flattened shape as input
        inputs = tf.keras.layers.Input(
            shape=(np.product(obs_space.shape), ), name="observations")
        last_layer = inputs
        i = 1

        if no_final_linear:
            # the last layer is adjusted to be of size num_outputs
            for size in hiddens[:-1]:
                last_layer = tf.keras.layers.Dense(
                    size,
                    name="fc_{}".format(i),
                    activation=activation,
                    kernel_initializer=normc_initializer(1.0))(last_layer)
                i += 1
            layer_out = tf.keras.layers.Dense(
                num_outputs,
                name="fc_out",
                activation=activation,
                kernel_initializer=normc_initializer(1.0))(last_layer)
        else:
            # the last layer is a linear to size num_outputs
            for size in hiddens:
                last_layer = tf.keras.layers.Dense(
                    size,
                    name="fc_{}".format(i),
                    activation=activation,
                    kernel_initializer=normc_initializer(1.0))(last_layer)
                i += 1
            # Small init so initial policy outputs are near zero.
            layer_out = tf.keras.layers.Dense(
                num_outputs,
                name="fc_out",
                activation=None,
                kernel_initializer=normc_initializer(0.01))(last_layer)

        if not vf_share_layers:
            # build a parallel set of hidden layers for the value net
            last_layer = inputs
            i = 1
            for size in hiddens:
                last_layer = tf.keras.layers.Dense(
                    size,
                    name="fc_value_{}".format(i),
                    activation=activation,
                    kernel_initializer=normc_initializer(1.0))(last_layer)
                i += 1
        # Scalar value head; branches off the shared stack when
        # vf_share_layers is set, else off the parallel value stack.
        value_out = tf.keras.layers.Dense(
            1,
            name="value_out",
            activation=None,
            kernel_initializer=normc_initializer(0.01))(last_layer)

        self.base_model = tf.keras.Model(inputs, [layer_out, value_out])
        self.register_variables(self.base_model.variables)

    def forward(self, input_dict, state, seq_lens):
        # Keeps the value output around for value_function().
        model_out, self._value_out = self.base_model(input_dict["obs_flat"])
        return model_out, state

    def value_function(self):
        # Flatten [BATCH, 1] -> [BATCH].
        return tf.reshape(self._value_out, [-1])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/lstm_v1.py
|
Python
|
import numpy as np
from ray.rllib.models.model import Model
from ray.rllib.models.tf.misc import linear, normc_initializer
from ray.rllib.policy.rnn_sequencing import add_time_dimension
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
# Deprecated: see as an alternative models/tf/recurrent_tf_modelv2.py
class LSTM(Model):
    """Adds a LSTM cell on top of some other model output.

    Uses a linear layer at the end for output.

    Important: we assume inputs is a padded batch of sequences denoted by
    self.seq_lens. See add_time_dimension() for more information.
    """

    @override(Model)
    def _build_layers_v2(self, input_dict, num_outputs, options):
        cell_size = options.get("lstm_cell_size")
        if options.get("lstm_use_prev_action_reward"):
            # Concatenate [obs, flattened prev action, prev reward] as the
            # per-timestep feature vector.
            action_dim = int(
                np.product(
                    input_dict["prev_actions"].get_shape().as_list()[1:]))
            features = tf.concat(
                [
                    input_dict["obs"],
                    tf.reshape(
                        tf.cast(input_dict["prev_actions"], tf.float32),
                        [-1, action_dim]),
                    tf.reshape(input_dict["prev_rewards"], [-1, 1]),
                ],
                axis=1)
        else:
            features = input_dict["obs"]
        # Reshape [B*T, ...] -> [B, T, ...] for dynamic_rnn.
        last_layer = add_time_dimension(features, self.seq_lens)

        # Setup the LSTM cell
        lstm = tf.nn.rnn_cell.LSTMCell(cell_size, state_is_tuple=True)
        self.state_init = [
            np.zeros(lstm.state_size.c, np.float32),
            np.zeros(lstm.state_size.h, np.float32)
        ]

        # Setup LSTM inputs: reuse existing state placeholders if the base
        # class already created them, else make fresh ones.
        if self.state_in:
            c_in, h_in = self.state_in
        else:
            c_in = tf.placeholder(
                tf.float32, [None, lstm.state_size.c], name="c")
            h_in = tf.placeholder(
                tf.float32, [None, lstm.state_size.h], name="h")
            self.state_in = [c_in, h_in]

        # Setup LSTM outputs
        state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_out, lstm_state = tf.nn.dynamic_rnn(
            lstm,
            last_layer,
            initial_state=state_in,
            sequence_length=self.seq_lens,
            time_major=False,
            dtype=tf.float32)

        self.state_out = list(lstm_state)

        # Compute outputs: collapse time back into the batch dim, then a
        # small-init linear head for the action logits.
        last_layer = tf.reshape(lstm_out, [-1, cell_size])
        logits = linear(last_layer, num_outputs, "action",
                        normc_initializer(0.01))
        return logits, last_layer
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/misc.py
|
Python
|
import numpy as np
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def normc_initializer(std=1.0):
    """Returns a TF initializer sampling a random matrix whose columns are
    rescaled to have L2 norm `std`."""

    def _initializer(shape, dtype=None, partition_info=None):
        sample = np.random.randn(*shape).astype(np.float32)
        col_norms = np.sqrt(np.square(sample).sum(axis=0, keepdims=True))
        sample *= std / col_norms
        return tf.constant(sample)

    return _initializer
def get_activation_fn(name):
    """Returns a TF activation function by name, or None for no activation.

    Args:
        name (str|None): e.g. "relu" or "tanh" (looked up on tf.nn);
            "linear" or None both mean no activation (callers pass
            activation=None to tf layers).

    Raises:
        AttributeError: if `name` is not an attribute of tf.nn.
    """
    # Treat a missing activation the same as an explicit "linear" one,
    # instead of crashing on getattr(tf.nn, None).
    if name in (None, "linear"):
        return None
    return getattr(tf.nn, name)
def conv2d(x,
           num_filters,
           name,
           filter_size=(3, 3),
           stride=(1, 1),
           pad="SAME",
           dtype=None,
           collections=None):
    """2D convolution layer with Glorot-style uniform weight init.

    Args:
        x: input tensor of shape [batch, height, width, channels].
        num_filters (int): number of output channels.
        name (str): variable scope for the layer's W and b.
        filter_size (tuple): filter height and width.
        stride (tuple): vertical and horizontal stride.
        pad (str): "SAME" or "VALID" padding mode.
        dtype: variable dtype; defaults to tf.float32.
        collections: optional variable collections for W and b.

    Returns:
        The convolved tensor plus a broadcast bias.
    """
    if dtype is None:
        dtype = tf.float32

    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [
            filter_size[0], filter_size[1],
            int(x.get_shape()[3]), num_filters
        ]

        # There are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit.
        fan_in = np.prod(filter_shape[:3])
        # Each unit in the lower layer receives a gradient from: "num output
        # feature maps * filter height * filter width" / pooling size.
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # Initialize weights with random weights.
        # sqrt(6 / (fan_in + fan_out)) is the Glorot/Xavier uniform bound.
        w_bound = np.sqrt(6 / (fan_in + fan_out))

        w = tf.get_variable(
            "W",
            filter_shape,
            dtype,
            tf.random_uniform_initializer(-w_bound, w_bound),
            collections=collections)
        b = tf.get_variable(
            "b", [1, 1, 1, num_filters],
            initializer=tf.constant_initializer(0.0),
            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
def linear(x, size, name, initializer=None, bias_init=0):
    """Fully-connected layer: returns x @ W + b, with W and b created as
    variables named `name`/w and `name`/b."""
    weights = tf.get_variable(
        name + "/w", [x.get_shape()[1], size], initializer=initializer)
    bias = tf.get_variable(
        name + "/b", [size], initializer=tf.constant_initializer(bias_init))
    return tf.matmul(x, weights) + bias
def flatten(x):
    """Collapses all non-batch dimensions of x into a single dimension."""
    flat_dim = np.prod(x.get_shape().as_list()[1:])
    return tf.reshape(x, [-1, flat_dim])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/modelv1_compat.py
|
Python
|
import logging
import numpy as np
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.misc import linear, normc_initializer
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.debug import log_once
from ray.rllib.utils.tf_ops import scope_vars
tf = try_import_tf()
logger = logging.getLogger(__name__)
def make_v1_wrapper(legacy_model_cls):
    """Returns a TFModelV2 subclass wrapping `legacy_model_cls` (a V1 Model)
    so it can be used through the ModelV2 API."""

    class ModelV1Wrapper(TFModelV2):
        """Wrapper that allows V1 models to be used as ModelV2."""

        def __init__(self, obs_space, action_space, num_outputs, model_config,
                     name):
            TFModelV2.__init__(self, obs_space, action_space, num_outputs,
                               model_config, name)
            self.legacy_model_cls = legacy_model_cls

            # Tracks the last v1 model created by the call to forward
            self.cur_instance = None

            # XXX: Try to guess the initial state size. Since the size of the
            # state is known only after forward() for V1 models, it might be
            # wrong.
            if model_config.get("state_shape"):
                self.initial_state = [
                    np.zeros(s, np.float32)
                    for s in model_config["state_shape"]
                ]
            elif model_config.get("use_lstm"):
                # LSTM state is a (cell, hidden) pair of size cell_size each.
                cell_size = model_config.get("lstm_cell_size", 256)
                self.initial_state = [
                    np.zeros(cell_size, np.float32),
                    np.zeros(cell_size, np.float32),
                ]
            else:
                self.initial_state = []

            # Tracks update ops
            self._update_ops = None
            with tf.variable_scope(self.name) as scope:
                self.variable_scope = scope

        @override(ModelV2)
        def get_initial_state(self):
            return self.initial_state

        @override(ModelV2)
        def __call__(self, input_dict, state, seq_lens):
            if self.cur_instance:
                # create a weight-sharing model copy
                with tf.variable_scope(self.cur_instance.scope, reuse=True):
                    new_instance = self.legacy_model_cls(
                        input_dict, self.obs_space, self.action_space,
                        self.num_outputs, self.model_config, state, seq_lens)
            else:
                # create a new model instance; record only the UPDATE_OPS
                # (e.g., BatchNorm) added by this construction.
                with tf.variable_scope(self.name):
                    prev_update_ops = set(
                        tf.get_collection(tf.GraphKeys.UPDATE_OPS))
                    new_instance = self.legacy_model_cls(
                        input_dict, self.obs_space, self.action_space,
                        self.num_outputs, self.model_config, state, seq_lens)
                    self._update_ops = list(
                        set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) -
                        prev_update_ops)
            # The guessed initial state (from __init__) must match what the
            # constructed V1 model actually declares.
            if len(new_instance.state_init) != len(self.get_initial_state()):
                raise ValueError(
                    "When using a custom recurrent ModelV1 model, you should "
                    "declare the state_shape in the model options. For "
                    "example, set 'state_shape': [256, 256] for a lstm with "
                    "cell size 256. The guessed state shape was {} which "
                    "appears to be incorrect.".format(
                        [s.shape[0] for s in self.get_initial_state()]))
            self.cur_instance = new_instance
            self.variable_scope = new_instance.scope
            return new_instance.outputs, new_instance.state_out

        @override(TFModelV2)
        def update_ops(self):
            if self._update_ops is None:
                raise ValueError(
                    "Cannot get update ops before wrapped v1 model init")
            return list(self._update_ops)

        @override(TFModelV2)
        def variables(self):
            # Merge explicitly-registered variables with everything found in
            # the wrapped model's variable scope, without duplicates.
            var_list = super(ModelV1Wrapper, self).variables()
            for v in scope_vars(self.variable_scope):
                if v not in var_list:
                    var_list.append(v)
            return var_list

        @override(ModelV2)
        def custom_loss(self, policy_loss, loss_inputs):
            return self.cur_instance.custom_loss(policy_loss, loss_inputs)

        @override(ModelV2)
        def metrics(self):
            return self.cur_instance.custom_stats()

        @override(ModelV2)
        def value_function(self):
            assert self.cur_instance, "must call forward first"
            with tf.variable_scope(self.variable_scope):
                with tf.variable_scope("value_function", reuse=tf.AUTO_REUSE):
                    # Simple case: sharing the feature layer
                    if self.model_config["vf_share_layers"]:
                        return tf.reshape(
                            linear(self.cur_instance.last_layer, 1,
                                   "value_function", normc_initializer(1.0)),
                            [-1])

                    # Create a new separate model with no RNN state, etc.
                    branch_model_config = self.model_config.copy()
                    branch_model_config["free_log_std"] = False
                    if branch_model_config["use_lstm"]:
                        branch_model_config["use_lstm"] = False
                        if log_once("vf_warn"):
                            logger.warning(
                                "It is not recommended to use a LSTM model "
                                "with vf_share_layers=False (consider setting "
                                "it to True). If you want to not share "
                                "layers, you can implement a custom LSTM "
                                "model that overrides the value_function() "
                                "method.")
                    branch_instance = self.legacy_model_cls(
                        self.cur_instance.input_dict,
                        self.obs_space,
                        self.action_space,
                        1,
                        branch_model_config,
                        state_in=None,
                        seq_lens=None)
                    return tf.reshape(branch_instance.outputs, [-1])

        @override(ModelV2)
        def last_output(self):
            return self.cur_instance.outputs

    return ModelV1Wrapper
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/recurrent_tf_modelv2.py
|
Python
|
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.policy.rnn_sequencing import add_time_dimension
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
@DeveloperAPI
class RecurrentTFModelV2(TFModelV2):
    """Helper class to simplify implementing RNN models with TFModelV2.

    Instead of implementing forward(), you can implement forward_rnn() which
    takes batches with the time dimension added already."""

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        """Initialize a TFModelV2.

        Here is an example implementation for a subclass
        ``MyRNNClass(RecurrentTFModelV2)``::

            def __init__(self, *args, **kwargs):
                super(MyModelClass, self).__init__(*args, **kwargs)
                cell_size = 256

                # Define input layers
                input_layer = tf.keras.layers.Input(
                    shape=(None, obs_space.shape[0]))
                state_in_h = tf.keras.layers.Input(shape=(256, ))
                state_in_c = tf.keras.layers.Input(shape=(256, ))
                seq_in = tf.keras.layers.Input(shape=(), dtype=tf.int32)

                # Send to LSTM cell
                lstm_out, state_h, state_c = tf.keras.layers.LSTM(
                    cell_size, return_sequences=True, return_state=True,
                    name="lstm")(
                        inputs=input_layer,
                        mask=tf.sequence_mask(seq_in),
                        initial_state=[state_in_h, state_in_c])
                output_layer = tf.keras.layers.Dense(...)(lstm_out)

                # Create the RNN model
                self.rnn_model = tf.keras.Model(
                    inputs=[input_layer, seq_in, state_in_h, state_in_c],
                    outputs=[output_layer, state_h, state_c])
                self.register_variables(self.rnn_model.variables)
                self.rnn_model.summary()
        """
        TFModelV2.__init__(self, obs_space, action_space, num_outputs,
                           model_config, name)

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        """Adds time dimension to batch before sending inputs to forward_rnn().

        You should implement forward_rnn() in your subclass."""
        # [B*T, obs] -> [B, T, obs], run the RNN, then flatten back.
        output, new_state = self.forward_rnn(
            add_time_dimension(input_dict["obs_flat"], seq_lens), state,
            seq_lens)
        return tf.reshape(output, [-1, self.num_outputs]), new_state

    def forward_rnn(self, inputs, state, seq_lens):
        """Call the model with the given input tensors and state.

        Arguments:
            inputs (dict): observation tensor with shape [B, T, obs_size].
            state (list): list of state tensors, each with shape [B, T, size].
            seq_lens (Tensor): 1d tensor holding input sequence lengths.

        Returns:
            (outputs, new_state): The model output tensor of shape
                [B, T, num_outputs] and the list of new state tensors each with
                shape [B, size].

        Sample implementation for the ``MyRNNClass`` example::

            def forward_rnn(self, inputs, state, seq_lens):
                model_out, h, c = self.rnn_model([inputs, seq_lens] + state)
                return model_out, [h, c]
        """
        raise NotImplementedError("You must implement this for a RNN model")

    def get_initial_state(self):
        """Get the initial recurrent state values for the model.

        Returns:
            list of np.array objects, if any

        Sample implementation for the ``MyRNNClass`` example::

            def get_initial_state(self):
                return [
                    np.zeros(self.cell_size, np.float32),
                    np.zeros(self.cell_size, np.float32),
                ]
        """
        raise NotImplementedError("You must implement this for a RNN model")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/tf_action_dist.py
|
Python
|
import numpy as np
import functools
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.policy.policy import TupleActions
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
@DeveloperAPI
class TFActionDistribution(ActionDistribution):
    """TF-specific extensions for building action distributions."""

    @DeveloperAPI
    def __init__(self, inputs, model):
        super(TFActionDistribution, self).__init__(inputs, model)
        # Built once so sample() and sampled_action_logp() share the same
        # (stochastic) op.
        self.sample_op = self._build_sample_op()

    @DeveloperAPI
    def _build_sample_op(self):
        """Implement this instead of sample(), to enable op reuse.

        This is needed since the sample op is non-deterministic and is shared
        between sample() and sampled_action_logp().
        """
        raise NotImplementedError

    @override(ActionDistribution)
    def sample(self):
        """Draw a sample from the action distribution."""
        return self.sample_op

    @override(ActionDistribution)
    def sampled_action_logp(self):
        """Returns the log probability of the sampled action."""
        return self.logp(self.sample_op)
class Categorical(TFActionDistribution):
    """Categorical distribution for discrete action spaces."""

    @DeveloperAPI
    def __init__(self, inputs, model=None):
        super(Categorical, self).__init__(inputs, model)

    @override(ActionDistribution)
    def logp(self, x):
        # log p(x) == -cross_entropy(logits, x).
        return -tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.inputs, labels=tf.cast(x, tf.int32))

    @override(ActionDistribution)
    def entropy(self):
        # Shift logits by their row max for numerical stability before
        # exponentiating (log-sum-exp trick).
        a0 = self.inputs - tf.reduce_max(
            self.inputs, reduction_indices=[1], keep_dims=True)
        ea0 = tf.exp(a0)
        z0 = tf.reduce_sum(ea0, reduction_indices=[1], keep_dims=True)
        p0 = ea0 / z0
        return tf.reduce_sum(p0 * (tf.log(z0) - a0), reduction_indices=[1])

    @override(ActionDistribution)
    def kl(self, other):
        # KL(self || other), with both sets of logits max-shifted for
        # numerical stability.
        a0 = self.inputs - tf.reduce_max(
            self.inputs, reduction_indices=[1], keep_dims=True)
        a1 = other.inputs - tf.reduce_max(
            other.inputs, reduction_indices=[1], keep_dims=True)
        ea0 = tf.exp(a0)
        ea1 = tf.exp(a1)
        z0 = tf.reduce_sum(ea0, reduction_indices=[1], keep_dims=True)
        z1 = tf.reduce_sum(ea1, reduction_indices=[1], keep_dims=True)
        p0 = ea0 / z0
        return tf.reduce_sum(
            p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), reduction_indices=[1])

    @override(TFActionDistribution)
    def _build_sample_op(self):
        # Draw one sample per row of logits; squeeze to shape [BATCH].
        return tf.squeeze(tf.multinomial(self.inputs, 1), axis=1)

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        return action_space.n
class MultiCategorical(TFActionDistribution):
    """MultiCategorical distribution for MultiDiscrete action spaces."""

    def __init__(self, inputs, model, input_lens):
        # Skip TFActionDistribution.__init__ so the sample op is built only
        # after the per-dimension categoricals exist.
        ActionDistribution.__init__(self, inputs, model)
        self.cats = [
            Categorical(logits, model)
            for logits in tf.split(inputs, input_lens, axis=1)
        ]
        self.sample_op = self._build_sample_op()

    @override(ActionDistribution)
    def logp(self, actions):
        # If tensor is provided, unstack it into list
        if isinstance(actions, tf.Tensor):
            actions = tf.unstack(tf.cast(actions, tf.int32), axis=1)
        per_dim_logps = [
            cat.logp(act) for cat, act in zip(self.cats, actions)
        ]
        # Joint log-prob is the sum over independent dimensions.
        return tf.reduce_sum(tf.stack(per_dim_logps), axis=0)

    @override(ActionDistribution)
    def multi_entropy(self):
        return tf.stack([cat.entropy() for cat in self.cats], axis=1)

    @override(ActionDistribution)
    def entropy(self):
        return tf.reduce_sum(self.multi_entropy(), axis=1)

    @override(ActionDistribution)
    def multi_kl(self, other):
        return [
            cat.kl(other_cat)
            for cat, other_cat in zip(self.cats, other.cats)
        ]

    @override(ActionDistribution)
    def kl(self, other):
        return tf.reduce_sum(self.multi_kl(other), axis=1)

    @override(TFActionDistribution)
    def _build_sample_op(self):
        return tf.stack([cat.sample() for cat in self.cats], axis=1)

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        return np.sum(action_space.nvec)
class DiagGaussian(TFActionDistribution):
    """Action distribution where each vector element is a gaussian.

    The first half of the input vector defines the gaussian means, and the
    second half the gaussian standard deviations.
    """

    def __init__(self, inputs, model):
        # Split [mean | log_std] along the feature axis.
        mean, log_std = tf.split(inputs, 2, axis=1)
        self.mean = mean
        self.log_std = log_std
        self.std = tf.exp(log_std)
        TFActionDistribution.__init__(self, inputs, model)

    @override(ActionDistribution)
    def logp(self, x):
        # Diagonal-Gaussian log density: quadratic term, normalization
        # constant, and log-determinant (sum of log stds).
        return (-0.5 * tf.reduce_sum(
            tf.square((x - self.mean) / self.std), reduction_indices=[1]) -
                0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[1]) -
                tf.reduce_sum(self.log_std, reduction_indices=[1]))

    @override(ActionDistribution)
    def kl(self, other):
        # Closed-form KL between two diagonal Gaussians, summed over dims.
        assert isinstance(other, DiagGaussian)
        return tf.reduce_sum(
            other.log_std - self.log_std +
            (tf.square(self.std) + tf.square(self.mean - other.mean)) /
            (2.0 * tf.square(other.std)) - 0.5,
            reduction_indices=[1])

    @override(ActionDistribution)
    def entropy(self):
        return tf.reduce_sum(
            self.log_std + .5 * np.log(2.0 * np.pi * np.e),
            reduction_indices=[1])

    @override(TFActionDistribution)
    def _build_sample_op(self):
        # Reparameterized sample: mean + std * N(0, 1).
        return self.mean + self.std * tf.random_normal(tf.shape(self.mean))

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # Mean and log_std per action dimension.
        return np.prod(action_space.shape) * 2
class Deterministic(TFActionDistribution):
    """Action distribution that returns the input values directly.

    This is similar to DiagGaussian with standard deviation zero.
    """

    @override(TFActionDistribution)
    def sampled_action_logp(self):
        # A point mass: the "sampled" action has probability one.
        return 0.0

    @override(TFActionDistribution)
    def _build_sample_op(self):
        # Sampling is just passing the inputs through.
        return self.inputs

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        return np.prod(action_space.shape)
class MultiActionDistribution(TFActionDistribution):
    """Action distribution that operates for list of actions.

    Args:
        inputs (Tensor list): A list of tensors from which to compute samples.
    """

    def __init__(self, inputs, model, action_space, child_distributions,
                 input_lens):
        # skip TFActionDistribution init
        ActionDistribution.__init__(self, inputs, model)
        self.input_lens = input_lens
        # One child distribution per split of the flat input vector.
        split_inputs = tf.split(inputs, self.input_lens, axis=1)
        child_list = []
        for i, distribution in enumerate(child_distributions):
            child_list.append(distribution(split_inputs[i], model))
        self.child_distributions = child_list

    @override(ActionDistribution)
    def logp(self, x):
        # Categorical children consume a single (index) column; other
        # children consume as many columns as their samples have.
        split_indices = []
        for dist in self.child_distributions:
            if isinstance(dist, Categorical):
                split_indices.append(1)
            else:
                split_indices.append(tf.shape(dist.sample())[1])
        split_list = tf.split(x, split_indices, axis=1)
        for i, distribution in enumerate(self.child_distributions):
            # Remove extra categorical dimension
            if isinstance(distribution, Categorical):
                split_list[i] = tf.cast(
                    tf.squeeze(split_list[i], axis=-1), tf.int32)
        log_list = [
            distribution.logp(split_x) for distribution, split_x in zip(
                self.child_distributions, split_list)
        ]
        # Joint log-prob is the sum of the children's log-probs.
        return functools.reduce(lambda a, b: a + b, log_list)

    @override(ActionDistribution)
    def kl(self, other):
        kl_list = [
            distribution.kl(other_distribution)
            for distribution, other_distribution in zip(
                self.child_distributions, other.child_distributions)
        ]
        return functools.reduce(lambda a, b: a + b, kl_list)

    @override(ActionDistribution)
    def entropy(self):
        entropy_list = [s.entropy() for s in self.child_distributions]
        return functools.reduce(lambda a, b: a + b, entropy_list)

    @override(ActionDistribution)
    def sample(self):
        # Samples are returned as a tuple, one entry per child.
        return TupleActions([s.sample() for s in self.child_distributions])

    @override(TFActionDistribution)
    def sampled_action_logp(self):
        p = self.child_distributions[0].sampled_action_logp()
        for c in self.child_distributions[1:]:
            p += c.sampled_action_logp()
        return p
class Dirichlet(TFActionDistribution):
    """Dirichlet distribution for continuous actions that are between
    [0,1] and sum to 1.

    e.g. actions that represent resource allocation."""

    def __init__(self, inputs, model):
        """Input is a tensor of logits. The exponential of logits is used to
        parametrize the Dirichlet distribution as all parameters need to be
        positive. An arbitrarily small epsilon is added to the concentration
        parameters so they never collapse to zero due to numerical error.

        See issue #4440 for more details.
        """
        self.epsilon = 1e-7
        concentration = tf.exp(inputs) + self.epsilon
        self.dist = tf.distributions.Dirichlet(
            concentration=concentration,
            validate_args=True,
            allow_nan_stats=False,
        )
        # Note: the base class is given the concentration (not the raw
        # logits) as its `inputs`.
        TFActionDistribution.__init__(self, concentration, model)

    @override(ActionDistribution)
    def logp(self, x):
        # Support of Dirichlet are positive real numbers. x is already be
        # an array of positive number, but we clip to avoid zeros due to
        # numerical errors.
        x = tf.maximum(x, self.epsilon)
        # Renormalize so x stays on the simplex after clipping.
        x = x / tf.reduce_sum(x, axis=-1, keepdims=True)
        return self.dist.log_prob(x)

    @override(ActionDistribution)
    def entropy(self):
        return self.dist.entropy()

    @override(ActionDistribution)
    def kl(self, other):
        return self.dist.kl_divergence(other.dist)

    @override(TFActionDistribution)
    def _build_sample_op(self):
        return self.dist.sample()

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        return np.prod(action_space.shape)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/tf_modelv2.py
|
Python
|
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
@PublicAPI
class TFModelV2(ModelV2):
    """TF version of ModelV2.
    Note that this class by itself is not a valid model unless you
    implement forward() in a subclass."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        """Initialize a TFModelV2.
        Here is an example implementation for a subclass
        ``MyModelClass(TFModelV2)``::
            def __init__(self, *args, **kwargs):
                super(MyModelClass, self).__init__(*args, **kwargs)
                input_layer = tf.keras.layers.Input(...)
                hidden_layer = tf.keras.layers.Dense(...)(input_layer)
                output_layer = tf.keras.layers.Dense(...)(hidden_layer)
                value_layer = tf.keras.layers.Dense(...)(hidden_layer)
                self.base_model = tf.keras.Model(
                    input_layer, [output_layer, value_layer])
                self.register_variables(self.base_model.variables)
        """
        ModelV2.__init__(
            self,
            obs_space,
            action_space,
            num_outputs,
            model_config,
            name,
            framework="tf")
        # Variables registered via register_variables(); exposed below
        # through variables() / trainable_variables().
        self.var_list = []
        # Capture the graph the model was built in so context() can re-enter
        # it later; in eager mode there is no graph to capture.
        if tf.executing_eagerly():
            self.graph = None
        else:
            self.graph = tf.get_default_graph()
    def context(self):
        """Returns a contextmanager for the current TF graph."""
        if self.graph:
            return self.graph.as_default()
        else:
            # Eager mode: fall back to the base class no-op context.
            return ModelV2.context(self)
    def forward(self, input_dict, state, seq_lens):
        """Call the model with the given input tensors and state.
        Any complex observations (dicts, tuples, etc.) will be unpacked by
        __call__ before being passed to forward(). To access the flattened
        observation tensor, refer to input_dict["obs_flat"].
        This method can be called any number of times. In eager execution,
        each call to forward() will eagerly evaluate the model. In symbolic
        execution, each call to forward creates a computation graph that
        operates over the variables of this model (i.e., shares weights).
        Custom models should override this instead of __call__.
        Arguments:
            input_dict (dict): dictionary of input tensors, including "obs",
                "obs_flat", "prev_action", "prev_reward", "is_training"
            state (list): list of state tensors with sizes matching those
                returned by get_initial_state + the batch dimension
            seq_lens (Tensor): 1d tensor holding input sequence lengths
        Returns:
            (outputs, state): The model output tensor of size
                [BATCH, num_outputs]
        Sample implementation for the ``MyModelClass`` example::
            def forward(self, input_dict, state, seq_lens):
                model_out, self._value_out = self.base_model(input_dict["obs"])
                return model_out, state
        """
        raise NotImplementedError
    def value_function(self):
        """Return the value function estimate for the most recent forward pass.
        Returns:
            value estimate tensor of shape [BATCH].
        Sample implementation for the ``MyModelClass`` example::
            def value_function(self):
                return self._value_out
        """
        raise NotImplementedError
    def update_ops(self):
        """Return the list of update ops for this model.
        For example, this should include any BatchNorm update ops."""
        return []
    def register_variables(self, variables):
        """Register the given list of variables with this model."""
        self.var_list.extend(variables)
    def variables(self):
        """Returns the list of variables for this model."""
        # Copy so callers cannot mutate the internal registry.
        return list(self.var_list)
    def trainable_variables(self):
        """Returns the list of trainable variables for this model."""
        return [v for v in self.variables() if v.trainable]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/visionnet_v1.py
|
Python
|
from ray.rllib.models.model import Model
from ray.rllib.models.tf.misc import get_activation_fn, flatten
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
# Deprecated: see as an alternative models/tf/visionnet_v2.py
class VisionNetwork(Model):
    """Generic vision network."""
    @override(Model)
    def _build_layers_v2(self, input_dict, num_outputs, options):
        """Build the conv stack and return (output, last_hidden) tensors."""
        inputs = input_dict["obs"]
        filters = options.get("conv_filters")
        if not filters:
            # Fall back to the default filter specs for the input resolution.
            filters = _get_filter_config(inputs.shape.as_list()[1:])
        activation = get_activation_fn(options.get("conv_activation"))
        with tf.name_scope("vision_net"):
            # All but the last filter spec use SAME padding.
            for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
                inputs = tf.layers.conv2d(
                    inputs,
                    out_size,
                    kernel,
                    stride,
                    activation=activation,
                    padding="same",
                    name="conv{}".format(i))
            out_size, kernel, stride = filters[-1]
            # skip final linear layer
            if options.get("no_final_linear"):
                # Final conv directly produces num_outputs channels; the same
                # flattened tensor serves as both output and last hidden.
                fc_out = tf.layers.conv2d(
                    inputs,
                    num_outputs,
                    kernel,
                    stride,
                    activation=activation,
                    padding="valid",
                    name="fc_out")
                return flatten(fc_out), flatten(fc_out)
            # Otherwise: one VALID conv as the last hidden layer ...
            fc1 = tf.layers.conv2d(
                inputs,
                out_size,
                kernel,
                stride,
                activation=activation,
                padding="valid",
                name="fc1")
            # ... followed by a 1x1 conv acting as the linear output layer.
            fc2 = tf.layers.conv2d(
                fc1,
                num_outputs, [1, 1],
                activation=None,
                padding="same",
                name="fc2")
            return flatten(fc2), flatten(fc1)
def _get_filter_config(shape):
shape = list(shape)
filters_84x84 = [
[16, [8, 8], 4],
[32, [4, 4], 2],
[256, [11, 11], 1],
]
filters_42x42 = [
[16, [4, 4], 2],
[32, [4, 4], 2],
[256, [11, 11], 1],
]
if len(shape) == 3 and shape[:2] == [84, 84]:
return filters_84x84
elif len(shape) == 3 and shape[:2] == [42, 42]:
return filters_42x42
else:
raise ValueError(
"No default configuration for obs shape {}".format(shape) +
", you must specify `conv_filters` manually as a model option. "
"Default configurations are only available for inputs of shape "
"[42, 42, K] and [84, 84, K]. You may alternatively want "
"to use a custom model or preprocessor.")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/tf/visionnet_v2.py
|
Python
|
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.visionnet_v1 import _get_filter_config
from ray.rllib.models.tf.misc import normc_initializer, get_activation_fn
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class VisionNetwork(TFModelV2):
    """Generic vision network implemented in ModelV2 API."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super(VisionNetwork, self).__init__(obs_space, action_space,
                                            num_outputs, model_config, name)
        activation = get_activation_fn(model_config.get("conv_activation"))
        filters = model_config.get("conv_filters")
        if not filters:
            # Fall back to the default filter specs for this resolution.
            filters = _get_filter_config(obs_space.shape)
        no_final_linear = model_config.get("no_final_linear")
        vf_share_layers = model_config.get("vf_share_layers")
        inputs = tf.keras.layers.Input(
            shape=obs_space.shape, name="observations")
        last_layer = inputs
        # Build the action layers
        for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=(stride, stride),
                activation=activation,
                padding="same",
                name="conv{}".format(i))(last_layer)
        out_size, kernel, stride = filters[-1]
        if no_final_linear:
            # the last layer is adjusted to be of size num_outputs
            last_layer = tf.keras.layers.Conv2D(
                num_outputs,
                kernel,
                strides=(stride, stride),
                activation=activation,
                padding="valid",
                name="conv_out")(last_layer)
            conv_out = last_layer
        else:
            # Final VALID conv, then a 1x1 conv acting as the linear head.
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=(stride, stride),
                activation=activation,
                padding="valid",
                name="conv{}".format(i + 1))(last_layer)
            conv_out = tf.keras.layers.Conv2D(
                num_outputs, [1, 1],
                activation=None,
                padding="same",
                name="conv_out")(last_layer)
        # Build the value layers
        if vf_share_layers:
            # The squeeze requires the conv output to be 1x1 spatially
            # (axes 1 and 2 must be size 1).
            last_layer = tf.keras.layers.Lambda(
                lambda x: tf.squeeze(x, axis=[1, 2]))(last_layer)
            value_out = tf.keras.layers.Dense(
                1,
                name="value_out",
                activation=None,
                kernel_initializer=normc_initializer(0.01))(last_layer)
        else:
            # build a parallel set of hidden layers for the value net
            last_layer = inputs
            for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
                last_layer = tf.keras.layers.Conv2D(
                    out_size,
                    kernel,
                    strides=(stride, stride),
                    activation=activation,
                    padding="same",
                    name="conv_value_{}".format(i))(last_layer)
            out_size, kernel, stride = filters[-1]
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=(stride, stride),
                activation=activation,
                padding="valid",
                name="conv_value_{}".format(i + 1))(last_layer)
            # 1x1 conv producing the scalar value channel.
            last_layer = tf.keras.layers.Conv2D(
                1, [1, 1],
                activation=None,
                padding="same",
                name="conv_value_out")(last_layer)
            value_out = tf.keras.layers.Lambda(
                lambda x: tf.squeeze(x, axis=[1, 2]))(last_layer)
        self.base_model = tf.keras.Model(inputs, [conv_out, value_out])
        self.register_variables(self.base_model.variables)
    def forward(self, input_dict, state, seq_lens):
        # explicit cast to float32 needed in eager
        model_out, self._value_out = self.base_model(
            tf.cast(input_dict["obs"], tf.float32))
        # Drop the 1x1 spatial dims to return [BATCH, num_outputs].
        return tf.squeeze(model_out, axis=[1, 2]), state
    def value_function(self):
        """Return the value estimate from the last forward() as [BATCH]."""
        return tf.reshape(self._value_out, [-1])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/torch/fcnet.py
|
Python
|
import logging
import numpy as np
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.misc import normc_initializer, SlimFC, \
_get_activation_fn
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_torch
_, nn = try_import_torch()
logger = logging.getLogger(__name__)
class FullyConnectedNetwork(TorchModelV2, nn.Module):
    """Generic fully connected network.

    Builds a stack of hidden layers from the ``fcnet_hiddens`` model config,
    with a linear logits head of size ``num_outputs`` and a scalar value
    head that shares the hidden features with the policy.
    """
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        hiddens = model_config.get("fcnet_hiddens")
        activation = _get_activation_fn(model_config.get("fcnet_activation"))
        logger.debug("Constructing fcnet {} {}".format(hiddens, activation))
        layers = []
        # Fix: np.product is a deprecated alias (removed in NumPy 2.0); use
        # np.prod instead, cast to a plain int for nn.Linear.
        last_layer_size = int(np.prod(obs_space.shape))
        for size in hiddens:
            layers.append(
                SlimFC(
                    in_size=last_layer_size,
                    out_size=size,
                    initializer=normc_initializer(1.0),
                    activation_fn=activation))
            last_layer_size = size
        self._hidden_layers = nn.Sequential(*layers)
        # Policy logits head (small init scale to start near-uniform).
        self._logits = SlimFC(
            in_size=last_layer_size,
            out_size=num_outputs,
            initializer=normc_initializer(0.01),
            activation_fn=None)
        # Scalar state-value head.
        self._value_branch = SlimFC(
            in_size=last_layer_size,
            out_size=1,
            initializer=normc_initializer(1.0),
            activation_fn=None)
        # Cached value output from the most recent forward() pass.
        self._cur_value = None
    @override(TorchModelV2)
    def forward(self, input_dict, state, seq_lens):
        obs = input_dict["obs_flat"]
        # Flatten any remaining structure into (batch, -1) before the MLP.
        features = self._hidden_layers(obs.reshape(obs.shape[0], -1))
        logits = self._logits(features)
        self._cur_value = self._value_branch(features).squeeze(1)
        return logits, state
    @override(TorchModelV2)
    def value_function(self):
        assert self._cur_value is not None, "must call forward() first"
        return self._cur_value
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/torch/misc.py
|
Python
|
""" Code adapted from https://github.com/ikostrikov/pytorch-a3c"""
import numpy as np
from ray.rllib.utils import try_import_torch
torch, nn = try_import_torch()
def normc_initializer(std=1.0):
    """Return an in-place initializer that fills a 2-D tensor with standard
    normal samples, then rescales each row to have Euclidean norm ``std``."""
    def initializer(tensor):
        # Draw N(0, 1) samples, then normalize along dim 1 (keepdim so the
        # division broadcasts row-wise).
        tensor.data.normal_(0, 1)
        row_norms = torch.sqrt(tensor.data.pow(2).sum(1, keepdim=True))
        tensor.data *= std / row_norms
    return initializer
def valid_padding(in_size, filter_size, stride_size):
    """Note: Padding is added to match TF conv2d `same` padding. See
    www.tensorflow.org/versions/r0.12/api_docs/python/nn/convolution
    Params:
        in_size (tuple): Rows (Height), Column (Width) for input
        stride_size (tuple): Rows (Height), Column (Width) for stride
        filter_size (tuple): Rows (Height), Column (Width) for filter
    Output:
        padding (tuple): For input into torch.nn.ZeroPad2d
        output (tuple): Output shape after padding and convolution
    """
    rows_in, cols_in = in_size
    rows_filter, cols_filter = filter_size
    rows_stride, cols_stride = stride_size
    # SAME-padded output size: ceil(input / stride).
    rows_out = np.ceil(float(rows_in) / float(rows_stride))
    cols_out = np.ceil(float(cols_in) / float(cols_stride))
    # Total padding needed so a strided filter covers the input like TF SAME.
    pad_rows = int((rows_out - 1) * rows_stride + rows_filter - rows_in)
    pad_cols = int((cols_out - 1) * cols_stride + cols_filter - cols_in)
    # Any odd leftover pixel goes on the bottom/right side.
    pad_top, pad_bottom = pad_rows // 2, pad_rows - pad_rows // 2
    pad_left, pad_right = pad_cols // 2, pad_cols - pad_cols // 2
    return (pad_left, pad_right, pad_top, pad_bottom), (rows_out, cols_out)
def _get_activation_fn(name):
if name == "tanh":
return nn.Tanh
elif name == "relu":
return nn.ReLU
elif name == "linear":
return None
else:
raise ValueError("Unknown activation: {}".format(name))
class SlimConv2d(nn.Module):
    """Simple mock of tf.slim Conv2d.

    Composes an optional explicit ZeroPad2d (to emulate TF-style padding),
    an initialized Conv2d, and an optional activation into one module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel,
                 stride,
                 padding,
                 initializer=nn.init.xavier_uniform_,
                 activation_fn=nn.ReLU,
                 bias_init=0):
        super(SlimConv2d, self).__init__()
        modules = []
        # Explicit zero padding (supports the asymmetric TF "SAME" case).
        if padding:
            modules.append(nn.ZeroPad2d(padding))
        conv = nn.Conv2d(in_channels, out_channels, kernel, stride)
        if initializer:
            initializer(conv.weight)
        nn.init.constant_(conv.bias, bias_init)
        modules.append(conv)
        if activation_fn:
            modules.append(activation_fn())
        self._model = nn.Sequential(*modules)
    def forward(self, x):
        return self._model(x)
class SlimFC(nn.Module):
    """Simple PyTorch version of the `linear` function.

    Wraps an initialized nn.Linear plus an optional activation in a single
    module.
    """
    def __init__(self,
                 in_size,
                 out_size,
                 initializer=None,
                 activation_fn=None,
                 bias_init=0):
        super(SlimFC, self).__init__()
        linear = nn.Linear(in_size, out_size)
        if initializer:
            initializer(linear.weight)
        nn.init.constant_(linear.bias, bias_init)
        modules = [linear]
        if activation_fn:
            modules.append(activation_fn())
        self._model = nn.Sequential(*modules)
    def forward(self, x):
        return self._model(x)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/torch/torch_action_dist.py
|
Python
|
import numpy as np
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_torch
torch, nn = try_import_torch()
class TorchDistributionWrapper(ActionDistribution):
    """Wrapper class for torch.distributions.

    Subclasses are expected to set ``self.dist`` to a torch.distributions
    instance in __init__; the methods below delegate to it.
    """
    @override(ActionDistribution)
    def logp(self, actions):
        """Log-probability of `actions` under the wrapped distribution."""
        return self.dist.log_prob(actions)
    @override(ActionDistribution)
    def entropy(self):
        """Entropy of the wrapped distribution."""
        return self.dist.entropy()
    @override(ActionDistribution)
    def kl(self, other):
        """KL(self || other) between the two wrapped distributions."""
        return torch.distributions.kl.kl_divergence(self.dist, other.dist)
    @override(ActionDistribution)
    def sample(self):
        """Draw a sample from the wrapped distribution."""
        return self.dist.sample()
class TorchCategorical(TorchDistributionWrapper):
    """Wrapper class for PyTorch Categorical distribution."""
    @override(ActionDistribution)
    def __init__(self, inputs, model):
        # `inputs` are unnormalized logits, one per discrete action.
        self.dist = torch.distributions.categorical.Categorical(logits=inputs)
    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # One logit per discrete action.
        return action_space.n
class TorchDiagGaussian(TorchDistributionWrapper):
    """Wrapper class for PyTorch Normal distribution."""
    @override(ActionDistribution)
    def __init__(self, inputs, model):
        # First half of the model output parameterizes the mean, second half
        # the log standard deviation of an independent Normal per action dim.
        mean, log_std = torch.chunk(inputs, 2, dim=1)
        self.dist = torch.distributions.normal.Normal(mean, torch.exp(log_std))
    @override(TorchDistributionWrapper)
    def logp(self, actions):
        # Sum per-dimension log-probs to get the joint (diagonal-Gaussian)
        # log-prob of the full action vector.
        return TorchDistributionWrapper.logp(self, actions).sum(-1)
    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # Mean and log_std for every action dimension.
        return np.prod(action_space.shape) * 2
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/torch/torch_modelv2.py
|
Python
|
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils import try_import_torch
_, nn = try_import_torch()
@PublicAPI
class TorchModelV2(ModelV2):
    """Torch version of ModelV2.
    Note that this class by itself is not a valid model unless you
    inherit from nn.Module and implement forward() in a subclass."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        """Initialize a TorchModelV2.
        Here is an example implementation for a subclass
        ``MyModelClass(TorchModelV2, nn.Module)``::
            def __init__(self, *args, **kwargs):
                TorchModelV2.__init__(self, *args, **kwargs)
                nn.Module.__init__(self)
                self._hidden_layers = nn.Sequential(...)
                self._logits = ...
                self._value_branch = ...
        """
        # Fail fast with a clear error if the subclass forgot the nn.Module
        # mixin.
        if not isinstance(self, nn.Module):
            raise ValueError(
                "Subclasses of TorchModelV2 must also inherit from "
                "nn.Module, e.g., MyModel(TorchModelV2, nn.Module)")
        ModelV2.__init__(
            self,
            obs_space,
            action_space,
            num_outputs,
            model_config,
            name,
            framework="torch")
    def forward(self, input_dict, state, seq_lens):
        """Call the model with the given input tensors and state.
        Any complex observations (dicts, tuples, etc.) will be unpacked by
        __call__ before being passed to forward(). To access the flattened
        observation tensor, refer to input_dict["obs_flat"].
        This method can be called any number of times. In eager execution,
        each call to forward() will eagerly evaluate the model. In symbolic
        execution, each call to forward creates a computation graph that
        operates over the variables of this model (i.e., shares weights).
        Custom models should override this instead of __call__.
        Arguments:
            input_dict (dict): dictionary of input tensors, including "obs",
                "obs_flat", "prev_action", "prev_reward", "is_training"
            state (list): list of state tensors with sizes matching those
                returned by get_initial_state + the batch dimension
            seq_lens (Tensor): 1d tensor holding input sequence lengths
        Returns:
            (outputs, state): The model output tensor of size
                [BATCH, num_outputs]
        Sample implementation for the ``MyModelClass`` example::
            def forward(self, input_dict, state, seq_lens):
                features = self._hidden_layers(input_dict["obs"])
                self._value_out = self._value_branch(features)
                return self._logits(features), state
        """
        raise NotImplementedError
    def value_function(self):
        """Return the value function estimate for the most recent forward pass.
        Returns:
            value estimate tensor of shape [BATCH].
        Sample implementation for the ``MyModelClass`` example::
            def value_function(self):
                return self._value_out
        """
        raise NotImplementedError
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/models/torch/visionnet.py
|
Python
|
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.misc import normc_initializer, valid_padding, \
SlimConv2d, SlimFC
from ray.rllib.models.tf.visionnet_v1 import _get_filter_config
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_torch
_, nn = try_import_torch()
class VisionNetwork(TorchModelV2, nn.Module):
    """Generic vision network."""
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        filters = model_config.get("conv_filters")
        if not filters:
            # Fall back to the default filter specs for this resolution.
            filters = _get_filter_config(obs_space.shape)
        layers = []
        # Observation space is laid out NHWC here (channels last).
        (w, h, in_channels) = obs_space.shape
        in_size = [w, h]
        for out_channels, kernel, stride in filters[:-1]:
            # Compute explicit padding to mimic TF "SAME" convolutions.
            padding, out_size = valid_padding(in_size, kernel,
                                              [stride, stride])
            layers.append(
                SlimConv2d(in_channels, out_channels, kernel, stride, padding))
            in_channels = out_channels
            in_size = out_size
        out_channels, kernel, stride = filters[-1]
        # Final conv uses no padding ("VALID").
        layers.append(
            SlimConv2d(in_channels, out_channels, kernel, stride, None))
        self._convs = nn.Sequential(*layers)
        self._logits = SlimFC(
            out_channels, num_outputs, initializer=nn.init.xavier_uniform_)
        self._value_branch = SlimFC(
            out_channels, 1, initializer=normc_initializer())
        # Cached value output from the most recent forward() pass.
        self._cur_value = None
    @override(TorchModelV2)
    def forward(self, input_dict, state, seq_lens):
        features = self._hidden_layers(input_dict["obs"].float())
        logits = self._logits(features)
        self._cur_value = self._value_branch(features).squeeze(1)
        return logits, state
    @override(TorchModelV2)
    def value_function(self):
        assert self._cur_value is not None, "must call forward() first"
        return self._cur_value
    def _hidden_layers(self, obs):
        res = self._convs(obs.permute(0, 3, 1, 2))  # switch to channel-major
        # The squeezes assume the final conv reduced the spatial dims to 1x1,
        # leaving (batch, channels) features — confirm with the filter specs.
        res = res.squeeze(3)
        res = res.squeeze(2)
        return res
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/__init__.py
|
Python
|
from ray.rllib.offline.io_context import IOContext
from ray.rllib.offline.json_reader import JsonReader
from ray.rllib.offline.json_writer import JsonWriter
from ray.rllib.offline.output_writer import OutputWriter, NoopOutput
from ray.rllib.offline.input_reader import InputReader
from ray.rllib.offline.mixed_input import MixedInput
from ray.rllib.offline.shuffled_input import ShuffledInput
# Public API of the rllib.offline package (names exported by `import *`).
__all__ = [
    "IOContext",
    "JsonReader",
    "JsonWriter",
    "NoopOutput",
    "OutputWriter",
    "InputReader",
    "MixedInput",
    "ShuffledInput",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/input_reader.py
|
Python
|
import logging
import numpy as np
import threading
from ray.rllib.policy.sample_batch import MultiAgentBatch
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
@PublicAPI
class InputReader:
    """Input object for loading experiences in policy evaluation."""
    @PublicAPI
    def next(self):
        """Return the next batch of experiences read.
        Returns:
            SampleBatch or MultiAgentBatch read.
        """
        raise NotImplementedError
    @PublicAPI
    def tf_input_ops(self, queue_size=1):
        """Returns TensorFlow queue ops for reading inputs from this reader.
        The main use of these ops is for integration into custom model losses.
        For example, you can use tf_input_ops() to read from files of external
        experiences to add an imitation learning loss to your model.
        This method creates a queue runner thread that will call next() on this
        reader repeatedly to feed the TensorFlow queue.
        Arguments:
            queue_size (int): Max elements to allow in the TF queue.
        Example:
            >>> class MyModel(rllib.model.Model):
            ...     def custom_loss(self, policy_loss, loss_inputs):
            ...         reader = JsonReader(...)
            ...         input_ops = reader.tf_input_ops()
            ...         logits, _ = self._build_layers_v2(
            ...             {"obs": input_ops["obs"]},
            ...             self.num_outputs, self.options)
            ...         il_loss = imitation_loss(logits, input_ops["action"])
            ...         return policy_loss + il_loss
        You can find a runnable version of this in examples/custom_loss.py.
        Returns:
            dict of Tensors, one for each column of the read SampleBatch.
        """
        # Only one queue runner (one set of queue ops) may exist per reader.
        if hasattr(self, "_queue_runner"):
            raise ValueError(
                "A queue runner already exists for this input reader. "
                "You can only call tf_input_ops() once per reader.")
        logger.info("Reading initial batch of data from input reader.")
        # Read one batch up front to discover column names, dtypes, shapes.
        batch = self.next()
        if isinstance(batch, MultiAgentBatch):
            raise NotImplementedError(
                "tf_input_ops() is not implemented for multi agent batches")
        # Only numeric columns are fed through the TF queue.
        keys = [
            k for k in sorted(batch.keys())
            if np.issubdtype(batch[k].dtype, np.number)
        ]
        dtypes = [batch[k].dtype for k in keys]
        # Leading -1: the batch dimension is variable across reads.
        shapes = {
            k: (-1, ) + s[1:]
            for (k, s) in [(k, batch[k].shape) for k in keys]
        }
        queue = tf.FIFOQueue(capacity=queue_size, dtypes=dtypes, names=keys)
        tensors = queue.dequeue()
        logger.info("Creating TF queue runner for {}".format(self))
        self._queue_runner = _QueueRunner(self, queue, keys, dtypes)
        # Feed the already-consumed first batch before starting the thread.
        self._queue_runner.enqueue(batch)
        self._queue_runner.start()
        out = {k: tf.reshape(t, shapes[k]) for k, t in tensors.items()}
        return out
class _QueueRunner(threading.Thread):
    """Thread that feeds a TF queue from a InputReader."""
    def __init__(self, input_reader, queue, keys, dtypes):
        threading.Thread.__init__(self)
        # Session captured at construction time; enqueue() runs ops in it.
        self.sess = tf.get_default_session()
        # Daemon thread so a blocked reader never prevents interpreter exit.
        self.daemon = True
        self.input_reader = input_reader
        self.keys = keys
        self.queue = queue
        # One feed placeholder per column, in the same order as `keys`.
        self.placeholders = [tf.placeholder(dtype) for dtype in dtypes]
        self.enqueue_op = queue.enqueue(dict(zip(keys, self.placeholders)))
    def enqueue(self, batch):
        """Push one batch's columns into the TF queue."""
        data = {
            self.placeholders[i]: batch[key]
            for i, key in enumerate(self.keys)
        }
        self.sess.run(self.enqueue_op, feed_dict=data)
    def run(self):
        # Loop forever; read errors are logged and the thread keeps going.
        while True:
            try:
                batch = self.input_reader.next()
                self.enqueue(batch)
            except Exception:
                logger.exception("Error reading from input")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/io_context.py
|
Python
|
import os
from ray.rllib.utils.annotations import PublicAPI
@PublicAPI
class IOContext:
    """Bundle of attributes handed to input / output class constructors.

    RLlib fills these in automatically when it instantiates readers and
    writers, so user code normally never builds an IOContext by hand.

    Attributes:
        log_dir (str): Default logging directory.
        config (dict): Configuration of the agent.
        worker_index (int): Uniquely identifies the current worker when
            multiple workers exist.
        worker (RolloutWorker): Reference to the owning rollout worker.
    """
    @PublicAPI
    def __init__(self, log_dir=None, config=None, worker_index=0, worker=None):
        # Fall back to sane defaults when values were not supplied.
        self.worker = worker
        self.worker_index = worker_index
        self.config = config or {}
        self.log_dir = log_dir or os.getcwd()
    @PublicAPI
    def default_sampler_input(self):
        """Return the sampler of the attached rollout worker."""
        return self.worker.sampler
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/is_estimator.py
|
Python
|
from ray.rllib.offline.off_policy_estimator import OffPolicyEstimator, \
OffPolicyEstimate
from ray.rllib.utils.annotations import override
class ImportanceSamplingEstimator(OffPolicyEstimator):
    """The step-wise IS estimator.
    Step-wise IS estimator described in https://arxiv.org/pdf/1511.03722.pdf"""
    def __init__(self, policy, gamma):
        OffPolicyEstimator.__init__(self, policy, gamma)
    @override(OffPolicyEstimator)
    def estimate(self, batch):
        """Return the step-wise importance-sampling estimate for `batch`."""
        self.check_can_estimate_for(batch)
        rewards, old_prob = batch["rewards"], batch["action_prob"]
        # Probability of each logged action under the *current* policy.
        new_prob = self.action_prob(batch)
        # calculate importance ratios
        # p[t] is the cumulative product of per-step ratios up to time t.
        p = []
        for t in range(batch.count - 1):
            if t == 0:
                pt_prev = 1.0
            else:
                pt_prev = p[t - 1]
            p.append(pt_prev * new_prob[t] / old_prob[t])
        # calculate stepwise IS estimate
        # NOTE(review): both loops iterate over range(batch.count - 1), so
        # the final timestep's reward is never accumulated in either
        # estimate — confirm this off-by-one is intentional.
        V_prev, V_step_IS = 0.0, 0.0
        for t in range(batch.count - 1):
            V_prev += rewards[t] * self.gamma**t
            V_step_IS += p[t] * rewards[t] * self.gamma**t
        estimation = OffPolicyEstimate(
            "is", {
                # Discounted return actually observed (behavior policy).
                "V_prev": V_prev,
                # IS-weighted discounted return (target policy estimate).
                "V_step_IS": V_step_IS,
                # Ratio of the two; 1e-8 floor guards division by zero.
                "V_gain_est": V_step_IS / max(1e-8, V_prev),
            })
        return estimation
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/offline/json_reader.py
|
Python
|
import glob
import json
import logging
import os
import random
import six
from six.moves.urllib.parse import urlparse
try:
from smart_open import smart_open
except ImportError:
smart_open = None
from ray.rllib.offline.input_reader import InputReader
from ray.rllib.offline.io_context import IOContext
from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch, \
DEFAULT_POLICY_ID
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.compression import unpack_if_needed
logger = logging.getLogger(__name__)
@PublicAPI
class JsonReader(InputReader):
    """Reader object that loads experiences from JSON file chunks.
    The input files will be read from in an random order."""
    @PublicAPI
    def __init__(self, inputs, ioctx=None):
        """Initialize a JsonReader.
        Arguments:
            inputs (str|list): either a glob expression for files, e.g.,
                "/tmp/**/*.json", or a list of single file paths or URIs, e.g.,
                ["s3://bucket/file.json", "s3://bucket/file2.json"].
            ioctx (IOContext): current IO context object.
        """
        self.ioctx = ioctx or IOContext()
        if isinstance(inputs, six.string_types):
            inputs = os.path.abspath(os.path.expanduser(inputs))
            # A bare directory is treated as "all .json files inside it".
            if os.path.isdir(inputs):
                inputs = os.path.join(inputs, "*.json")
                logger.warning(
                    "Treating input directory as glob pattern: {}".format(
                        inputs))
            # Globbing is only supported for local paths, not URIs.
            if urlparse(inputs).scheme:
                raise ValueError(
                    "Don't know how to glob over `{}`, ".format(inputs) +
                    "please specify a list of files to read instead.")
            else:
                self.files = glob.glob(inputs)
        elif type(inputs) is list:
            self.files = inputs
        else:
            raise ValueError(
                "type of inputs must be list or str, not {}".format(inputs))
        if self.files:
            logger.info("Found {} input files.".format(len(self.files)))
        else:
            raise ValueError("No files found matching {}".format(inputs))
        # Handle of the file currently being read; opened lazily.
        self.cur_file = None
    @override(InputReader)
    def next(self):
        """Return the next (optionally postprocessed) batch from the files."""
        batch = self._try_parse(self._next_line())
        tries = 0
        # Tolerate up to 100 empty/corrupt lines before giving up.
        while not batch and tries < 100:
            tries += 1
            logger.debug("Skipping empty line in {}".format(self.cur_file))
            batch = self._try_parse(self._next_line())
        if not batch:
            raise ValueError(
                "Failed to read valid experience batch from file: {}".format(
                    self.cur_file))
        return self._postprocess_if_needed(batch)
    def _postprocess_if_needed(self, batch):
        # Optionally re-run the policy's trajectory postprocessing on the
        # stored experiences (config flag "postprocess_inputs").
        if not self.ioctx.config.get("postprocess_inputs"):
            return batch
        if isinstance(batch, SampleBatch):
            out = []
            for sub_batch in batch.split_by_episode():
                out.append(self.ioctx.worker.policy_map[DEFAULT_POLICY_ID]
                           .postprocess_trajectory(sub_batch))
            return SampleBatch.concat_samples(out)
        else:
            # TODO(ekl) this is trickier since the alignments between agent
            # trajectories in the episode are not available any more.
            raise NotImplementedError(
                "Postprocessing of multi-agent data not implemented yet.")
    def _try_parse(self, line):
        # Returns None for blank or unparseable lines (caller will retry).
        line = line.strip()
        if not line:
            return None
        try:
            return _from_json(line)
        except Exception:
            logger.exception("Ignoring corrupt json record in {}: {}".format(
                self.cur_file, line))
            return None
    def _next_line(self):
        if not self.cur_file:
            self.cur_file = self._next_file()
        line = self.cur_file.readline()
        tries = 0
        # On EOF, rotate to another (random) file; up to 100 attempts.
        while not line and tries < 100:
            tries += 1
            if hasattr(self.cur_file, "close"):  # legacy smart_open impls
                self.cur_file.close()
            self.cur_file = self._next_file()
            line = self.cur_file.readline()
            if not line:
                logger.debug("Ignoring empty file {}".format(self.cur_file))
        if not line:
            raise ValueError("Failed to read next line from files: {}".format(
                self.files))
        return line
    def _next_file(self):
        # Files are sampled with replacement, hence the random read order.
        path = random.choice(self.files)
        if urlparse(path).scheme:
            if smart_open is None:
                raise ValueError(
                    "You must install the `smart_open` module to read "
                    "from URIs like {}".format(path))
            return smart_open(path, "r")
        else:
            return open(path, "r")
def _from_json(batch):
    """Deserialize one JSON record into a SampleBatch or MultiAgentBatch.

    Raises ValueError when the record has no 'type' field or an unknown
    type value.
    """
    if isinstance(batch, bytes):  # smart_open S3 doesn't respect "r"
        batch = batch.decode("utf-8")
    data = json.loads(batch)
    if "type" not in data:
        raise ValueError("JSON record missing 'type' field")
    data_type = data.pop("type")
    if data_type == "SampleBatch":
        # Columns may be compressed; unpack each one.
        decoded = {k: unpack_if_needed(v) for k, v in data.items()}
        return SampleBatch(decoded)
    if data_type == "MultiAgentBatch":
        policy_batches = {
            policy_id: SampleBatch(
                {k: unpack_if_needed(v) for k, v in policy_batch.items()})
            for policy_id, policy_batch in data["policy_batches"].items()
        }
        return MultiAgentBatch(policy_batches, data["count"])
    raise ValueError(
        "Type field must be one of ['SampleBatch', 'MultiAgentBatch']",
        data_type)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.