code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import tensorflow as tf
import numpy as np
from rl_coach.architectures.tensorflow_components.heads import QHead
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
class CategoricalQHead(QHead):
    """Distributional (C51) Q-value head.

    For every action, outputs a softmax distribution over ``atoms`` fixed
    support points (``z_values``) spanning [v_min, v_max]; Q-values are the
    expectation of that distribution. Trained with a cross-entropy loss
    against a projected target distribution (Bellemare et al., 2017).
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer,
                         output_bias_initializer=output_bias_initializer)
        self.name = 'categorical_dqn_head'
        self.num_actions = len(self.spaces.action.actions)
        self.num_atoms = agent_parameters.algorithm.atoms
        # fixed support of the value distribution; cast to float64 to match the
        # dtype used for the expectation in _build_module
        self.z_values = tf.cast(tf.constant(np.linspace(self.ap.algorithm.v_min, self.ap.algorithm.v_max,
                                                        self.ap.algorithm.atoms), dtype=tf.float32),
                                dtype=tf.float64)
        # the loss is defined internally in _build_module, so no external loss type is registered
        self.loss_type = []

    def _build_module(self, input_layer):
        values_distribution = self.dense_layer(self.num_actions * self.num_atoms)\
            (input_layer, name='output', bias_initializer=self.output_bias_initializer)
        values_distribution = tf.reshape(values_distribution,
                                         (tf.shape(values_distribution)[0], self.num_actions,
                                          self.num_atoms))
        # softmax on atoms dimension
        self.output = tf.nn.softmax(values_distribution)
        # calculate cross entropy loss against the projected target distributions
        self.distributions = tf.placeholder(tf.float32, shape=(None, self.num_actions, self.num_atoms),
                                            name="distributions")
        self.target = self.distributions
        self.loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.target, logits=values_distribution)
        tf.losses.add_loss(self.loss)
        # Q(s, a) = sum_z p(z | s, a) * z
        self.q_values = tf.tensordot(tf.cast(self.output, tf.float64), self.z_values, 1)
        # used in batch-rl to estimate a probability distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        result = [
            "Dense (num outputs = {})".format(self.num_actions * self.num_atoms),
            "Reshape (output size = {} x {})".format(self.num_actions, self.num_atoms),
            "Softmax"
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.q_head import QHead
from rl_coach.base_parameters import AgentParameters
from rl_coach.memories.non_episodic import differentiable_neural_dictionary
from rl_coach.spaces import SpacesDefinition
class DNDQHead(QHead):
    """Q-value head backed by a Differentiable Neural Dictionary (NEC).

    Instead of a parametric output layer, Q(s, a) is computed by querying a
    DND memory per action: the k nearest stored embeddings are fetched and
    their stored values combined with inverse-distance weights
    (Pritzel et al., "Neural Episodic Control").
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        self.name = 'dnd_q_values_head'
        self.DND_size = agent_parameters.algorithm.dnd_size
        self.DND_key_error_threshold = agent_parameters.algorithm.DND_key_error_threshold
        self.l2_norm_added_delta = agent_parameters.algorithm.l2_norm_added_delta
        self.new_value_shift_coefficient = agent_parameters.algorithm.new_value_shift_coefficient
        self.number_of_nn = agent_parameters.algorithm.number_of_knn
        self.ap = agent_parameters
        # per-action tensors, filled in by _q_value() during graph construction
        self.dnd_embeddings = [None] * self.num_actions
        self.dnd_values = [None] * self.num_actions
        self.dnd_indices = [None] * self.num_actions
        self.dnd_distances = [None] * self.num_actions
        if self.ap.memory.shared_memory:
            self.shared_memory_scratchpad = self.ap.task_parameters.shared_memory_scratchpad

    def _build_module(self, input_layer):
        # restore a previously saved DND when resuming from a checkpoint
        if hasattr(self.ap.task_parameters, 'checkpoint_restore_path') and \
                self.ap.task_parameters.checkpoint_restore_path:
            self.DND = differentiable_neural_dictionary.load_dnd(self.ap.task_parameters.checkpoint_restore_path)
        else:
            self.DND = differentiable_neural_dictionary.QDND(
                self.DND_size, input_layer.get_shape()[-1], self.num_actions, self.new_value_shift_coefficient,
                key_error_threshold=self.DND_key_error_threshold,
                learning_rate=self.network_parameters.learning_rate,
                num_neighbors=self.number_of_nn,
                override_existing_keys=True)
        # Retrieve info from DND dictionary.
        # We assume that all actions have enough entries in the DND.
        self.q_values = self.output = tf.transpose([
            self._q_value(input_layer, action)
            for action in range(self.num_actions)
        ])
        # used in batch-rl to estimate a probability distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def _q_value(self, input_layer, action):
        """Build the Q(s, action) estimate from the k nearest DND entries."""
        result = tf.py_func(self.DND.query,
                            [input_layer, action, self.number_of_nn],
                            [tf.float64, tf.float64, tf.int64])
        self.dnd_embeddings[action] = tf.to_float(result[0])
        self.dnd_values[action] = tf.to_float(result[1])
        self.dnd_indices[action] = result[2]
        # inverse-distance weighted kernel; the small added delta keeps the
        # division below well-defined for exact matches
        square_diff = tf.square(self.dnd_embeddings[action] - tf.expand_dims(input_layer, 1))
        distances = tf.reduce_sum(square_diff, axis=2) + [self.l2_norm_added_delta]
        self.dnd_distances[action] = distances
        weights = 1.0 / distances
        normalised_weights = weights / tf.reduce_sum(weights, axis=1, keep_dims=True)
        q_value = tf.reduce_sum(self.dnd_values[action] * normalised_weights, axis=1)
        q_value.set_shape((None,))
        return q_value

    def _post_build(self):
        # gradients of the loss w.r.t. the fetched DND entries, used to update
        # the external memory itself (not handled by the regular optimizer)
        self.dnd_embeddings_grad = tf.gradients(self.loss[0], self.dnd_embeddings)
        self.dnd_values_grad = tf.gradients(self.loss[0], self.dnd_values)

    def __str__(self):
        result = [
            "DND fetch (num outputs = {})".format(self.num_actions)
        ]
        return '\n'.join(result)
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.losses.losses_impl import Reduction
from rl_coach.architectures.tensorflow_components.layers import Dense, convert_layer_class
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import force_list
from rl_coach.architectures.tensorflow_components.utils import squeeze_tensor
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
    """Return an initializer whose columns are scaled to L2 norm ``std``.

    Commonly used for policy/value output layers (e.g. in A3C) so that each
    output unit starts with the same magnitude.
    """
    def _initializer(shape, dtype=None, partition_info=None):
        out = np.random.randn(*shape).astype(np.float32)
        # scale each column to have L2 norm equal to std
        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
        return tf.constant(out)
    return _initializer
class Head(object):
    """
    A head is the final part of the network. It takes the embedding from the middleware embedder and passes it through
    a neural network to produce the output of the network. There can be multiple heads in a network, and each one has
    an assigned loss function. The heads are algorithm dependent.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense, is_training=False):
        self.head_idx = head_idx
        self.network_name = network_name
        self.network_parameters = agent_parameters.network_wrappers[self.network_name]
        self.name = "head"
        self.output = []
        self.loss = []
        self.loss_type = []
        self.regularizations = []
        # kept as a non-trainable local variable so it can be updated at runtime
        self.loss_weight = tf.Variable([float(w) for w in force_list(loss_weight)],
                                       trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
        self.target = []
        self.importance_weight = []
        self.input = []
        self.is_local = is_local
        self.ap = agent_parameters
        self.spaces = spaces
        self.return_type = None
        self.activation_function = activation_function
        self.dense_layer = dense_layer
        if self.dense_layer is None:
            self.dense_layer = Dense
        else:
            self.dense_layer = convert_layer_class(self.dense_layer)
        self.is_training = is_training

    def __call__(self, input_layer):
        """
        Wrapper for building the module graph including scoping and loss creation
        :param input_layer: the input to the graph
        :return: the output of the last layer and the target placeholder
        """
        with tf.variable_scope(self.get_name(), initializer=tf.contrib.layers.xavier_initializer()):
            self._build_module(squeeze_tensor(input_layer))

            # normalize everything the subclass may have set as a single value
            self.output = force_list(self.output)
            self.target = force_list(self.target)
            self.input = force_list(self.input)
            self.loss_type = force_list(self.loss_type)
            self.loss = force_list(self.loss)
            self.regularizations = force_list(self.regularizations)
            if self.is_local:
                self.set_loss()
            self._post_build()

        if self.is_local:
            return self.output, self.target, self.input, self.importance_weight
        else:
            return self.output, self.input

    def _build_module(self, input_layer):
        """
        Builds the graph of the module
        This method is called early on from __call__. It is expected to store the graph
        in self.output.
        :param input_layer: the input to the graph
        :return: None
        """
        pass

    def _post_build(self):
        """
        Optional function that allows adding any extra definitions after the head has been fully defined
        For example, this allows doing additional calculations that are based on the loss
        :return: None
        """
        pass

    def get_name(self):
        """
        Get a formatted name for the module
        :return: the formatted name
        """
        return '{}_{}'.format(self.name, self.head_idx)

    def set_loss(self):
        """
        Creates a target placeholder and loss function for each loss_type and regularization
        :return: None
        """
        # there are heads that define the loss internally, but we need to create additional placeholders for them
        for idx in range(len(self.loss)):
            importance_weight = tf.placeholder('float',
                                               [None] + [1] * (len(self.target[idx].shape) - 1),
                                               '{}_importance_weight'.format(self.get_name()))
            self.importance_weight.append(importance_weight)

        # add losses and target placeholder
        for idx in range(len(self.loss_type)):
            # create target placeholder
            target = tf.placeholder('float', self.output[idx].shape, '{}_target'.format(self.get_name()))
            self.target.append(target)

            # create importance sampling weights placeholder
            num_target_dims = len(self.target[idx].shape)
            importance_weight = tf.placeholder('float', [None] + [1] * (num_target_dims - 1),
                                               '{}_importance_weight'.format(self.get_name()))
            self.importance_weight.append(importance_weight)

            # compute the weighted loss. importance_weight weights over the samples in the batch, while
            # self.loss_weight weights the specific loss of this head against other losses in this head
            # or in other heads
            loss_weight = self.loss_weight[idx] * importance_weight
            # self.target[-1] is the target appended just above for this loss type
            loss = self.loss_type[idx](self.target[-1], self.output[idx],
                                       scope=self.get_name(), reduction=Reduction.NONE, loss_collection=None)

            # the loss is first summed over each sample in the batch and then the mean over the batch is taken
            loss = tf.reduce_mean(loss_weight * tf.reduce_sum(loss, axis=list(range(1, num_target_dims))))

            # we add the loss to the losses collection and later we will extract it in general_network
            tf.losses.add_loss(loss)
            self.loss.append(loss)

        # add regularizations
        for regularization in self.regularizations:
            self.loss.append(regularization)
            tf.losses.add_loss(regularization)

    @classmethod
    def path(cls):
        """Return the class name of this head.

        Note: the original ``cls.__class__.__name__`` evaluated the metaclass
        name ('type'), not the head's class name.
        """
        return cls.__name__
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import DiscreteActionSpace
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import eps
class ACERPolicyHead(Head):
    """Policy head for ACER (Wang et al., "Sample Efficient Actor-Critic with
    Experience Replay").

    Builds a discrete softmax policy and, on the local (training) network, the
    truncated importance-sampling loss with bias correction and an optional
    trust-region gradient adjustment against a running average policy.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        self.name = 'acer_policy_head'
        self.return_type = ActionProbabilities
        self.beta = None
        self.action_penalty = None

        # a scalar weight that penalizes low entropy values to encourage exploration
        if hasattr(agent_parameters.algorithm, 'beta_entropy'):
            # we set the beta value as a tf variable so it can be updated later if needed
            self.beta = tf.Variable(float(agent_parameters.algorithm.beta_entropy),
                                    trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
            self.beta_placeholder = tf.placeholder('float')
            self.set_beta = tf.assign(self.beta, self.beta_placeholder)

    def _build_module(self, input_layer):
        if isinstance(self.spaces.action, DiscreteActionSpace):
            # create a discrete action network (softmax probabilities output)
            self._build_discrete_net(input_layer, self.spaces.action)
        else:
            raise ValueError("only discrete action spaces are supported for ACER")

        if self.is_local:
            # add entropy regularization (beta is None when beta_entropy is not configured)
            if self.beta is not None:
                self.entropy = tf.reduce_mean(self.policy_distribution.entropy())
                self.regularizations += [-tf.multiply(self.beta, self.entropy, name='entropy_regularization')]

            # Truncated importance sampling with bias corrections
            importance_sampling_weight = tf.placeholder(tf.float32, [None, self.num_actions],
                                                        name='{}_importance_sampling_weight'.format(self.get_name()))
            self.input.append(importance_sampling_weight)
            importance_sampling_weight_i = tf.placeholder(tf.float32, [None],
                                                          name='{}_importance_sampling_weight_i'.format(self.get_name()))
            self.input.append(importance_sampling_weight_i)
            V_values = tf.placeholder(tf.float32, [None], name='{}_V_values'.format(self.get_name()))
            self.target.append(V_values)
            Q_values = tf.placeholder(tf.float32, [None, self.num_actions], name='{}_Q_values'.format(self.get_name()))
            self.input.append(Q_values)
            Q_retrace = tf.placeholder(tf.float32, [None], name='{}_Q_retrace'.format(self.get_name()))
            self.input.append(Q_retrace)

            # main policy-gradient term with the importance weight truncated at c
            action_log_probs_wrt_policy = self.policy_distribution.log_prob(self.actions)
            self.probability_loss = -tf.reduce_mean(action_log_probs_wrt_policy
                                                    * (Q_retrace - V_values)
                                                    * tf.minimum(self.ap.algorithm.importance_weight_truncation,
                                                                 importance_sampling_weight_i))

            # bias-correction term, active only where the importance weight exceeds the truncation
            log_probs_wrt_policy = tf.log(self.policy_probs + eps)
            bias_correction_gain = tf.reduce_sum(log_probs_wrt_policy
                                                 * (Q_values - tf.expand_dims(V_values, 1))
                                                 * tf.nn.relu(1.0 - (self.ap.algorithm.importance_weight_truncation
                                                                     / (importance_sampling_weight + eps)))
                                                 * tf.stop_gradient(self.policy_probs),
                                                 axis=1)
            self.bias_correction_loss = -tf.reduce_mean(bias_correction_gain)

            self.loss = self.probability_loss + self.bias_correction_loss
            tf.losses.add_loss(self.loss)

            # Trust region
            batch_size = tf.to_float(tf.shape(input_layer)[0])
            average_policy = tf.placeholder(tf.float32, [None, self.num_actions],
                                            name='{}_average_policy'.format(self.get_name()))
            self.input.append(average_policy)
            average_policy_distribution = tf.contrib.distributions.Categorical(probs=(average_policy + eps))
            self.kl_divergence = tf.reduce_mean(tf.distributions.kl_divergence(average_policy_distribution,
                                                                               self.policy_distribution))

            if self.ap.algorithm.use_trust_region_optimization:
                # identity in the forward pass; in the backward pass the gradient is
                # projected so the KL w.r.t. the average policy stays bounded
                @tf.custom_gradient
                def trust_region_layer(x):
                    def grad(g):
                        g = - g * batch_size
                        k = - average_policy / (self.policy_probs + eps)
                        adj = tf.nn.relu(
                            (tf.reduce_sum(k * g, axis=1) - self.ap.algorithm.max_KL_divergence)
                            / (tf.reduce_sum(tf.square(k), axis=1) + eps))
                        g = g - tf.expand_dims(adj, 1) * k
                        return - g / batch_size
                    return tf.identity(x), grad

                self.output = trust_region_layer(self.output)

    def _build_discrete_net(self, input_layer, action_space):
        self.num_actions = len(action_space.actions)

        self.actions = tf.placeholder(tf.int32, [None], name='{}_actions'.format(self.get_name()))
        self.input.append(self.actions)

        policy_values = self.dense_layer(self.num_actions)(input_layer, name='fc')
        self.policy_probs = tf.nn.softmax(policy_values, name='{}_policy'.format(self.get_name()))

        # (the + eps is to prevent probability 0 which will cause the log later on to be -inf)
        self.policy_distribution = tf.contrib.distributions.Categorical(probs=(self.policy_probs + eps))
        self.output = self.policy_probs
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import batchnorm_activation_dropout, Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition
class DDPGActor(Head):
    """Deterministic actor head for DDPG.

    Produces a bounded continuous action (mean) by scaling the activated
    dense output by the action space's max absolute range. Optionally adds a
    penalty on large pre-activation values.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'tanh', batchnorm: bool = True, dense_layer=Dense,
                 is_training=False):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer, is_training=is_training)
        self.name = 'ddpg_actor_head'
        self.return_type = ActionProbabilities
        self.num_actions = self.spaces.action.shape
        self.batchnorm = batchnorm

        # bounded actions
        self.output_scale = self.spaces.action.max_abs_range

        # a scalar weight that penalizes high activation values (before the activation function)
        # for the final layer. Default to None so _build_module does not raise AttributeError
        # when the algorithm parameters do not define 'action_penalty'.
        self.action_penalty = None
        if hasattr(agent_parameters.algorithm, 'action_penalty'):
            self.action_penalty = agent_parameters.algorithm.action_penalty

    def _build_module(self, input_layer):
        # mean
        pre_activation_policy_values_mean = self.dense_layer(self.num_actions)(input_layer, name='fc_mean')
        policy_values_mean = batchnorm_activation_dropout(input_layer=pre_activation_policy_values_mean,
                                                          batchnorm=self.batchnorm,
                                                          activation_function=self.activation_function,
                                                          dropout_rate=0,
                                                          is_training=self.is_training,
                                                          name="BatchnormActivationDropout_0")[-1]
        self.policy_mean = tf.multiply(policy_values_mean, self.output_scale, name='output_mean')

        if self.is_local:
            # add a squared penalty on the squared pre-activation features of the action
            if self.action_penalty and self.action_penalty != 0:
                self.regularizations += \
                    [self.action_penalty * tf.reduce_mean(tf.square(pre_activation_policy_values_mean))]

        self.output = [self.policy_mean]

    def __str__(self):
        result = [
            'Dense (num outputs = {})'.format(self.num_actions[0])
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import Measurements
from rl_coach.spaces import SpacesDefinition
class MeasurementsPredictionHead(Head):
    """Future-measurements prediction head (Direct Future Prediction).

    Predicts, per action, the future measurement vector for several steps
    ahead, using a dueling-style decomposition into an action-independent
    expectation stream and a per-action advantage stream.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        self.name = 'future_measurements_head'
        self.num_actions = len(self.spaces.action.actions)
        self.num_measurements = self.spaces.state['measurements'].shape[0]
        self.num_prediction_steps = agent_parameters.algorithm.num_predicted_steps_ahead
        self.multi_step_measurements_size = self.num_measurements * self.num_prediction_steps
        self.return_type = Measurements

    def _build_module(self, input_layer):
        # This is almost exactly the same as Dueling Network but we predict the future measurements for each action
        # actions expectation tower (expectation stream) - E
        with tf.variable_scope("expectation_stream"):
            expectation_stream = self.dense_layer(256)(input_layer, activation=self.activation_function,
                                                       name='fc1')
            expectation_stream = self.dense_layer(self.multi_step_measurements_size)(expectation_stream,
                                                                                     name='output')
            expectation_stream = tf.expand_dims(expectation_stream, axis=1)

        # action fine differences tower (action stream) - A
        with tf.variable_scope("action_stream"):
            action_stream = self.dense_layer(256)(input_layer, activation=self.activation_function, name='fc1')
            action_stream = self.dense_layer(self.num_actions * self.multi_step_measurements_size)(action_stream,
                                                                                                   name='output')
            action_stream = tf.reshape(action_stream,
                                       (tf.shape(action_stream)[0], self.num_actions,
                                        self.multi_step_measurements_size))
            # subtract the per-action mean so the advantage stream is zero-centered
            action_stream = action_stream - tf.reduce_mean(action_stream, reduction_indices=1, keepdims=True)

        # merge to future measurements predictions
        self.output = tf.add(expectation_stream, action_stream, name='output')

        self.target = tf.placeholder(tf.float32, [None, self.num_actions, self.multi_step_measurements_size],
                                     name="targets")
        # NaNs in the target mark entries with no supervision; replacing them with
        # the prediction itself zeroes their contribution to the loss
        targets_nonan = tf.where(tf.is_nan(self.target), self.output, self.target)
        self.loss = tf.reduce_sum(tf.reduce_mean(tf.square(targets_nonan - self.output), reduction_indices=0))
        tf.losses.add_loss(self.loss_weight[0] * self.loss)

    def __str__(self):
        result = [
            "State Value Stream - V",
            "\tDense (num outputs = 256)",
            "\tDense (num outputs = {})".format(self.multi_step_measurements_size),
            "Action Advantage Stream - A",
            "\tDense (num outputs = 256)",
            "\tDense (num outputs = {})".format(self.num_actions * self.multi_step_measurements_size),
            "\tReshape (new size = {} x {})".format(self.num_actions, self.multi_step_measurements_size),
            "\tSubtract(A, Mean(A))".format(self.num_actions),
            "Add (V, A)"
        ]
        return '\n'.join(result)
import tensorflow as tf
import numpy as np
from rl_coach.architectures.tensorflow_components.heads import QHead
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
class RainbowQHead(QHead):
    """Rainbow Q head: dueling architecture over a C51 value distribution.

    Combines a state-value stream and an action-advantage stream into logits
    over ``atoms`` support points per action, softmaxed on the atoms
    dimension and trained with cross-entropy against a projected target
    distribution.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        self.num_actions = len(self.spaces.action.actions)
        self.num_atoms = agent_parameters.algorithm.atoms
        self.name = 'rainbow_q_values_head'
        # fixed support of the value distribution; float64 to match q_values below
        self.z_values = tf.cast(tf.constant(np.linspace(self.ap.algorithm.v_min, self.ap.algorithm.v_max,
                                                        self.ap.algorithm.atoms), dtype=tf.float32),
                                dtype=tf.float64)
        # the loss is defined internally in _build_module
        self.loss_type = []

    def _build_module(self, input_layer):
        # state value tower - V
        with tf.variable_scope("state_value"):
            state_value = self.dense_layer(512)(input_layer, activation=self.activation_function, name='fc1')
            state_value = self.dense_layer(self.num_atoms)(state_value, name='fc2')
            state_value = tf.expand_dims(state_value, axis=1)

        # action advantage tower - A
        with tf.variable_scope("action_advantage"):
            action_advantage = self.dense_layer(512)(input_layer, activation=self.activation_function,
                                                     name='fc1')
            action_advantage = self.dense_layer(self.num_actions * self.num_atoms)(action_advantage, name='fc2')
            action_advantage = tf.reshape(action_advantage, (tf.shape(input_layer)[0], self.num_actions,
                                                             self.num_atoms))
            # zero-center the advantages across actions
            action_mean = tf.reduce_mean(action_advantage, axis=1, keepdims=True)
            action_advantage = action_advantage - action_mean

        # merge to state-action value function Q
        values_distribution = tf.add(state_value, action_advantage, name='output')

        # softmax on atoms dimension
        self.output = tf.nn.softmax(values_distribution)

        # calculate cross entropy loss against the projected target distributions
        self.distributions = tf.placeholder(tf.float32, shape=(None, self.num_actions, self.num_atoms),
                                            name="distributions")
        self.target = self.distributions
        self.loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.target, logits=values_distribution)
        tf.losses.add_loss(self.loss)

        # Q(s, a) = sum_z p(z | s, a) * z
        self.q_values = tf.tensordot(tf.cast(self.output, tf.float64), self.z_values, 1)

        # used in batch-rl to estimate a probability distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        result = [
            "State Value Stream - V",
            "\tDense (num outputs = 512)",
            "\tDense (num outputs = {})".format(self.num_atoms),
            "Action Advantage Stream - A",
            "\tDense (num outputs = 512)",
            "\tDense (num outputs = {})".format(self.num_actions * self.num_atoms),
            "\tReshape (new size = {} x {})".format(self.num_actions, self.num_atoms),
            "\tSubtract(A, Mean(A))".format(self.num_actions),
            "Add (V, A)",
            "Softmax"
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition
class PPOVHead(Head):
    """State-value head for PPO with a clipped value loss.

    Uses the pessimistic (element-wise max) of the unclipped squared error and
    the squared error of the value clipped around the old policy's value, as in
    the PPO2 implementation.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        self.name = 'ppo_v_head'
        self.clip_likelihood_ratio_using_epsilon = agent_parameters.algorithm.clip_likelihood_ratio_using_epsilon
        self.return_type = ActionProbabilities
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        self.old_policy_value = tf.placeholder(tf.float32, [None], "old_policy_values")
        self.input = [self.old_policy_value]
        self.output = self.dense_layer(1)(input_layer, name='output',
                                          kernel_initializer=normalized_columns_initializer(1.0),
                                          bias_initializer=self.output_bias_initializer)
        self.target = self.total_return = tf.placeholder(tf.float32, [None], name="total_return")
        # NOTE(review): self.output appears to have shape [None, 1] while the
        # targets are [None]; the subtraction broadcasts — confirm this matches
        # the intended per-sample loss shape.
        value_loss_1 = tf.square(self.output - self.target)
        # clip the value update around the old value estimate, then take the
        # pessimistic (larger) of the two squared errors
        value_loss_2 = tf.square(self.old_policy_value +
                                 tf.clip_by_value(self.output - self.old_policy_value,
                                                  -self.clip_likelihood_ratio_using_epsilon,
                                                  self.clip_likelihood_ratio_using_epsilon) - self.target)
        self.vf_loss = tf.reduce_mean(tf.maximum(value_loss_1, value_loss_2))
        self.loss = self.vf_loss
        tf.losses.add_loss(self.loss)

    def __str__(self):
        result = [
            "Dense (num outputs = 1)"
        ]
        return '\n'.join(result)
import tensorflow as tf
import numpy as np
from rl_coach.architectures.tensorflow_components.heads import QHead
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
class QuantileRegressionQHead(QHead):
    """QR-DQN head: predicts N unordered quantile locations per action.

    Trained with the quantile Huber loss (Dabney et al., "Distributional
    Reinforcement Learning with Quantile Regression"); Q-values are the mean
    over the uniformly-weighted quantiles.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer,
                         output_bias_initializer=output_bias_initializer)
        self.name = 'quantile_regression_dqn_head'
        self.num_actions = len(self.spaces.action.actions)
        self.num_atoms = agent_parameters.algorithm.atoms  # we use atom / quantile interchangeably
        self.huber_loss_interval = agent_parameters.algorithm.huber_loss_interval  # k
        # uniform probability per quantile; float64 to match q_values below
        self.quantile_probabilities = tf.cast(
            tf.constant(np.ones(self.ap.algorithm.atoms) / float(self.ap.algorithm.atoms), dtype=tf.float32),
            dtype=tf.float64)
        # the loss is defined internally in _build_module
        self.loss_type = []

    def _build_module(self, input_layer):
        # [batch, 2] indices (sample index, action index) for gather_nd below
        self.actions = tf.placeholder(tf.int32, [None, 2], name="actions")
        self.quantile_midpoints = tf.placeholder(tf.float32, [None, self.num_atoms], name="quantile_midpoints")
        self.input = [self.actions, self.quantile_midpoints]

        # the output of the head is the N unordered quantile locations {theta_1, ..., theta_N}
        quantiles_locations = self.dense_layer(self.num_actions * self.num_atoms)\
            (input_layer, name='output', bias_initializer=self.output_bias_initializer)
        quantiles_locations = tf.reshape(quantiles_locations,
                                         (tf.shape(quantiles_locations)[0], self.num_actions, self.num_atoms))
        self.output = quantiles_locations

        self.quantiles = tf.placeholder(tf.float32, shape=(None, self.num_atoms), name="quantiles")
        self.target = self.quantiles

        # only the quantiles of the taken action are taken into account
        quantiles_for_used_actions = tf.gather_nd(quantiles_locations, self.actions)

        # reorder the output quantiles and the target quantiles as a preparation step for calculating the loss
        # the output quantiles vector and the quantile midpoints are tiled as rows of a NxN matrix
        # (N = num quantiles), and the target quantiles vector is tiled as columns of a NxN matrix
        theta_i = tf.tile(tf.expand_dims(quantiles_for_used_actions, -1), [1, 1, self.num_atoms])
        T_theta_j = tf.tile(tf.expand_dims(self.target, -2), [1, self.num_atoms, 1])
        tau_i = tf.tile(tf.expand_dims(self.quantile_midpoints, -1), [1, 1, self.num_atoms])

        # Huber loss of T(theta_j) - theta_i, with interval k
        error = T_theta_j - theta_i
        abs_error = tf.abs(error)
        quadratic = tf.minimum(abs_error, self.huber_loss_interval)
        huber_loss = self.huber_loss_interval * (abs_error - quadratic) + 0.5 * quadratic ** 2

        # Quantile Huber loss: the Huber loss weighted by |tau - 1{error < 0}|
        quantile_huber_loss = tf.abs(tau_i - tf.cast(error < 0, dtype=tf.float32)) * huber_loss

        # Quantile regression loss (the probability for each quantile is 1/num_quantiles)
        quantile_regression_loss = tf.reduce_sum(quantile_huber_loss) / float(self.num_atoms)
        self.loss = quantile_regression_loss
        tf.losses.add_loss(self.loss)

        # Q(s, a) = mean over the uniformly-weighted quantile locations
        self.q_values = tf.tensordot(tf.cast(self.output, tf.float64), self.quantile_probabilities, 1)

        # used in batch-rl to estimate a probability distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        result = [
            "Dense (num outputs = {})".format(self.num_actions * self.num_atoms),
            "Reshape (new size = {} x {})".format(self.num_actions, self.num_atoms)
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpace
class QHead(Head):
    """Standard Q-value head: a single dense layer producing Q(s, a) per action.

    Supports discrete action spaces (one output per action) and box action
    spaces (a single Q-value). The loss is MSE or Huber, depending on the
    network wrapper configuration.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        self.name = 'q_values_head'

        if isinstance(self.spaces.action, BoxActionSpace):
            self.num_actions = 1
        elif isinstance(self.spaces.action, DiscreteActionSpace):
            self.num_actions = len(self.spaces.action.actions)
        else:
            raise ValueError(
                'QHead does not support action spaces of type: {class_name}'.format(
                    class_name=self.spaces.action.__class__.__name__,
                )
            )
        self.return_type = QActionStateValue
        if agent_parameters.network_wrappers[self.network_name].replace_mse_with_huber_loss:
            self.loss_type = tf.losses.huber_loss
        else:
            self.loss_type = tf.losses.mean_squared_error
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        # Standard Q Network
        self.q_values = self.output = self.dense_layer(self.num_actions)\
            (input_layer, name='output', bias_initializer=self.output_bias_initializer)

        # used in batch-rl to estimate a probability distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        result = [
            "Dense (num outputs = {})".format(self.num_actions)
        ]
        return '\n'.join(result)

    def add_softmax_with_temperature(self):
        """Return softmax(Q / T) where T is the configured softmax temperature."""
        temperature = self.ap.network_wrappers[self.network_name].softmax_temperature
        temperature_scaled_outputs = self.q_values / temperature
        return tf.nn.softmax(temperature_scaled_outputs, name="softmax")
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import VStateValue
from rl_coach.spaces import SpacesDefinition
class TD3VHead(Head):
    """
    TD3 critic head: builds one Q-value output per critic sub-network, the element-wise
    minimum of the critics' outputs, and the mean of the first critic's output.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, initializer='xavier', output_bias_initializer=None):
        """
        :param initializer: kernel initializer for the final dense layer:
                            'xavier' (default) or 'normalized_columns'
        :param output_bias_initializer: initializer for the final dense layer's bias
        """
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'td3_v_values_head'
        self.return_type = VStateValue
        # the loss is assembled manually in _build_module, so no framework loss type is set
        self.loss_type = []
        self.initializer = initializer
        self.loss = []
        self.output = []
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        # Standard V Network
        q_outputs = []
        # training target fed externally (the min over the target critics' Q-values)
        self.target = tf.placeholder(tf.float32, shape=(None, 1), name="q_networks_min_placeholder")
        for i in range(input_layer.shape[0]): # assuming that the actual size is 2, as there are two critic networks
            if self.initializer == 'normalized_columns':
                q_outputs.append(self.dense_layer(1)(input_layer[i], name='q_output_{}'.format(i + 1),
                                                     kernel_initializer=normalized_columns_initializer(1.0),
                                                     bias_initializer=self.output_bias_initializer),)
            elif self.initializer == 'xavier' or self.initializer is None:
                q_outputs.append(self.dense_layer(1)(input_layer[i], name='q_output_{}'.format(i + 1),
                                                     bias_initializer=self.output_bias_initializer))
            self.output.append(q_outputs[i])
            # per-critic MSE against the shared target
            self.loss.append(tf.reduce_mean((self.target-q_outputs[i])**2))
        # final output list layout: [Q1, Q2, min(Q1, Q2), mean(Q1)]
        self.output.append(tf.reduce_min(q_outputs, axis=0))
        # NOTE(review): mean of the FIRST critic's output - presumably used as the actor's
        # objective (TD3 updates the policy using Q1 only); confirm against the agent code
        self.output.append(tf.reduce_mean(self.output[0]))
        # total critic loss is the sum of the per-critic losses
        self.loss = sum(self.loss)
        tf.losses.add_loss(self.loss)

    def __str__(self):
        result = [
            "Q1 Action-Value Stream",
            "\tDense (num outputs = 1)",
            "Q2 Action-Value Stream",
            "\tDense (num outputs = 1)",
            "Min (Q1, Q2)"
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpace
from rl_coach.utils import force_list
class RegressionHead(Head):
    """
    A generic regression head: a configurable stack of hidden layers followed by a linear
    output layer with one output per action (or per action-space dimension).
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, scheme=None, output_bias_initializer=None):
        """
        :param scheme: list of layer definitions for the head's hidden layers; defaults to
                       two Dense(256) layers. None is used as the default (instead of a
                       mutable default argument) so every head instance gets fresh layer
                       definitions rather than sharing one module-level list.
        :param output_bias_initializer: initializer for the bias of the final output layer
        """
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'regression_head'
        # avoid the shared-mutable-default pitfall: build the default scheme per instance
        self.scheme = scheme if scheme is not None else [Dense(256), Dense(256)]
        self.layers = []
        if isinstance(self.spaces.action, BoxActionSpace):
            self.num_actions = self.spaces.action.shape[0]
        elif isinstance(self.spaces.action, DiscreteActionSpace):
            self.num_actions = len(self.spaces.action.actions)
        else:
            # fail fast (consistently with QHead) instead of leaving num_actions undefined
            # and crashing later with a confusing AttributeError
            raise ValueError(
                'RegressionHead does not support action spaces of type: {class_name}'.format(
                    class_name=self.spaces.action.__class__.__name__,
                )
            )
        self.return_type = QActionStateValue
        if agent_parameters.network_wrappers[self.network_name].replace_mse_with_huber_loss:
            self.loss_type = tf.losses.huber_loss
        else:
            self.loss_type = tf.losses.mean_squared_error
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        """Stack the scheme layers on top of input_layer and add the final linear output."""
        self.layers.append(input_layer)
        for idx, layer_params in enumerate(self.scheme):
            self.layers.extend(force_list(
                layer_params(input_layer=self.layers[-1], name='{}_{}'.format(layer_params.__class__.__name__, idx))
            ))
        self.layers.append(self.dense_layer(self.num_actions)(self.layers[-1], name='output',
                                                              bias_initializer=self.output_bias_initializer))
        self.output = self.layers[-1]

    def __str__(self):
        return '\n'.join(str(layer) for layer in self.layers)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import batchnorm_activation_dropout, Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import Embedding
from rl_coach.spaces import SpacesDefinition, BoxActionSpace
class WolpertingerActorHead(Head):
    """
    Actor head for the Wolpertinger architecture: emits a 'proto action' embedding,
    which is later matched against the embeddings of the discrete actions.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='tanh',
                 batchnorm: bool=True, dense_layer=Dense, is_training=False):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer, is_training=is_training)
        self.name = 'wolpertinger_actor_head'
        self.return_type = Embedding
        self.action_embedding_width = agent_parameters.algorithm.action_embedding_width
        self.batchnorm = batchnorm
        # when the (filtered) action space is a box, scale the activation output to its range
        action_space = self.spaces.action
        if hasattr(action_space, 'filtered_action_space') and \
                isinstance(action_space.filtered_action_space, BoxActionSpace):
            self.output_scale = action_space.filtered_action_space.max_abs_range
        else:
            self.output_scale = None

    def _build_module(self, input_layer):
        """Build the dense embedding layer followed by batchnorm / activation / scaling."""
        # mean
        embedding = self.dense_layer(self.action_embedding_width)(input_layer,
                                                                  name='actor_action_embedding')
        self.proto_action = batchnorm_activation_dropout(input_layer=embedding,
                                                         batchnorm=self.batchnorm,
                                                         activation_function=self.activation_function,
                                                         dropout_rate=0,
                                                         is_training=self.is_training,
                                                         name="BatchnormActivationDropout_0")[-1]
        if self.output_scale is not None:
            self.proto_action = tf.multiply(self.proto_action, self.output_scale, name='proto_action')
        self.output = [self.proto_action]

    def __str__(self):
        return 'Dense (num outputs = {})'.format(self.action_embedding_width)
import os
from abc import ABC, abstractmethod
import threading
import pickle
import redis
import numpy as np
from rl_coach.utils import get_latest_checkpoint
class SharedRunningStatsSubscribe(threading.Thread):
    """
    Background thread that subscribes to a redis pub/sub channel and feeds every
    published sample batch into the owning SharedRunningStats instance.
    """
    def __init__(self, shared_running_stats):
        super().__init__()
        self.shared_running_stats = shared_running_stats
        pubsub_params = self.shared_running_stats.pubsub.params
        self.redis_address = pubsub_params.redis_address
        self.redis_port = pubsub_params.redis_port
        self.redis_connection = redis.Redis(self.redis_address, self.redis_port)
        self.pubsub = self.redis_connection.pubsub()
        self.channel = self.shared_running_stats.channel
        self.pubsub.subscribe(self.channel)

    def run(self):
        # best-effort consumption: listen() also yields non-data messages (e.g. subscribe
        # confirmations) whose payload is not a pickle, so failures are deliberately skipped
        for message in self.pubsub.listen():
            try:
                self.shared_running_stats.push_val(pickle.loads(message['data']))
            except Exception:
                continue
class SharedRunningStats(ABC):
    """
    Abstract interface for running statistics (mean / std) of streaming data, optionally
    shared across distributed workers through a redis pub/sub channel.
    """
    def __init__(self, name="", pubsub_params=None):
        """
        :param name: used to derive a unique pub/sub channel for this statistics instance
        :param pubsub_params: memory backend parameters; when given, pushed samples are
                              published on the channel and folded in by all subscribers
        """
        self.name = name
        self.pubsub = None
        if pubsub_params:
            self.channel = "channel-srs-{}".format(self.name)
            # local import - presumably to avoid a circular import with the memories package
            from rl_coach.memories.backend.memory_impl import get_memory_backend
            self.pubsub = get_memory_backend(pubsub_params)
            # daemon thread that feeds published samples back into this instance via push_val
            subscribe_thread = SharedRunningStatsSubscribe(self)
            subscribe_thread.daemon = True
            subscribe_thread.start()

    @abstractmethod
    def set_params(self, shape=[1], clip_values=None):
        """Set the per-sample shape of the tracked statistics and an optional clip range."""
        pass

    def push(self, x):
        """
        Add a batch of samples. When pub/sub sharing is enabled the samples are published
        (and accumulated by every subscriber, including this instance); otherwise they are
        accumulated directly through push_val.
        """
        if self.pubsub:
            self.pubsub.redis_connection.publish(self.channel, pickle.dumps(x))
            return
        self.push_val(x)

    @abstractmethod
    def push_val(self, x):
        """Accumulate a batch of samples directly (bypassing pub/sub)."""
        pass

    @property
    @abstractmethod
    def n(self):
        """Number of samples accumulated so far."""
        pass

    @property
    @abstractmethod
    def mean(self):
        """Running mean."""
        pass

    @property
    @abstractmethod
    def var(self):
        """Running variance."""
        pass

    @property
    @abstractmethod
    def std(self):
        """Running standard deviation."""
        pass

    @property
    @abstractmethod
    def shape(self):
        """Shape of a single tracked sample."""
        pass

    @abstractmethod
    def normalize(self, batch):
        """Standardize a batch using the running statistics."""
        pass

    @abstractmethod
    def set_session(self, sess):
        """Provide a TF session for implementations that need one."""
        pass

    @abstractmethod
    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: int):
        """Persist the statistics state to a checkpoint file."""
        pass

    @abstractmethod
    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Restore the statistics state from the latest matching checkpoint file."""
        pass
class NumpySharedRunningStats(SharedRunningStats):
    """
    Numpy implementation of SharedRunningStats: tracks a running mean / std through
    accumulated sums and sums of squares.
    """
    def __init__(self, name, epsilon=1e-2, pubsub_params=None):
        """
        :param name: name used for the pub/sub channel (when sharing is enabled)
        :param epsilon: pseudo-count / variance floor keeping the statistics well-defined
                        before any samples were pushed
        :param pubsub_params: optional memory backend parameters for worker sharing
        """
        super().__init__(name=name, pubsub_params=pubsub_params)
        self._count = epsilon  # pseudo sample count, so mean/std are defined from the start
        self.epsilon = epsilon
        self.checkpoint_file_extension = 'srs'

    def set_params(self, shape=[1], clip_values=None):
        """
        Initialize the statistics buffers.

        :param shape: shape of a single sample (without the batch dimension)
        :param clip_values: optional (min, max) pair that normalize() clips its output to
        """
        self._shape = shape
        self._mean = np.zeros(shape)
        self._std = np.sqrt(self.epsilon) * np.ones(shape)
        self._sum = np.zeros(shape)
        self._sum_squares = self.epsilon * np.ones(shape)
        self.clip_values = clip_values

    def push_val(self, samples: np.ndarray):
        """
        Accumulate a batch of samples into the running statistics.

        :param samples: array of shape (batch_size, *shape) - the batch dimension is required
        """
        assert len(samples.shape) >= 2  # we should always have a batch dimension
        assert samples.shape[1:] == self._mean.shape, 'RunningStats input shape mismatch'
        samples = samples.astype(np.float64)
        self._sum += samples.sum(axis=0)
        self._sum_squares += np.square(samples).sum(axis=0)
        self._count += np.shape(samples)[0]
        self._mean = self._sum / self._count
        # sample variance via sum-of-squares, floored at epsilon for numerical stability
        self._std = np.sqrt(np.maximum(
            (self._sum_squares - self._count * np.square(self._mean)) / np.maximum(self._count - 1, 1),
            self.epsilon))

    @property
    def n(self):
        """Number of samples accumulated so far (includes the epsilon pseudo-count)."""
        return self._count

    @property
    def mean(self):
        return self._mean

    @property
    def var(self):
        return self._std ** 2

    @property
    def std(self):
        return self._std

    @property
    def shape(self):
        return self._mean.shape

    def normalize(self, batch):
        """Standardize batch by the running mean/std and clip it if clip_values was set."""
        batch = (batch - self.mean) / (self.std + 1e-15)
        if self.clip_values is not None:
            # guard added: the original unconditionally unpacked clip_values, raising
            # TypeError when set_params was called with the default clip_values=None
            batch = np.clip(batch, *self.clip_values)
        return batch

    def set_session(self, sess):
        # no session for the numpy implementation
        pass

    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: int):
        """Pickle the full statistics state to <checkpoint_dir>/<checkpoint_prefix>.srs."""
        dict_to_save = {'_mean': self._mean,
                        '_std': self._std,
                        '_count': self._count,
                        '_sum': self._sum,
                        '_sum_squares': self._sum_squares}
        with open(os.path.join(checkpoint_dir, str(checkpoint_prefix) + '.' + self.checkpoint_file_extension), 'wb') as f:
            pickle.dump(dict_to_save, f, pickle.HIGHEST_PROTOCOL)

    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Find the latest matching checkpoint file and restore the statistics from it."""
        latest_checkpoint_filename = get_latest_checkpoint(checkpoint_dir, checkpoint_prefix,
                                                           self.checkpoint_file_extension)
        if latest_checkpoint_filename == '':
            raise ValueError("Could not find NumpySharedRunningStats checkpoint file. ")
        with open(os.path.join(checkpoint_dir, str(latest_checkpoint_filename)), 'rb') as f:
            saved_dict = pickle.load(f)
        self.__dict__.update(saved_dict)
import os
import numpy as np
from rl_coach.core_types import RewardType
from rl_coach.filters.reward.reward_filter import RewardFilter
from rl_coach.spaces import RewardSpace
from rl_coach.utilities.shared_running_stats import NumpySharedRunningStats
class RewardNormalizationFilter(RewardFilter):
    """
    Standardizes rewards using running mean / std statistics collected over all rewards
    seen so far. In distributed training, every worker contributes to (and reads from)
    the shared statistics.
    """
    def __init__(self, clip_min: float=-5.0, clip_max: float=5.0):
        """
        :param clip_min: lower bound applied to the normalized reward
        :param clip_max: upper bound applied to the normalized reward
        """
        super().__init__()
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.running_rewards_stats = None

    def set_device(self, device, memory_backend_params=None, mode='numpy') -> None:
        """
        Instantiate the running-statistics object, either numpy-based or TF-based.

        :param device: the device to place TF ops on (only relevant when mode == 'tf')
        :param memory_backend_params: parameters for sharing statistics between workers
        :param mode: 'numpy' or 'tf'
        :return: None
        """
        if mode == 'numpy':
            self.running_rewards_stats = NumpySharedRunningStats(name='rewards_stats',
                                                                 pubsub_params=memory_backend_params)
        elif mode == 'tf':
            # imported lazily so the numpy path does not require tensorflow
            from rl_coach.architectures.tensorflow_components.shared_variables import TFSharedRunningStats
            self.running_rewards_stats = TFSharedRunningStats(device, name='rewards_stats', create_ops=False,
                                                              pubsub_params=memory_backend_params)

    def set_session(self, sess) -> None:
        """
        Forward the TF session to the running statistics (no-op for the numpy backend).

        :param sess: the session
        :return: None
        """
        self.running_rewards_stats.set_session(sess)

    def filter(self, reward: RewardType, update_internal_state: bool=True) -> RewardType:
        if update_internal_state:
            # the statistics expect a leading batch dimension; wrap scalars as (1, 1)
            needs_batch_dim = not isinstance(reward, np.ndarray) or len(reward.shape) < 2
            if needs_batch_dim:
                reward = np.array([[reward]])
            self.running_rewards_stats.push(reward)
        return self.running_rewards_stats.normalize(reward).squeeze()

    def get_filtered_reward_space(self, input_reward_space: RewardSpace) -> RewardSpace:
        # normalization tracks a single scalar statistic and clips to the configured range
        self.running_rewards_stats.set_params(shape=(1,), clip_values=(self.clip_min, self.clip_max))
        return input_reward_space

    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        self.running_rewards_stats.save_state_to_checkpoint(checkpoint_dir, checkpoint_prefix)

    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        self.running_rewards_stats.restore_state_from_checkpoint(checkpoint_dir, checkpoint_prefix)
import os
import numpy as np
import pickle
from rl_coach.core_types import RewardType
from rl_coach.filters.reward.reward_filter import RewardFilter
from rl_coach.spaces import RewardSpace
from rl_coach.utils import get_latest_checkpoint
class RewardEwmaNormalizationFilter(RewardFilter):
    """
    Centers rewards by subtracting an exponentially-weighted moving average (EWMA)
    of the rewards seen so far.
    """
    def __init__(self, alpha: float = 0.01):
        """
        :param alpha: smoothing factor between 0 and 1; larger values discount
                      older observations faster
        """
        super().__init__()
        self.alpha = alpha
        self.moving_average = 0
        self.initialized = False
        self.checkpoint_file_extension = 'ewma'
        self.supports_batching = True

    def filter(self, reward: RewardType, update_internal_state: bool=True) -> RewardType:
        reward = np.asarray(reward)
        if update_internal_state:
            batch_mean = np.mean(reward)
            if self.initialized:
                # standard EWMA update: m <- m + alpha * (x - m)
                self.moving_average += self.alpha * (batch_mean - self.moving_average)
            else:
                # the first sample initializes the average directly
                self.moving_average = batch_mean
                self.initialized = True
        return reward - self.moving_average

    def get_filtered_reward_space(self, input_reward_space: RewardSpace) -> RewardSpace:
        # subtracting a (bounded) constant does not change the reward space definition
        return input_reward_space

    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: int):
        """Pickle the moving average to <checkpoint_dir>/<checkpoint_prefix>.ewma."""
        state = {'moving_average': self.moving_average}
        checkpoint_path = os.path.join(checkpoint_dir,
                                       str(checkpoint_prefix) + '.' + self.checkpoint_file_extension)
        with open(checkpoint_path, 'wb') as f:
            pickle.dump(state, f, pickle.HIGHEST_PROTOCOL)

    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Load the latest matching checkpoint file and restore the moving average."""
        latest = get_latest_checkpoint(checkpoint_dir, checkpoint_prefix,
                                       self.checkpoint_file_extension)
        if latest == '':
            raise ValueError("Could not find RewardEwmaNormalizationFilter checkpoint file. ")
        with open(os.path.join(checkpoint_dir, str(latest)), 'rb') as f:
            self.__dict__.update(pickle.load(f))
from typing import Union
import numpy as np
from rl_coach.core_types import ActionType
from rl_coach.filters.action.action_filter import ActionFilter
from rl_coach.spaces import BoxActionSpace
class BoxMasking(ActionFilter):
    """
    Restricts the agent to a sub-range of a box action space. The agent sees a shifted
    action space that starts at 0 and spans exactly the unmasked region; this filter
    shifts the agent's actions back into the original (environment) coordinates.
    For example, masking an environment range of [-1, 1] down to [0, 1] hides [-1, 0)
    from the agent, which then acts in a [0, 1] space.
    """
    def __init__(self,
                 masked_target_space_low: Union[None, int, float, np.ndarray],
                 masked_target_space_high: Union[None, int, float, np.ndarray]):
        """
        :param masked_target_space_low: the lowest values that can be chosen in the target action space
        :param masked_target_space_high: the highest values that can be chosen in the target action space
        """
        self.masked_target_space_low = masked_target_space_low
        self.masked_target_space_high = masked_target_space_high
        # actions from the shifted agent space are translated back by this offset
        self.offset = masked_target_space_low
        super().__init__()

    def set_masking(self, masked_target_space_low: Union[None, int, float, np.ndarray],
                    masked_target_space_high: Union[None, int, float, np.ndarray]):
        """Update the masked region and, if the output space is known, rebuild the agent-side space."""
        self.masked_target_space_low = masked_target_space_low
        self.masked_target_space_high = masked_target_space_high
        self.offset = masked_target_space_low
        if self.output_action_space:
            self.validate_output_action_space(self.output_action_space)
            self.input_action_space = BoxActionSpace(self.output_action_space.shape,
                                                     low=0,
                                                     high=self.masked_target_space_high - self.masked_target_space_low)

    def validate_output_action_space(self, output_action_space: BoxActionSpace):
        if not isinstance(output_action_space, BoxActionSpace):
            raise ValueError("BoxActionSpace discretization only works with an output space of type BoxActionSpace. "
                             "The given output space is {}".format(output_action_space))
        if self.masked_target_space_low is None or self.masked_target_space_high is None:
            raise ValueError("The masking target space size was not set. Please call set_masking.")
        low_within_range = np.all(output_action_space.low <= self.masked_target_space_low) and \
            np.all(self.masked_target_space_low <= output_action_space.high)
        if not low_within_range:
            raise ValueError("The low values for masking the action space ({}) are not within the range of the "
                             "target space (low = {}, high = {})"
                             .format(self.masked_target_space_low, output_action_space.low, output_action_space.high))
        high_within_range = np.all(output_action_space.low <= self.masked_target_space_high) and \
            np.all(self.masked_target_space_high <= output_action_space.high)
        if not high_within_range:
            raise ValueError("The high values for masking the action space ({}) are not within the range of the "
                             "target space (low = {}, high = {})"
                             .format(self.masked_target_space_high, output_action_space.low, output_action_space.high))

    def get_unfiltered_action_space(self, output_action_space: BoxActionSpace) -> BoxActionSpace:
        self.output_action_space = output_action_space
        # the agent-side space starts at 0 and spans the size of the unmasked region
        self.input_action_space = BoxActionSpace(output_action_space.shape,
                                                 low=0,
                                                 high=self.masked_target_space_high - self.masked_target_space_low)
        return self.input_action_space

    def filter(self, action: ActionType) -> ActionType:
        # shift the agent's 0-based action back into the masked region of the target space
        return action + self.offset
from rl_coach.core_types import ActionType
from rl_coach.filters.filter import Filter
from rl_coach.spaces import ActionSpace
class ActionFilter(Filter):
    """
    Base class for filters that translate actions between the agent's action space
    and the environment's action space.
    """
    def __init__(self, input_action_space: ActionSpace=None):
        self.input_action_space = input_action_space
        self.output_action_space = None
        super().__init__()

    def get_unfiltered_action_space(self, output_action_space: ActionSpace) -> ActionSpace:
        """
        Given the environment-side (output) action space, derive and return the
        agent-side (unfiltered) action space. The default is the identity mapping.

        :param output_action_space: the output action space
        :return: the unfiltered action space
        """
        return output_action_space

    def validate_output_action_space(self, output_action_space: ActionSpace):
        """
        Hook for validating that the given output action space is compatible with
        this filter. No-op by default.

        :param output_action_space: the input action space
        :return: None
        """
        pass

    def validate_output_action(self, action: ActionType):
        """
        Raise ValueError if the given action is not contained in the output action space.

        :param action: an action to validate
        :return: None
        """
        if self.output_action_space.contains(action):
            return
        raise ValueError("The given action ({}) does not match the action space ({})"
                         .format(action, self.output_action_space))

    def filter(self, action: ActionType) -> ActionType:
        """
        Map an action from the agent's action space to the environment's action space.
        Must be overridden by subclasses.

        :param action: an action to transform
        :return: transformed action
        """
        raise NotImplementedError("")

    def reverse_filter(self, action: ActionType) -> ActionType:
        """
        Map an action from the environment's action space back to the agent's action space.
        Must be overridden by subclasses.

        :param action: an action to transform
        :return: transformed action
        """
        raise NotImplementedError("")
from typing import Union
import numpy as np
from rl_coach.core_types import ActionType
from rl_coach.filters.action.action_filter import ActionFilter
from rl_coach.spaces import BoxActionSpace
class LinearBoxToBoxMap(ActionFilter):
    """
    Linearly maps one box action space onto another. The agent acts in a configured
    input range (e.g. [-1, 1]) and this filter rescales those actions onto the
    environment's range (e.g. [0, 1]): the input low maps to the output low, the input
    high to the output high, and everything in between is interpolated linearly.
    """
    def __init__(self,
                 input_space_low: Union[None, int, float, np.ndarray],
                 input_space_high: Union[None, int, float, np.ndarray]):
        """
        :param input_space_low: the low values of the agent-side (input) action space
        :param input_space_high: the high values of the agent-side (input) action space
        """
        self.input_space_low = input_space_low
        self.input_space_high = input_space_high
        self.rescale = None
        self.offset = None
        super().__init__()

    def validate_output_action_space(self, output_action_space: BoxActionSpace):
        if isinstance(output_action_space, BoxActionSpace):
            return
        raise ValueError("BoxActionSpace discretization only works with an output space of type BoxActionSpace. "
                         "The given output space is {}".format(output_action_space))

    def get_unfiltered_action_space(self, output_action_space: BoxActionSpace) -> BoxActionSpace:
        self.output_action_space = output_action_space
        self.input_action_space = BoxActionSpace(output_action_space.shape, self.input_space_low, self.input_space_high)
        # precompute the linear transform: ratio of the output span to the input span
        self.rescale = \
            (output_action_space.high - output_action_space.low) / (self.input_space_high - self.input_space_low)
        self.offset = output_action_space.low - self.input_space_low
        return self.input_action_space

    def filter(self, action: ActionType) -> ActionType:
        # shift to the input origin, rescale, then shift to the output origin
        return self.output_action_space.low + (action - self.input_space_low) * self.rescale
from typing import List
from rl_coach.core_types import ActionType
from rl_coach.filters.action.action_filter import ActionFilter
from rl_coach.spaces import DiscreteActionSpace, ActionSpace
class PartialDiscreteActionSpaceMap(ActionFilter):
    """
    Maps a discrete action space onto a chosen subset of actions from a countable target
    action space. For example, given a MultiSelect environment with 8 composite actions,
    mapping 5 of them here lets the agent pick them by index (0-4) through a plain
    discrete action space, while the remaining 3 stay masked from the agent.
    """
    def __init__(self, target_actions: List[ActionType]=None, descriptions: List[str]=None):
        """
        :param target_actions: A partial list of actions from the target space to map to.
        :param descriptions: a list of descriptions of each of the actions
        """
        self.target_actions = target_actions
        self.descriptions = descriptions
        super().__init__()

    def validate_output_action_space(self, output_action_space: ActionSpace):
        if not self.target_actions:
            raise ValueError("The target actions were not set")
        for target_action in self.target_actions:
            if output_action_space.contains(target_action):
                continue
            raise ValueError("The values in the output actions ({}) do not match the output action "
                             "space definition ({})".format(target_action, output_action_space))

    def get_unfiltered_action_space(self, output_action_space: ActionSpace) -> DiscreteActionSpace:
        self.output_action_space = output_action_space
        self.input_action_space = DiscreteActionSpace(len(self.target_actions), self.descriptions,
                                                      filtered_action_space=output_action_space)
        return self.input_action_space

    def filter(self, action: ActionType) -> ActionType:
        # the discrete action is simply an index into the selected target actions
        return self.target_actions[action]

    def reverse_filter(self, action: ActionType) -> ActionType:
        # find the index of the matching target action (ValueError if there is none)
        matches = [(action == candidate).all() for candidate in self.target_actions]
        return matches.index(True)
from typing import Union, List
import numpy as np
from rl_coach.filters.action.box_discretization import BoxDiscretization
from rl_coach.filters.action.partial_discrete_action_space_map import PartialDiscreteActionSpaceMap
from rl_coach.spaces import AttentionActionSpace, BoxActionSpace, DiscreteActionSpace
class AttentionDiscretization(PartialDiscreteActionSpaceMap):
    """
    Discretizes an **AttentionActionSpace**. The attention action space defines the actions
    as choosing sub-boxes in a given box. For example, consider an image of size 100x100, where the action is choosing
    a crop window of size 20x20 to attend to in the image. AttentionDiscretization allows discretizing the possible crop
    windows to choose into a finite number of options, and map a discrete action space into those crop windows.
    Warning! this will currently only work for attention spaces with 2 dimensions.
    """
    def __init__(self, num_bins_per_dimension: Union[int, List[int]], force_int_bins=False):
        """
        :param num_bins_per_dimension: Number of discrete bins to use for each dimension of the action space
        :param force_int_bins: If set to True, all the bins will represent integer coordinates in space.
        """
        # we allow specifying either a single number for all dimensions, or a single number per dimension in the target
        # action space
        self.num_bins_per_dimension = num_bins_per_dimension
        self.force_int_bins = force_int_bins
        # TODO: this will currently only work for attention spaces with 2 dimensions. generalize it.
        super().__init__()

    def validate_output_action_space(self, output_action_space: AttentionActionSpace):
        if not isinstance(output_action_space, AttentionActionSpace):
            raise ValueError("AttentionActionSpace discretization only works with an output space of type AttentionActionSpace. "
                             "The given output space is {}".format(output_action_space))

    def get_unfiltered_action_space(self, output_action_space: AttentionActionSpace) -> DiscreteActionSpace:
        # a scalar bin count applies to every dimension of the attention space
        if isinstance(self.num_bins_per_dimension, int):
            self.num_bins_per_dimension = [self.num_bins_per_dimension] * output_action_space.shape[0]
        # create a discrete to linspace map to ease the extraction of attention actions:
        # discretizing with (bins + 1) values per dimension yields the grid of cell CORNER points
        discrete_to_box = BoxDiscretization([n+1 for n in self.num_bins_per_dimension],
                                            self.force_int_bins)
        discrete_to_box.get_unfiltered_action_space(BoxActionSpace(output_action_space.shape,
                                                                   output_action_space.low,
                                                                   output_action_space.high), )
        rows, cols = self.num_bins_per_dimension
        # each discrete action is one grid cell, described by two corner points taken from the
        # flattened (rows+1) x (cols+1) grid of corners:
        # - start_ind: flat index of the cell's top-left corner (skipping the last row/column)
        # - end_ind: flat index of the corner one row down and one column right (bottom-right)
        start_ind = [i * (cols + 1) + j for i in range(rows + 1) if i < rows for j in range(cols + 1) if j < cols]
        end_ind = [i + cols + 2 for i in start_ind]
        # a target action is the (top-left, bottom-right) corner pair defining the crop window
        self.target_actions = [np.array([discrete_to_box.target_actions[start],
                                         discrete_to_box.target_actions[end]])
                               for start, end in zip(start_ind, end_ind)]
        return super().get_unfiltered_action_space(output_action_space)
from itertools import product
from typing import Union, List
import numpy as np
from rl_coach.filters.action.partial_discrete_action_space_map import PartialDiscreteActionSpaceMap
from rl_coach.spaces import BoxActionSpace, DiscreteActionSpace
class BoxDiscretization(PartialDiscreteActionSpaceMap):
    """
    Discretizes a continuous action space into a discrete action space, allowing the usage of
    agents such as DQN for continuous environments such as MuJoCo. Given the number of bins to discretize into, the
    original continuous action space is uniformly separated into the given number of bins, each mapped to a discrete
    action index. For example, if the original action space is between -1 and 1 and 5 bins were selected, the new
    action space will consist of 5 actions mapped to -1, -0.5, 0, 0.5 and 1.
    """
    def __init__(self, num_bins_per_dimension: Union[int, List[int]], force_int_bins=False):
        """
        :param num_bins_per_dimension: The number of bins to use for each dimension of the target action space.
                                       The bins will be spread out uniformly over this space
        :param force_int_bins: force the bins to represent only integer actions. for example, if the action space is
                               in the range 0-10 and there are 5 bins, then the bins will be placed at 0, 2, 5, 7, 10,
                               instead of 0, 2.5, 5, 7.5, 10.
        """
        # either a single number for all dimensions, or one number per dimension of the target space
        self.num_bins_per_dimension = num_bins_per_dimension
        self.force_int_bins = force_int_bins
        super().__init__()

    def validate_output_action_space(self, output_action_space: BoxActionSpace):
        if not isinstance(output_action_space, BoxActionSpace):
            raise ValueError("BoxActionSpace discretization only works with an output space of type BoxActionSpace. "
                             "The given output space is {}".format(output_action_space))
        # a scalar bin count means "the same number of bins for every dimension" and is only
        # expanded in get_unfiltered_action_space - nothing to check against the shape yet
        # (the original code crashed here with TypeError: len() on an int)
        if isinstance(self.num_bins_per_dimension, int):
            return
        if len(self.num_bins_per_dimension) != output_action_space.shape:
            # TODO: this check is not sufficient. it does not deal with actions spaces with more than one axis
            raise ValueError("The length of the list of bins per dimension ({}) does not match the number of "
                             "dimensions in the action space ({})"
                             .format(len(self.num_bins_per_dimension), output_action_space))

    def get_unfiltered_action_space(self, output_action_space: BoxActionSpace) -> DiscreteActionSpace:
        if isinstance(self.num_bins_per_dimension, int):
            # expand the scalar bin count to one count per dimension, using an INTEGER dtype:
            # np.ones(...) alone would produce floats, and np.linspace requires an integer num
            # (a float num raises TypeError on modern numpy)
            self.num_bins_per_dimension = np.ones(output_action_space.shape, dtype=int) * self.num_bins_per_dimension
        bins = []
        for i in range(len(output_action_space.low)):
            dim_bins = np.linspace(output_action_space.low[i], output_action_space.high[i],
                                   int(self.num_bins_per_dimension[i]))
            if self.force_int_bins:
                dim_bins = dim_bins.astype(int)
            bins.append(dim_bins)
        # the discrete actions are the cartesian product of the per-dimension bin values
        self.target_actions = [list(action) for action in list(product(*bins))]
        return super().get_unfiltered_action_space(output_action_space)
import copy
from collections import deque
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace, VectorObservationSpace
class LazyStack(object):
    """
    A deferred equivalent of ``np.stack``: it keeps a shallow copy of the observation
    history and only materializes the stacked array when something consumes it as a
    numpy array, postponing the memory copy for as long as possible.
    """

    def __init__(self, history, axis=None):
        # shallow-copy the container so later mutations by the caller do not leak in
        self.history = copy.copy(history)
        self.axis = axis

    def __array__(self, dtype=None):
        # materialize the stack; this is where the actual memory copy happens
        stacked = np.stack(self.history, axis=self.axis)
        return stacked if dtype is None else stacked.astype(dtype)
class ObservationStackingFilter(ObservationFilter):
    """
    Stacks several observations on top of each other. For image observation this will
    create a 3D blob. The stacking is done in a lazy manner in order to reduce memory consumption. To achieve this,
    a LazyStack object is used in order to wrap the observations in the stack. For this reason, the
    ObservationStackingFilter **must** be the last filter in the inputs filters stack.
    This filter is stateful since it stores the previous step result and depends on it.
    The filter adds an additional dimension to the output observation.

    Warning!!! The filter replaces the observation with a LazyStack object, so no filters should be
    applied after this filter. applying more filters will cause the LazyStack object to be converted to a numpy array
    and increase the memory footprint.
    """
    def __init__(self, stack_size: int, stacking_axis: int=-1):
        """
        :param stack_size: the number of previous observations in the stack
        :param stacking_axis: the axis on which to stack the observation on
        :raises ValueError: if stack_size is not a positive int
        """
        super().__init__()
        # validate the arguments before storing them. the type is checked first so that a
        # non-numeric stack_size raises a clear error instead of failing the <= comparison
        if not isinstance(stack_size, int):
            raise ValueError("The stack shape must be of int type")
        if stack_size <= 0:
            raise ValueError("The stack shape must be a positive number")
        self.stack_size = stack_size
        self.stacking_axis = stacking_axis
        self.stack = []  # holds the last stack_size observations (becomes a deque on the first filter() call)
        self.input_observation_space = None

    @property
    def next_filter(self) -> 'InputFilter':
        return self._next_filter

    @next_filter.setter
    def next_filter(self, val: 'InputFilter'):
        # stacking must be the last filter in the chain - any later filter would materialize the LazyStack
        raise ValueError("ObservationStackingFilter can have no other filters after it since they break its "
                         "functionality")

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        :raises ValueError: if the space doesn't match the already-stored observations or the stacking axis
        """
        if len(self.stack) > 0 and not input_observation_space.contains(self.stack[-1]):
            raise ValueError("The given input observation space is different than the observations already stored in"
                             "the filters memory")
        if input_observation_space.num_dimensions <= self.stacking_axis:
            raise ValueError("The stacking axis is larger than the number of dimensions in the observation space")

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Push the observation into the stack and return the (lazily) stacked result.

        :param observation: the new observation
        :param update_internal_state: if False, the stack is left unchanged (once it has been initialized)
        :return: a LazyStack over the stack, or a flattened numpy array for vector observations
        """
        if len(self.stack) == 0:
            # first observation ever - fill the entire stack with copies of it
            self.stack = deque([observation] * self.stack_size, maxlen=self.stack_size)
        else:
            if update_internal_state:
                self.stack.append(observation)
        observation = LazyStack(self.stack, self.stacking_axis)

        # self.input_observation_space is only set by get_filtered_observation_space for vector spaces
        if isinstance(self.input_observation_space, VectorObservationSpace):
            # when stacking vectors, we cannot avoid copying the memory as we're flattening it all
            observation = np.array(observation).flatten()
        return observation

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Add the stacking dimension to the observation space (vector spaces are flattened instead).
        The space is mutated in place for non-vector spaces.
        """
        if isinstance(input_observation_space, VectorObservationSpace):
            self.input_observation_space = input_observation_space = \
                VectorObservationSpace(input_observation_space.shape * self.stack_size)
        else:
            if self.stacking_axis == -1:
                input_observation_space.shape = np.append(input_observation_space.shape, values=[self.stack_size],
                                                          axis=0)
            else:
                input_observation_space.shape = np.insert(input_observation_space.shape, obj=self.stacking_axis,
                                                          values=[self.stack_size], axis=0)
        return input_observation_space

    def reset(self) -> None:
        """Clear the stack; it will be re-initialized on the next filter() call."""
        self.stack = []
from skimage.transform import resize
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
class ObservationRescaleSizeByFactorFilter(ObservationFilter):
    """
    Rescales an image observation by a constant factor - e.g. a factor of 0.5 halves
    both spatial dimensions of the frame.
    """
    def __init__(self, rescale_factor: float):
        """
        :param rescale_factor: the factor by which the observation will be rescaled
        """
        super().__init__()
        self.rescale_factor = float(rescale_factor)
        # TODO: allow selecting the channels dim

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        num_dims = input_observation_space.num_dimensions
        if num_dims < 2 or num_dims > 3:
            raise ValueError("The rescale filter only applies to image observations where the number of dimensions is"
                             "either 2 (grayscale) or 3 (RGB). The number of dimensions defined for the "
                             "output observation was {}".format(num_dims))
        if num_dims == 3 and input_observation_space.shape[-1] != 3:
            raise ValueError("Observations with 3 dimensions must have 3 channels in the last axis (RGB)")

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        observation = observation.astype('uint8')
        # only the two spatial dimensions are scaled; a color channel, if present, is kept as-is
        target_size = tuple(int(self.rescale_factor * dim) for dim in observation.shape[:2])
        if observation.ndim == 3:
            target_size = target_size + (3,)
        return resize(observation, target_size, anti_aliasing=False, preserve_range=True).astype('uint8')

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        # scale the spatial part of the shape in place, truncating to ints like filter() does
        input_observation_space.shape[:2] = (input_observation_space.shape[:2] * self.rescale_factor).astype('int')
        return input_observation_space
import copy
from skimage.transform import resize
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace, PlanarMapsObservationSpace, ImageObservationSpace
class ObservationRescaleToSizeFilter(ObservationFilter):
    """
    Rescales an image observation to a given size. The target size does not
    necessarily keep the aspect ratio of the original observation.
    Warning: this requires the input observation to be of type uint8 due to scipy requirements!
    """
    def __init__(self, output_observation_space: PlanarMapsObservationSpace):
        """
        :param output_observation_space: the output observation space
        :raises ValueError: if the given space is not a PlanarMapsObservationSpace
        """
        super().__init__()
        self.output_observation_space = output_observation_space
        if not isinstance(output_observation_space, PlanarMapsObservationSpace):
            raise ValueError("The rescale filter only applies to observation spaces that inherit from "
                             "PlanarMapsObservationSpace. This includes observations which consist of a set of 2D "
                             "images or an RGB image. Instead the output observation space was defined as: {}"
                             .format(output_observation_space.__class__))
        # the spatial (non-channel) part of the target shape, used when resizing each planar map separately
        self.planar_map_output_shape = copy.copy(self.output_observation_space.shape)
        self.planar_map_output_shape = np.delete(self.planar_map_output_shape,
                                                 self.output_observation_space.channels_axis)

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        Make sure the input is a planar-maps space whose channel count matches the target space.

        :param input_observation_space: the observation space to validate
        :raises ValueError: if the space type or the number of channels does not match
        """
        if not isinstance(input_observation_space, PlanarMapsObservationSpace):
            raise ValueError("The rescale filter only applies to observation spaces that inherit from "
                             "PlanarMapsObservationSpace. This includes observations which consist of a set of 2D "
                             "images or an RGB image. Instead the input observation space was defined as: {}"
                             .format(input_observation_space.__class__))
        # only the spatial dimensions may be rescaled - the number of channels must be preserved
        if input_observation_space.shape[input_observation_space.channels_axis] \
                != self.output_observation_space.shape[self.output_observation_space.channels_axis]:
            raise ValueError("The number of channels between the input and output observation spaces must match. "
                             "Instead the number of channels were: {}, {}"
                             .format(input_observation_space.shape[input_observation_space.channels_axis],
                                     self.output_observation_space.shape[self.output_observation_space.channels_axis]))

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Resize the observation to the target shape and return it as uint8.

        :param observation: the observation to rescale (assumed to hold uint8-compatible image data)
        :param update_internal_state: unused - this filter is stateless
        :return: the rescaled observation
        """
        observation = observation.astype('uint8')

        # rescale
        if isinstance(self.output_observation_space, ImageObservationSpace):
            # a standard image - resize the whole observation in a single call
            observation = resize(observation, tuple(self.output_observation_space.shape), anti_aliasing=False,
                                 preserve_range=True).astype('uint8')
        else:
            # a generic set of planar maps - resize each map separately, then reassemble on the channels axis.
            # NOTE(review): unlike the image branch, anti_aliasing is not passed here - confirm this is intentional
            new_observation = []
            for i in range(self.output_observation_space.shape[self.output_observation_space.channels_axis]):
                new_observation.append(resize(observation.take(i, self.output_observation_space.channels_axis),
                                              tuple(self.planar_map_output_shape),
                                              preserve_range=True).astype('uint8'))
            new_observation = np.array(new_observation)
            # np.array stacked the resized maps on axis 0; swap to restore the original channels axis position
            observation = new_observation.swapaxes(0, self.output_observation_space.channels_axis)
        return observation

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Replace the space's shape with the target shape (the space is mutated in place).
        """
        input_observation_space.shape = self.output_observation_space.shape
        return input_observation_space
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace, PlanarMapsObservationSpace
class ObservationMoveAxisFilter(ObservationFilter):
    """
    Reorders the axes of the observation. This can be useful when the observation is an
    image, and we want to move the channel axis to be the last axis instead of the first axis.
    """
    def __init__(self, axis_origin: int = None, axis_target: int=None):
        """
        :param axis_origin: The axis to move
        :param axis_target: Where to move the selected axis to
        """
        super().__init__()
        self.axis_origin = axis_origin
        self.axis_target = axis_target

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        :raises ValueError: if either axis index is out of range for the input space
        """
        shape = input_observation_space.shape
        # both positive and negative (from-the-end) axis indices are accepted, numpy-style
        if not -len(shape) <= self.axis_origin < len(shape) or not -len(shape) <= self.axis_target < len(shape):
            raise ValueError("The given axis does not exist in the context of the input observation shape. ")

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Move axis_origin of the observation to position axis_target (delegates to np.moveaxis).
        """
        return np.moveaxis(observation, self.axis_origin, self.axis_target)

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Update the observation space's shape (and channels axis, if any) to reflect the axis move.
        The space is mutated in place.
        """
        # rebuild the shape: remove the moved axis and re-insert its size at the target position
        axis_size = input_observation_space.shape[self.axis_origin]
        input_observation_space.shape = np.delete(input_observation_space.shape, self.axis_origin)
        if self.axis_target == -1:
            input_observation_space.shape = np.append(input_observation_space.shape, axis_size)
        elif self.axis_target < -1:
            # negative targets address positions in the *original* shape, which has one more axis
            # than the reduced shape - hence the +1 offset
            input_observation_space.shape = np.insert(input_observation_space.shape, self.axis_target+1, axis_size)
        else:
            input_observation_space.shape = np.insert(input_observation_space.shape, self.axis_target, axis_size)

        # move the channels axis according to the axis change
        # NOTE(review): this bookkeeping looks correct for non-negative axis indices; for negative
        # axis_origin/axis_target the in-between comparisons below may not shift channels_axis - confirm
        if isinstance(input_observation_space, PlanarMapsObservationSpace):
            if input_observation_space.channels_axis == self.axis_origin:
                input_observation_space.channels_axis = self.axis_target
            elif input_observation_space.channels_axis == self.axis_target:
                input_observation_space.channels_axis = self.axis_origin
            elif self.axis_origin < input_observation_space.channels_axis < self.axis_target:
                # the moved axis passed over the channels axis from below, shifting it one position down
                input_observation_space.channels_axis -= 1
            elif self.axis_target < input_observation_space.channels_axis < self.axis_origin:
                # the moved axis passed over the channels axis from above, shifting it one position up
                input_observation_space.channels_axis += 1
        return input_observation_space
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
class ObservationToUInt8Filter(ObservationFilter):
    """
    Converts a floating point observation into an unsigned int 8 bit observation. This is
    mostly useful for reducing memory consumption and is usually used for image observations. The filter will first
    spread the observation values over the range 0-255 and then discretize them into integer values.
    """
    def __init__(self, input_low: float, input_high: float):
        """
        :param input_low: The lowest value currently present in the observation
        :param input_high: The highest value currently present in the observation
        :raises ValueError: if the given range is empty or inverted
        """
        super().__init__()
        self.input_low = input_low
        self.input_high = input_high

        # an empty or inverted range would make the 0-1 rescaling in filter() divide by zero or flip sign
        if input_high <= input_low:
            raise ValueError("The input observation space high values cannot be less than or equal to the input "
                             "observation space low values")

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        Verify that the low/high values declared by the observation space match the filter configuration.

        :raises ValueError: if any element of the space's low/high differs from the configured values
        """
        # np.any (rather than np.all) so that a single mismatching element is enough to fail validation
        if np.any(input_observation_space.low != self.input_low) or \
                np.any(input_observation_space.high != self.input_high):
            raise ValueError("The observation space values range don't match the configuration of the filter."
                             "The configuration is: low = {}, high = {}. The actual values are: low = {}, high = {}"
                             .format(self.input_low, self.input_high,
                                     input_observation_space.low, input_observation_space.high))

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Linearly map the observation from [input_low, input_high] to [0, 255] and cast to uint8.
        """
        # scale to 0-1
        observation = (observation - self.input_low) / (self.input_high - self.input_low)

        # scale to 0-255 and discretize
        observation *= 255
        observation = observation.astype('uint8')
        return observation

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        The filtered space always spans the full uint8 range (mutated in place).
        """
        input_observation_space.low = 0
        input_observation_space.high = 255
        return input_observation_space
import copy
from enum import Enum
from typing import List
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace, VectorObservationSpace
class ObservationReductionBySubPartsNameFilter(ObservationFilter):
    """
    Allows keeping only parts of the observation, by specifying their
    name. This is useful when the environment has a measurements vector as observation which includes several different
    measurements, but you want the agent to only see some of the measurements and not all.

    For example, the CARLA environment extracts multiple measurements that can be used by the agent, such as
    speed and location. If we want to only use the speed, it can be done using this filter.

    This will currently work only for VectorObservationSpace observations
    """
    class ReductionMethod(Enum):
        Keep = 0
        Discard = 1

    def __init__(self, part_names: List[str], reduction_method: ReductionMethod):
        """
        :param part_names: A list of part names to reduce
        :param reduction_method: A reduction method to use - keep or discard the given parts
        """
        super().__init__()
        self.part_names = part_names
        self.reduction_method = reduction_method
        # both are populated by get_filtered_observation_space(), which must run before filter()
        self.measurement_names = None
        self.indices_to_keep = None

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Select the configured sub-parts from the last axis of the observation.
        """
        if not isinstance(observation, np.ndarray):
            raise ValueError("All the state values are expected to be numpy arrays")
        if self.indices_to_keep is None:
            raise ValueError("To use ObservationReductionBySubPartsNameFilter, the get_filtered_observation_space "
                             "function should be called before filtering an observation")
        return observation[..., self.indices_to_keep]

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        if isinstance(input_observation_space, VectorObservationSpace):
            return
        raise ValueError("The ObservationReductionBySubPartsNameFilter support only VectorObservationSpace "
                         "observations. The given observation space was: {}"
                         .format(input_observation_space.__class__))

    def get_filtered_observation_space(self, input_observation_space: VectorObservationSpace) -> ObservationSpace:
        """
        Compute the indices of the measurements to keep and shrink the space accordingly
        (the space is mutated in place).
        """
        self.measurement_names = copy.copy(input_observation_space.measurements_names)
        if self.reduction_method == self.ReductionMethod.Keep:
            wanted = lambda name: name in self.part_names
            input_observation_space.shape[-1] = len(self.part_names)
            new_names = copy.copy(self.part_names)
        elif self.reduction_method == self.ReductionMethod.Discard:
            wanted = lambda name: name not in self.part_names
            input_observation_space.shape[-1] -= len(self.part_names)
            new_names = [name for name in input_observation_space.measurements_names
                         if name not in self.part_names]
        else:
            raise ValueError("The given reduction method is not supported")
        self.indices_to_keep = [idx for idx, name in enumerate(self.measurement_names) if wanted(name)]
        input_observation_space.measurements_names = new_names
        return input_observation_space
from typing import Union, Tuple
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
class ObservationCropFilter(ObservationFilter):
    """
    Crops the size of the observation to a given crop window. For example, in Atari, the
    observations are images with a shape of 210x160. Usually, we will want to crop the size of the observation to a
    square of 160x160 before rescaling them.
    """
    def __init__(self, crop_low: np.ndarray=None, crop_high: np.ndarray=None):
        """
        :param crop_low: a vector where each dimension describes the start index for cropping the observation in the
                         corresponding dimension. a negative value of -1 will be mapped to the max size
        :param crop_high: a vector where each dimension describes the end index for cropping the observation in the
                          corresponding dimension. a negative value of -1 will be mapped to the max size
        :raises ValueError: if the crop window is missing, inconsistent, negative or not integer-typed
        """
        super().__init__()
        if crop_low is None and crop_high is None:
            raise ValueError("At least one of crop_low and crop_high should be set to a real value. ")
        # when only one side is given, default the other side to the full range
        if crop_low is None:
            crop_low = np.array([0] * len(crop_high))
        if crop_high is None:
            crop_high = np.array([-1] * len(crop_low))
        self.crop_low = crop_low
        self.crop_high = crop_high

        # sanity checks on the crop window
        for h, l in zip(crop_high, crop_low):
            if h < l and h != -1:  # h == -1 means "up to the end", which is always valid
                raise ValueError("Some of the cropping low values are higher than cropping high values")
        if np.any(crop_high < -1) or np.any(crop_low < -1):
            raise ValueError("Cropping values cannot be negative")
        if crop_low.shape != crop_high.shape:
            raise ValueError("The low values and high values for cropping must have the same number of dimensions")
        if crop_low.dtype != int or crop_high.dtype != int:
            raise ValueError("The crop values should be int values, instead they are defined as: {} and {}"
                             .format(crop_low.dtype, crop_high.dtype))

    def _replace_negative_one_in_crop_size(self, crop_size: np.ndarray, observation_shape: Union[Tuple, np.ndarray]):
        """
        Return a copy of crop_size where every -1 is replaced by the full size of the corresponding
        observation dimension.
        """
        # replace -1 with the max size
        crop_size = crop_size.copy()
        for i in range(len(observation_shape)):
            if crop_size[i] == -1:
                crop_size[i] = observation_shape[i]
        return crop_size

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        :raises ValueError: if the crop window falls outside of the input observation space
        """
        crop_high = self._replace_negative_one_in_crop_size(self.crop_high, input_observation_space.shape)
        crop_low = self._replace_negative_one_in_crop_size(self.crop_low, input_observation_space.shape)
        if np.any(crop_high > input_observation_space.shape) or \
                np.any(crop_low > input_observation_space.shape):
            raise ValueError("The cropping values are outside of the observation space")
        if not input_observation_space.is_valid_index(crop_low) or \
                not input_observation_space.is_valid_index(crop_high - 1):
            raise ValueError("The cropping indices are outside of the observation space")

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Crop the observation to the configured window.
        """
        # replace -1 with the max size
        crop_high = self._replace_negative_one_in_crop_size(self.crop_high, observation.shape)
        crop_low = self._replace_negative_one_in_crop_size(self.crop_low, observation.shape)

        # crop. the index sequence must be a tuple - indexing a numpy array with a *list* of slices
        # was deprecated in numpy 1.15 and is an error in modern numpy versions
        indices = tuple(slice(i, j) for i, j in zip(crop_low, crop_high))
        return observation[indices]

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Shrink the observation space's shape to the cropped shape (mutated in place).
        """
        # replace -1 with the max size
        crop_high = self._replace_negative_one_in_crop_size(self.crop_high, input_observation_space.shape)
        crop_low = self._replace_negative_one_in_crop_size(self.crop_low, input_observation_space.shape)
        input_observation_space.shape = crop_high - crop_low
        return input_observation_space
import os
import pickle
from typing import List
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
from rl_coach.utilities.shared_running_stats import NumpySharedRunningStats, NumpySharedRunningStats
class ObservationNormalizationFilter(ObservationFilter):
    """
    Normalizes observations element-wise using a running mean and standard deviation computed
    over every observation seen so far. When multiple workers are used, the running statistics
    are accumulated across all of them.
    """
    def __init__(self, clip_min: float=-5.0, clip_max: float=5.0, name='observation_stats'):
        """
        :param clip_min: The minimum value to allow after normalizing the observation
        :param clip_max: The maximum value to allow after normalizing the observation
        """
        super().__init__()
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.running_observation_stats = None  # created later by set_device()
        self.name = name
        self.supports_batching = True
        self.observation_space = None

    def set_device(self, device, memory_backend_params=None, mode='numpy') -> None:
        """
        An optional function that allows the filter to get the device if it is required to use tensorflow ops
        :param device: the device to use
        :param memory_backend_params: if not None, holds params for a memory backend for sharing data (e.g. Redis)
        :param mode: the arithmetic module to use {'tf' | 'numpy'}
        :return: None
        """
        if mode == 'tf':
            # imported lazily so that numpy mode does not require tensorflow to be installed
            from rl_coach.architectures.tensorflow_components.shared_variables import TFSharedRunningStats
            self.running_observation_stats = TFSharedRunningStats(device, name=self.name, create_ops=False,
                                                                  pubsub_params=memory_backend_params)
        elif mode == 'numpy':
            self.running_observation_stats = NumpySharedRunningStats(name=self.name,
                                                                     pubsub_params=memory_backend_params)

    def set_session(self, sess) -> None:
        """
        An optional function that allows the filter to get the session if it is required to use tensorflow ops
        :param sess: the session
        :return: None
        """
        self.running_observation_stats.set_session(sess)

    def filter(self, observations: List[ObservationType], update_internal_state: bool=True) -> ObservationType:
        """
        Normalize a batch of observations using the shared running statistics, optionally
        folding the new batch into the statistics first.
        """
        batch = np.array(observations)
        if update_internal_state:
            self.running_observation_stats.push(batch)
            # keep the statistics used for the latest update around for debugging/inspection
            self.last_mean = self.running_observation_stats.mean
            self.last_stdev = self.running_observation_stats.std
        return self.running_observation_stats.normalize(batch)

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Configure the running statistics for the observation shape; the space itself is unchanged.
        """
        self.running_observation_stats.set_params(shape=input_observation_space.shape,
                                                  clip_values=(self.clip_min, self.clip_max))
        return input_observation_space

    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Persist the running statistics as part of a checkpoint."""
        self.running_observation_stats.save_state_to_checkpoint(checkpoint_dir, checkpoint_prefix)

    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Restore the running statistics from a checkpoint."""
        self.running_observation_stats.restore_state_from_checkpoint(checkpoint_dir, checkpoint_prefix)
import time
import os
from rl_coach.checkpoint import CheckpointStateReader
from rl_coach.data_stores.data_store import SyncFiles
class CheckpointDataStore(object):
    """
    A DataStore which relies on the GraphManager check pointing methods to communicate policies.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # the number of the newest checkpoint loaded so far, used to detect new policies.
        # NOTE(review): self.checkpoint_dir / load_from_store() are expected to be provided
        # by the concrete data store class this is mixed into - confirm against subclasses
        self.checkpoint_num = 0

    def end_of_policies(self) -> bool:
        """
        Returns true if no new policies will be added to this DataStore. This typically happens
        because training has completed and is used to signal to the rollout workers to stop.
        """
        # the trainer is expected to drop a FINISHED marker file into the checkpoint directory
        return os.path.exists(
            os.path.join(self.checkpoint_dir, SyncFiles.FINISHED.value)
        )

    def save_policy(self, graph_manager):
        """
        Save the current policy of graph_manager as a checkpoint into self.checkpoint_dir.
        """
        # TODO: it would be nice if restore_checkpoint accepted a checkpoint path as a
        # parameter. as it is, one cannot distinguish between checkpoints used for coordination
        # and checkpoints requested to a persistent disk for later use
        graph_manager.task_parameters.checkpoint_restore_path = self.checkpoint_dir
        graph_manager.save_checkpoint()

    def load_policy(self, graph_manager, require_new_policy=True, timeout=None):
        """
        Load a checkpoint into the specified graph_manager. The expectation here is that
        save_to_store() and load_from_store() will synchronize a checkpoint directory with a
        central repository such as NFS or S3.

        :param graph_manager: the graph_manager to load the policy into
        :param require_new_policy: if True, only load a policy if it hasn't been loaded in this
            process yet before.
        :param timeout: Will only try to load the policy once if timeout is None, otherwise will
            retry for timeout seconds
        """
        if self._new_policy_exists(require_new_policy, timeout):
            # TODO: it would be nice if restore_checkpoint accepted a checkpoint path as a
            # parameter. as it is, one cannot distinguish between checkpoints used for coordination
            # and checkpoints requested to a persistent disk for later use
            graph_manager.task_parameters.checkpoint_restore_path = self.checkpoint_dir
            graph_manager.restore_checkpoint()

    def _new_policy_exists(self, require_new_policy=True, timeout=None) -> bool:
        """
        Poll the checkpoint directory until a (new) policy checkpoint appears.

        :param require_new_policy: if True, only report a policy whose number is greater than
            the last one loaded
        :param timeout: how many seconds to keep polling; None means a single attempt
        :return: True if a loadable policy was found, False if training already finished
        :raises ValueError: if no policy appeared before the timeout expired
        """
        checkpoint_state_reader = CheckpointStateReader(
            self.checkpoint_dir, checkpoint_state_optional=False
        )
        # "first" is a sentinel guaranteeing the loop body runs at least once, even with timeout=None/0
        checkpoint = "first"
        if timeout is None:
            timeout = 0
        timeout_ends = time.time() + timeout
        while time.time() < timeout_ends or checkpoint == "first":
            if self.end_of_policies():
                # training finished - no new policy will ever arrive
                return False
            # sync the checkpoint directory from the central store before re-reading it
            self.load_from_store()
            checkpoint = checkpoint_state_reader.get_latest()
            if checkpoint is not None:
                if not require_new_policy or checkpoint.num > self.checkpoint_num:
                    self.checkpoint_num = checkpoint.num
                    return True
        # NOTE(review): this also raises when a policy exists but is simply not newer than the last
        # one loaded (require_new_policy=True) - the message only describes the no-policy case
        raise ValueError(
            "Waited for {timeout} seconds, but no first policy was received.".format(
                timeout=timeout
            )
        )
import uuid
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.checkpoint_data_store import CheckpointDataStore
class NFSDataStoreParameters(DataStoreParameters):
    """
    Parameters describing an NFS-backed data store, optionally pointing at an
    already-deployed NFS server instead of one managed by the orchestrator.
    """

    def __init__(self, ds_params, deployed=False, server=None, path=None, checkpoint_dir: str=""):
        super().__init__(ds_params.store_type, ds_params.orchestrator_type, ds_params.orchestrator_params)
        self.namespace = "default"
        if "namespace" in ds_params.orchestrator_params:
            self.namespace = ds_params.orchestrator_params["namespace"]
        self.checkpoint_dir = checkpoint_dir
        # names of the orchestrator resources backing this store; populated on deployment
        self.name = None
        self.pvc_name = None
        self.pv_name = None
        self.svc_name = None
        self.deployed = deployed
        if deployed:
            # an externally managed NFS server was supplied by the caller
            self.server = server
            self.path = path
        else:
            self.server = None
            self.path = "/"
class NFSDataStore(CheckpointDataStore):
    """
    An implementation of data store which uses NFS for storing policy checkpoints when using Coach in distributed mode.
    The policy checkpoints are written by the trainer and read by the rollout worker.

    When orchestrated by Kubernetes, this class can deploy its own single-replica NFS
    server (deploy_k8s_nfs) and always creates the PersistentVolume / PersistentVolumeClaim
    pair that exposes the share to the training pods (create_k8s_nfs_resources).
    """
    def __init__(self, params: NFSDataStoreParameters):
        """
        :param params: The parameters required to use the NFS data store.
        """
        self.params = params

    def deploy(self) -> bool:
        """
        Deploy the NFS server in an orchestrator if/when required.

        :return: True on success (or when no orchestrator is used), False otherwise.
        """
        if self.params.orchestrator_type == "kubernetes":
            # only deploy our own server when the caller did not supply an existing one
            if not self.params.deployed:
                if not self.deploy_k8s_nfs():
                    return False
            # the PV/PVC pair is needed regardless of who owns the NFS server
            if not self.create_k8s_nfs_resources():
                return False
        return True

    def get_info(self):
        """
        :return: a Kubernetes volume source that references the PVC created for this store.
        """
        from kubernetes import client as k8sclient

        return k8sclient.V1PersistentVolumeClaimVolumeSource(
            claim_name=self.params.pvc_name
        )

    def undeploy(self) -> bool:
        """
        Undeploy the NFS server and resources from an orchestrator.

        :return: True on success, False otherwise.
        """
        if self.params.orchestrator_type == "kubernetes":
            # only tear down the server if we deployed it ourselves
            if not self.params.deployed:
                if not self.undeploy_k8s_nfs():
                    return False
            if not self.delete_k8s_nfs_resources():
                return False
        return True

    def save_to_store(self):
        # checkpoints are written directly onto the NFS mount; nothing to do here
        pass

    def load_from_store(self):
        # checkpoints are read directly from the NFS mount; nothing to do here
        pass

    def deploy_k8s_nfs(self) -> bool:
        """
        Deploy the NFS server in the Kubernetes orchestrator.

        Creates a single-replica Deployment running an NFS server backed by a host-path
        volume, plus a Service exposing it. On success, self.params.name, svc_name and
        server (the service's cluster IP) are filled in.

        :return: True on success, False otherwise.
        """
        from kubernetes import client as k8sclient

        name = "nfs-server-{}".format(uuid.uuid4())
        container = k8sclient.V1Container(
            name=name,
            image="k8s.gcr.io/volume-nfs:0.8",
            ports=[k8sclient.V1ContainerPort(
                name="nfs",
                container_port=2049,
                protocol="TCP"
            ),
                k8sclient.V1ContainerPort(
                    name="rpcbind",
                    container_port=111
                ),
                k8sclient.V1ContainerPort(
                    name="mountd",
                    container_port=20048
                ),
            ],
            volume_mounts=[k8sclient.V1VolumeMount(
                name='nfs-host-path',
                mount_path='/exports'
            )],
            # the NFS server container requires privileged mode to export the share
            security_context=k8sclient.V1SecurityContext(privileged=True)
        )
        template = k8sclient.V1PodTemplateSpec(
            metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
            spec=k8sclient.V1PodSpec(
                containers=[container],
                volumes=[k8sclient.V1Volume(
                    name="nfs-host-path",
                    host_path=k8sclient.V1HostPathVolumeSource(path='/tmp/nfsexports-{}'.format(uuid.uuid4()))
                )]
            )
        )
        deployment_spec = k8sclient.V1DeploymentSpec(
            replicas=1,
            template=template,
            selector=k8sclient.V1LabelSelector(
                match_labels={'app': name}
            )
        )
        deployment = k8sclient.V1Deployment(
            api_version='apps/v1',
            kind='Deployment',
            metadata=k8sclient.V1ObjectMeta(name=name, labels={'app': name}),
            spec=deployment_spec
        )

        k8s_apps_v1_api_client = k8sclient.AppsV1Api()
        try:
            k8s_apps_v1_api_client.create_namespaced_deployment(self.params.namespace, deployment)
            self.params.name = name
        except k8sclient.rest.ApiException as e:
            # interpolate the exception into the message (it was previously passed as a
            # second argument to print, leaving the %s placeholder unexpanded)
            print("Got exception: %s\n while creating nfs-server" % e)
            return False

        k8s_core_v1_api_client = k8sclient.CoreV1Api()
        svc_name = "nfs-service-{}".format(uuid.uuid4())
        service = k8sclient.V1Service(
            api_version='v1',
            kind='Service',
            metadata=k8sclient.V1ObjectMeta(
                name=svc_name
            ),
            spec=k8sclient.V1ServiceSpec(
                selector={'app': self.params.name},
                ports=[k8sclient.V1ServicePort(
                    protocol='TCP',
                    port=2049,
                    target_port=2049
                )]
            )
        )
        try:
            svc_response = k8s_core_v1_api_client.create_namespaced_service(self.params.namespace, service)
            self.params.svc_name = svc_name
            # the cluster IP is what the PV's NFS source will point at
            self.params.server = svc_response.spec.cluster_ip
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while creating a service for nfs-server" % e)
            return False

        return True

    def create_k8s_nfs_resources(self) -> bool:
        """
        Create NFS resources such as PV and PVC in Kubernetes.

        The PVC is bound to the newly created PV through a label selector, so pods can
        mount the NFS share by claim name.

        :return: True on success, False otherwise.
        """
        from kubernetes import client as k8sclient

        pv_name = "nfs-ckpt-pv-{}".format(uuid.uuid4())
        persistent_volume = k8sclient.V1PersistentVolume(
            api_version="v1",
            kind="PersistentVolume",
            metadata=k8sclient.V1ObjectMeta(
                name=pv_name,
                labels={'app': pv_name}
            ),
            spec=k8sclient.V1PersistentVolumeSpec(
                access_modes=["ReadWriteMany"],
                nfs=k8sclient.V1NFSVolumeSource(
                    path=self.params.path,
                    server=self.params.server
                ),
                capacity={'storage': '10Gi'},
                # empty storage class disables dynamic provisioning for this PV
                storage_class_name=""
            )
        )
        k8s_api_client = k8sclient.CoreV1Api()
        try:
            k8s_api_client.create_persistent_volume(persistent_volume)
            self.params.pv_name = pv_name
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while creating the NFS PV" % e)
            return False

        pvc_name = "nfs-ckpt-pvc-{}".format(uuid.uuid4())
        persistent_volume_claim = k8sclient.V1PersistentVolumeClaim(
            api_version="v1",
            kind="PersistentVolumeClaim",
            metadata=k8sclient.V1ObjectMeta(
                name=pvc_name
            ),
            spec=k8sclient.V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteMany"],
                resources=k8sclient.V1ResourceRequirements(
                    requests={'storage': '10Gi'}
                ),
                # bind this claim to the PV created above via its app label
                selector=k8sclient.V1LabelSelector(
                    match_labels={'app': self.params.pv_name}
                ),
                storage_class_name=""
            )
        )
        try:
            k8s_api_client.create_namespaced_persistent_volume_claim(self.params.namespace, persistent_volume_claim)
            self.params.pvc_name = pvc_name
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while creating the NFS PVC" % e)
            return False

        return True

    def undeploy_k8s_nfs(self) -> bool:
        """
        Delete the NFS server Deployment and its Service from Kubernetes.

        :return: True on success, False otherwise.
        """
        from kubernetes import client as k8sclient

        del_options = k8sclient.V1DeleteOptions()
        k8s_apps_v1_api_client = k8sclient.AppsV1Api()
        try:
            k8s_apps_v1_api_client.delete_namespaced_deployment(self.params.name, self.params.namespace, del_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting nfs-server" % e)
            return False

        k8s_core_v1_api_client = k8sclient.CoreV1Api()
        try:
            k8s_core_v1_api_client.delete_namespaced_service(self.params.svc_name, self.params.namespace, del_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting the service for nfs-server" % e)
            return False

        return True

    def delete_k8s_nfs_resources(self) -> bool:
        """
        Delete NFS resources such as PV and PVC from the Kubernetes orchestrator.

        :return: True on success, False otherwise.
        """
        from kubernetes import client as k8sclient

        del_options = k8sclient.V1DeleteOptions()
        k8s_api_client = k8sclient.CoreV1Api()
        try:
            k8s_api_client.delete_persistent_volume(self.params.pv_name, del_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting NFS PV" % e)
            return False

        try:
            k8s_api_client.delete_namespaced_persistent_volume_claim(self.params.pvc_name, self.params.namespace, del_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting NFS PVC" % e)
            return False

        return True

    def setup_checkpoint_dir(self, crd=None):
        """
        :param crd: an optional local checkpoint restore dir; uploading it to the
                    deployed NFS store is not implemented yet.
        """
        if crd:
            # TODO: find a way to upload this to the deployed nfs store.
            pass
import copy
from typing import Union
from collections import OrderedDict
import numpy as np
from rl_coach.agents.agent import Agent
from rl_coach.agents.ddpg_agent import DDPGAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, TD3VHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
AgentParameters, EmbedderScheme
from rl_coach.core_types import ActionInfo, TrainingSteps, Transition
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import BoxActionSpace, GoalsSpace
class TD3CriticNetworkParameters(NetworkParameters):
    """
    Network parameters for the TD3 critic.

    The critic embeds both the observation and the action, and runs one middleware
    stream per Q network so the twin Q estimates live in a single network wrapper.
    """
    def __init__(self, num_q_networks):
        """
        :param num_q_networks: number of Q estimates (middleware streams) the critic holds.
        """
        super().__init__()
        # topology: observation + action embedders -> multi-stream FC middleware -> V head
        embedders = {
            'observation': InputEmbedderParameters(),
            'action': InputEmbedderParameters(scheme=EmbedderScheme.Shallow),
        }
        self.input_embedders_parameters = embedders
        self.middleware_parameters = FCMiddlewareParameters(num_streams=num_q_networks)
        self.heads_parameters = [TD3VHeadParameters()]
        # optimization settings
        self.optimizer_type = 'Adam'
        self.learning_rate = 0.001
        self.adam_optimizer_beta2 = 0.999
        self.optimizer_epsilon = 1e-8
        self.batch_size = 100
        # training-scheme settings
        self.async_training = False
        self.shared_optimizer = True
        self.create_target_network = True
        self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class TD3ActorNetworkParameters(NetworkParameters):
    """Network parameters for the TD3 actor (deterministic policy) network."""
    def __init__(self):
        super().__init__()
        # topology: observation embedder -> FC middleware -> deterministic policy head
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [DDPGActorHeadParameters(batchnorm=False)]
        # optimization settings
        self.optimizer_type = 'Adam'
        self.learning_rate = 0.001
        self.adam_optimizer_beta2 = 0.999
        self.optimizer_epsilon = 1e-8
        self.batch_size = 100
        # training-scheme settings
        self.async_training = False
        self.shared_optimizer = True
        self.create_target_network = True
        self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class TD3AlgorithmParameters(AlgorithmParameters):
    """
    :param rate_for_copying_weights_to_target: (float)
        When copying the online network weights to the target network weights, a soft update will be used, which
        weights the new online network weights by rate_for_copying_weights_to_target
    :param use_target_network_for_evaluation: (bool)
        If set to True, the target network will be used for predicting the actions when choosing actions to act.
        Since the target network weights change more slowly, the predicted actions will be more consistent.
    :param action_penalty: (float)
        The amount by which to penalize the network on high action feature (pre-activation) values.
        This can prevent the actions features from saturating the TanH activation function, and therefore prevent the
        gradients from becoming very low.
    :param clip_critic_targets: (Tuple[float, float] or None)
        The range to clip the critic target to in order to prevent overestimation of the action values.
    :param use_non_zero_discount_for_terminal_states: (bool)
        If set to True, the discount factor will be used for terminal states to bootstrap the next predicted state
        values. If set to False, the terminal states reward will be taken as the target return for the network.
    :param act_for_full_episodes: (bool)
        If set to True, whole episodes are collected between training phases.
    :param update_policy_every_x_episode_steps: (int)
        The number of critic updates between two consecutive actor and target network updates
        ("delayed policy updates" in the TD3 paper).
    :param policy_noise: (float)
        The standard deviation of the Gaussian noise added to the target policy's actions when computing the
        critic targets ("target policy smoothing").
    :param noise_clipping: (float)
        The absolute value at which the target policy smoothing noise is clipped.
    :param num_q_networks: (int)
        The number of Q networks held by the critic; their minimum is used for the bootstrapped targets
        ("clipped double-Q learning").
    """
    def __init__(self):
        super().__init__()
        self.rate_for_copying_weights_to_target = 0.005
        self.use_target_network_for_evaluation = False
        self.action_penalty = 0
        self.clip_critic_targets = None  # expected to be a tuple of the form (min_clip_value, max_clip_value) or None
        self.use_non_zero_discount_for_terminal_states = False
        self.act_for_full_episodes = True
        # the actor/target update period, also used for the target-copy schedule below
        self.update_policy_every_x_episode_steps = 2
        self.num_steps_between_copying_online_weights_to_target = TrainingSteps(self.update_policy_every_x_episode_steps)
        # target policy smoothing noise parameters
        self.policy_noise = 0.2
        self.noise_clipping = 0.5
        self.num_q_networks = 2
class TD3AgentExplorationParameters(AdditiveNoiseParameters):
    """Additive-noise exploration for TD3."""
    def __init__(self):
        super().__init__()
        # interpret the noise scale as an absolute value rather than a % of the action range
        self.noise_as_percentage_from_action_space = False
class TD3AgentParameters(AgentParameters):
    """Default parameter bundle for the TD3 agent (algorithm, exploration, memory, networks)."""
    def __init__(self):
        algorithm = TD3AlgorithmParameters()
        # the critic needs to know how many Q networks the algorithm expects
        networks = OrderedDict([
            ("actor", TD3ActorNetworkParameters()),
            ("critic", TD3CriticNetworkParameters(algorithm.num_q_networks)),
        ])
        super().__init__(algorithm=algorithm,
                         exploration=TD3AgentExplorationParameters(),
                         memory=EpisodicExperienceReplayParameters(),
                         networks=networks)

    @property
    def path(self):
        return 'rl_coach.agents.td3_agent:TD3Agent'
# Twin Delayed DDPG - https://arxiv.org/pdf/1802.09477.pdf
class TD3Agent(DDPGAgent):
    """
    Twin Delayed DDPG (TD3) agent - https://arxiv.org/pdf/1802.09477.pdf

    Extends DDPG with the paper's three tricks: clipped double-Q learning (the critic's
    target uses the minimum of its Q estimates), target policy smoothing (clipped Gaussian
    noise added to the target actions), and delayed policy updates (the actor is updated
    only every update_policy_every_x_episode_steps critic updates).
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals for dashboard logging
        self.q_values = self.register_signal("Q")
        self.TD_targets_signal = self.register_signal("TD targets")
        self.action_signal = self.register_signal("actions")

    def learn_from_batch(self, batch):
        """
        Run one TD3 training iteration on a batch of transitions.

        :param batch: a Batch of transitions to learn from
        :return: (total_loss, losses, unclipped_grads) of the critic update
        """
        actor = self.networks['actor']
        critic = self.networks['critic']

        actor_keys = self.ap.network_wrappers['actor'].input_embedders_parameters.keys()
        critic_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()

        # TD error = r + discount*max(q_st_plus_1) - q_st
        next_actions, actions_mean = actor.parallel_prediction([
            (actor.target_network, batch.next_states(actor_keys)),
            (actor.online_network, batch.states(actor_keys))
        ])

        # target policy smoothing: perturb the target actions with clipped Gaussian noise
        noise = np.random.normal(0, self.ap.algorithm.policy_noise, next_actions.shape).clip(
            -self.ap.algorithm.noise_clipping, self.ap.algorithm.noise_clipping)
        next_actions = self.spaces.action.clip_action_to_space(next_actions + noise)

        critic_inputs = copy.copy(batch.next_states(critic_keys))
        critic_inputs['action'] = next_actions
        q_st_plus_1 = critic.target_network.predict(critic_inputs)[2]  # output #2 is the min (Q1, Q2)

        # calculate the bootstrapped TD targets while discounting terminal states according to
        # use_non_zero_discount_for_terminal_states
        if self.ap.algorithm.use_non_zero_discount_for_terminal_states:
            TD_targets = batch.rewards(expand_dims=True) + self.ap.algorithm.discount * q_st_plus_1
        else:
            TD_targets = batch.rewards(expand_dims=True) + \
                         (1.0 - batch.game_overs(expand_dims=True)) * self.ap.algorithm.discount * q_st_plus_1

        # clip the TD targets to prevent overestimation errors
        if self.ap.algorithm.clip_critic_targets:
            TD_targets = np.clip(TD_targets, *self.ap.algorithm.clip_critic_targets)

        self.TD_targets_signal.add_sample(TD_targets)

        # train the critic on every iteration
        critic_inputs = copy.copy(batch.states(critic_keys))
        critic_inputs['action'] = batch.actions(len(batch.actions().shape) == 1)
        result = critic.train_and_sync_networks(critic_inputs, TD_targets)
        total_loss, losses, unclipped_grads = result[:3]

        # delayed policy updates: the actor is only updated every
        # update_policy_every_x_episode_steps critic updates
        if self.training_iteration % self.ap.algorithm.update_policy_every_x_episode_steps == 0:
            # get the gradients of output #3 (=mean of Q1 network) w.r.t the action
            critic_inputs = copy.copy(batch.states(critic_keys))
            critic_inputs['action'] = actions_mean
            action_gradients = critic.online_network.predict(critic_inputs,
                                                             outputs=critic.online_network.gradients_wrt_inputs[3]['action'])

            # apply the gradients from the critic to the actor (deterministic policy gradient:
            # ascend Q by feeding the negated action gradients as the head's weights)
            initial_feed_dict = {actor.online_network.gradients_weights_ph[0]: -action_gradients}
            gradients = actor.online_network.predict(batch.states(actor_keys),
                                                     outputs=actor.online_network.weighted_gradients[0],
                                                     initial_feed_dict=initial_feed_dict)

            if actor.has_global:
                actor.apply_gradients_to_global_network(gradients)
                actor.update_online_network()
            else:
                actor.apply_gradients_to_online_network(gradients)

        return total_loss, losses, unclipped_grads

    def train(self):
        # train once per step collected in the last episode (the agent acts for full episodes)
        self.ap.algorithm.num_consecutive_training_steps = self.current_episode_steps_counter
        return Agent.train(self)

    def update_transition_before_adding_to_replay_buffer(self, transition: Transition) -> Transition:
        """
        Allows agents to update the transition just before adding it to the replay buffer.
        Can be useful for agents that want to tweak the reward, termination signal, etc.
        :param transition: the transition to update
        :return: the updated transition
        """
        # do not treat time-limit truncation as a true terminal, so the critic keeps
        # bootstrapping from the next state's value on timeouts.
        # NOTE(review): relies on the gym-internal `_max_episode_steps` attribute of the
        # wrapped environment - confirm it exists for the environments in use.
        transition.game_over = False if self.current_episode_steps_counter ==\
            self.parent_level_manager.environment.env._max_episode_steps\
            else transition.game_over
        return transition
from typing import Union
import numpy as np
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import ACERPolicyHeadParameters, QHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, AgentParameters
from rl_coach.core_types import Batch
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import DiscreteActionSpace
from rl_coach.utils import eps, last_sample
class ACERAlgorithmParameters(AlgorithmParameters):
    """
    :param apply_gradients_every_x_episodes: (int)
        The number of episodes between applying the accumulated gradients to the network.
    :param num_steps_between_gradient_updates: (int)
        Every num_steps_between_gradient_updates transitions will be considered as a single batch and use for
        accumulating gradients. This is also the number of steps used for bootstrapping according to the n-step formulation.
    :param ratio_of_replay: (int)
        The number of off-policy training iterations in each ACER iteration.
    :param num_transitions_to_start_replay: (int)
        Number of environment steps until ACER starts to train off-policy from the experience replay.
        This emulates a heat-up phase where the agents learns only on-policy until there are enough transitions in
        the experience replay to start the off-policy training.
    :param rate_for_copying_weights_to_target: (float)
        The rate of the exponential moving average for the average policy which is used for the trust region optimization.
        The target network in this algorithm is used as the average policy.
    :param importance_weight_truncation: (float)
        The clipping constant for the importance weight truncation (not used in the Q-retrace calculation).
    :param use_trust_region_optimization: (bool)
        If set to True, the gradients of the network will be modified with a term dependant on the KL divergence between
        the average policy and the current one, to bound the change of the policy during the network update.
    :param max_KL_divergence: (float)
        The upper bound parameter for the trust region optimization, use_trust_region_optimization needs to be set true
        for this parameter to have an effect.
    :param beta_entropy: (float)
        An entropy regulaization term can be added to the loss function in order to control exploration. This term
        is weighted using the beta value defined by beta_entropy.
    """
    def __init__(self):
        super().__init__()
        self.apply_gradients_every_x_episodes = 5
        self.num_steps_between_gradient_updates = 5000
        self.ratio_of_replay = 4
        self.num_transitions_to_start_replay = 10000
        self.rate_for_copying_weights_to_target = 0.01
        self.importance_weight_truncation = 10.0
        self.use_trust_region_optimization = True
        self.max_KL_divergence = 1.0
        # entropy regularization is disabled by default (weight of 0)
        self.beta_entropy = 0
class ACERNetworkParameters(NetworkParameters):
    """Network topology and optimization settings for the ACER agent."""
    def __init__(self):
        super().__init__()
        # topology: observation embedder -> FC middleware -> Q head + ACER policy head
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        heads = [QHeadParameters(loss_weight=0.5), ACERPolicyHeadParameters(loss_weight=1.0)]
        self.heads_parameters = heads
        # optimization / training-scheme settings
        self.optimizer_type = 'Adam'
        self.clip_gradients = 40.0
        self.async_training = True
        # the target network holds the slowly-moving average policy
        self.create_target_network = True
class ACERAgentParameters(AgentParameters):
    """Default parameter bundle for the ACER agent."""
    def __init__(self):
        # discrete-action agent: categorical sampling over the policy head's probabilities
        exploration = {DiscreteActionSpace: CategoricalParameters()}
        super().__init__(algorithm=ACERAlgorithmParameters(),
                         exploration=exploration,
                         memory=EpisodicExperienceReplayParameters(),
                         networks={"main": ACERNetworkParameters()})

    @property
    def path(self):
        return 'rl_coach.agents.acer_agent:ACERAgent'
# Actor-Critic with Experience Replay - https://arxiv.org/abs/1611.01224
class ACERAgent(PolicyOptimizationAgent):
    """
    Actor-Critic with Experience Replay - https://arxiv.org/abs/1611.01224

    Combines on-policy actor-critic updates with additional off-policy updates sampled
    from an experience replay, using truncated importance sampling, the Q-retrace return
    estimator, and (optionally) trust region optimization against a slowly-moving
    average policy kept in the target network.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals definition
        self.q_loss = self.register_signal('Q Loss')
        self.policy_loss = self.register_signal('Policy Loss')
        self.probability_loss = self.register_signal('Probability Loss')
        self.bias_correction_loss = self.register_signal('Bias Correction Loss')
        self.unclipped_grads = self.register_signal('Grads (unclipped)')
        self.V_Values = self.register_signal('Values')
        self.kl_divergence = self.register_signal('KL Divergence')

    def _learn_from_batch(self, batch):
        """
        Run a single ACER update (used for both the on-policy and off-policy iterations).

        :param batch: a Batch of consecutive transitions to learn from
        :return: (total_loss, losses, unclipped_grads)
        """
        fetches = [self.networks['main'].online_network.output_heads[1].probability_loss,
                   self.networks['main'].online_network.output_heads[1].bias_correction_loss,
                   self.networks['main'].online_network.output_heads[1].kl_divergence]

        # batch contains a list of transitions to learn from
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        # get the values for the current states
        Q_values, policy_prob = self.networks['main'].online_network.predict(batch.states(network_keys))
        avg_policy_prob = self.networks['main'].target_network.predict(batch.states(network_keys))[1]
        # V(s) = sum_a pi(a|s) * Q(s,a)
        current_state_values = np.sum(policy_prob * Q_values, axis=1)

        actions = batch.actions()
        num_transitions = batch.size

        # NOTE(review): Q_head_targets aliases Q_values (no copy), so the retrace loop
        # below also mutates the array later fed as 'output_1_3' - confirm intentional.
        Q_head_targets = Q_values
        Q_i = Q_values[np.arange(num_transitions), actions]

        # importance sampling ratios of the current policy w.r.t. the behavior policy mu
        mu = batch.info('all_action_probabilities')
        rho = policy_prob / (mu + eps)
        rho_i = rho[np.arange(batch.size), actions]
        rho_bar = np.minimum(1.0, rho_i)

        # Q-retrace: bootstrap from the last next-state unless the episode ended there
        if batch.game_overs()[-1]:
            Qret = 0
        else:
            result = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))
            Qret = np.sum(result[0] * result[1], axis=1)[0]

        # run the retrace recursion backwards through the batch
        for i in reversed(range(num_transitions)):
            Qret = batch.rewards()[i] + self.ap.algorithm.discount * Qret
            Q_head_targets[i, actions[i]] = Qret
            Qret = rho_bar[i] * (Qret - Q_i[i]) + current_state_values[i]

        Q_retrace = Q_head_targets[np.arange(num_transitions), actions]

        # train
        result = self.networks['main'].train_and_sync_networks({**batch.states(network_keys),
                                                                'output_1_0': actions,
                                                                'output_1_1': rho,
                                                                'output_1_2': rho_i,
                                                                'output_1_3': Q_values,
                                                                'output_1_4': Q_retrace,
                                                                'output_1_5': avg_policy_prob},
                                                               [Q_head_targets, current_state_values],
                                                               additional_fetches=fetches)

        # move the average policy (target network) toward the online policy
        for network in self.networks.values():
            network.update_target_network(self.ap.algorithm.rate_for_copying_weights_to_target)

        # logging
        total_loss, losses, unclipped_grads, fetch_result = result[:4]
        self.q_loss.add_sample(losses[0])
        self.policy_loss.add_sample(losses[1])
        self.probability_loss.add_sample(fetch_result[0])
        self.bias_correction_loss.add_sample(fetch_result[1])
        self.unclipped_grads.add_sample(unclipped_grads)
        self.V_Values.add_sample(current_state_values)
        self.kl_divergence.add_sample(fetch_result[2])

        return total_loss, losses, unclipped_grads

    def learn_from_batch(self, batch):
        """
        Perform one on-policy iteration followed by a Poisson-distributed number of
        off-policy (replay) iterations, as prescribed by ratio_of_replay.
        """
        # perform on-policy training iteration
        total_loss, losses, unclipped_grads = self._learn_from_batch(batch)

        if self.ap.algorithm.ratio_of_replay > 0 \
                and self.memory.num_transitions() > self.ap.algorithm.num_transitions_to_start_replay:
            n = np.random.poisson(self.ap.algorithm.ratio_of_replay)
            # perform n off-policy training iterations
            for _ in range(n):
                new_batch = Batch(self.call_memory('sample', (self.ap.algorithm.num_steps_between_gradient_updates, True)))
                result = self._learn_from_batch(new_batch)
                total_loss += result[0]
                losses += result[1]
                unclipped_grads += result[2]

        return total_loss, losses, unclipped_grads

    def get_prediction(self, states):
        """
        :return: the network outputs for the given states with the Q values dropped
                 (index 0 is the state value/Q output).
        """
        tf_input_state = self.prepare_batch_for_inference(states, "main")
        return self.networks['main'].online_network.predict(tf_input_state)[1:]  # index 0 is the state value
from typing import Union
import numpy as np
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import NAFHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, AgentParameters, \
NetworkParameters
from rl_coach.core_types import ActionInfo, EnvironmentSteps
from rl_coach.exploration_policies.ou_process import OUProcessParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import BoxActionSpace
class NAFNetworkParameters(NetworkParameters):
    """Network topology and optimization settings for the NAF agent."""
    def __init__(self):
        super().__init__()
        # topology: observation embedder -> FC middleware -> NAF head (mu, V, L, A, Q)
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [NAFHeadParameters()]
        # optimization / training-scheme settings
        self.optimizer_type = 'Adam'
        self.learning_rate = 0.001
        self.async_training = True
        self.create_target_network = True
class NAFAlgorithmParameters(AlgorithmParameters):
    """Algorithm parameters for NAF."""
    def __init__(self):
        super().__init__()
        # soft-update the target network a little after every environment step
        self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)
        self.rate_for_copying_weights_to_target = 0.001
        # several gradient steps per batch of collected experience
        self.num_consecutive_training_steps = 5
class NAFAgentParameters(AgentParameters):
    """Default parameter bundle for the NAF agent."""
    def __init__(self):
        # continuous-control agent: Ornstein-Uhlenbeck process exploration
        networks = {"main": NAFNetworkParameters()}
        super().__init__(algorithm=NAFAlgorithmParameters(),
                         exploration=OUProcessParameters(),
                         memory=EpisodicExperienceReplayParameters(),
                         networks=networks)

    @property
    def path(self):
        return 'rl_coach.agents.naf_agent:NAFAgent'
# Normalized Advantage Functions - https://arxiv.org/pdf/1603.00748.pdf
class NAFAgent(ValueOptimizationAgent):
    """
    Normalized Advantage Functions - https://arxiv.org/pdf/1603.00748.pdf

    Q-learning for continuous control: the network's head decomposes Q(s, a) into a
    state value V(s) plus an advantage A(s, a) built from a Cholesky factor L around
    the predicted best action mu(s), so the greedy action is simply mu(s).
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals for dashboard logging
        self.l_values = self.register_signal("L")
        self.a_values = self.register_signal("Advantage")
        self.mu_values = self.register_signal("Action")
        self.v_values = self.register_signal("V")
        self.TD_targets = self.register_signal("TD targets")

    def learn_from_batch(self, batch):
        """
        Train the network with 1-step TD targets bootstrapped from the target network's V(s').

        :param batch: a Batch of transitions to learn from
        :return: (total_loss, losses, unclipped_grads)
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        # TD target = r + discount * V(s') for non-terminal transitions
        v_st_plus_1 = self.networks['main'].target_network.predict(
            batch.next_states(network_keys),
            self.networks['main'].target_network.output_heads[0].V,
            squeeze_output=False,
        )
        TD_targets = np.expand_dims(batch.rewards(), -1) + \
                     (1.0 - np.expand_dims(batch.game_overs(), -1)) * self.ap.algorithm.discount * v_st_plus_1
        self.TD_targets.add_sample(TD_targets)

        # the taken actions are fed to the head so it evaluates Q(s, a) for them
        result = self.networks['main'].train_and_sync_networks({**batch.states(network_keys),
                                                                'output_0_0': batch.actions(len(batch.actions().shape) == 1)
                                                                }, TD_targets)
        total_loss, losses, unclipped_grads = result[:3]

        return total_loss, losses, unclipped_grads

    def choose_action(self, curr_state):
        """
        Pick an action by predicting mu(s) and letting the exploration policy perturb it.

        :param curr_state: the current state to act upon
        :return: an ActionInfo holding the chosen action and its Q value
        """
        # NOTE(review): exact type check (not isinstance) also rejects BoxActionSpace
        # subclasses such as goal spaces - confirm this exclusion is intentional
        if type(self.spaces.action) != BoxActionSpace:
            raise ValueError('NAF works only for continuous control problems')

        # convert to batch so we can run it through the network
        tf_input_state = self.prepare_batch_for_inference(curr_state, 'main')
        naf_head = self.networks['main'].online_network.output_heads[0]

        # the greedy action is the head's predicted mu(s)
        action_values = self.networks['main'].online_network.predict(tf_input_state, outputs=naf_head.mu,
                                                                     squeeze_output=False)

        # get the actual action to use
        action = self.exploration_policy.get_action(action_values)

        # get the internal values for logging
        outputs = [naf_head.mu, naf_head.Q, naf_head.L, naf_head.A, naf_head.V]
        result = self.networks['main'].online_network.predict(
            {**tf_input_state, 'output_0_0': action_values},
            outputs=outputs
        )
        mu, Q, L, A, V = result

        # store the q values statistics for logging
        self.q_values.add_sample(Q)
        self.l_values.add_sample(L)
        self.a_values.add_sample(A)
        self.mu_values.add_sample(mu)
        self.v_values.add_sample(V)

        action_info = ActionInfo(action=action, action_value=Q)

        return action_info
from typing import Union
from rl_coach.agents.imitation_agent import ImitationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import RegressionHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AgentParameters, MiddlewareScheme, NetworkParameters, AlgorithmParameters
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.non_episodic.balanced_experience_replay import BalancedExperienceReplayParameters
class CILAlgorithmParameters(AlgorithmParameters):
    """
    :param state_key_with_the_class_index: (str)
        The key of the state dictionary which corresponds to the value that will be used to control the class index.
    """
    def __init__(self):
        super().__init__()
        # which state entry selects the regression-head branch to train/use
        self.state_key_with_the_class_index = 'high_level_command'
class CILNetworkParameters(NetworkParameters):
    """Network topology and optimization settings for the conditional imitation agent."""
    def __init__(self):
        super().__init__()
        # topology: observation embedder -> medium FC middleware -> regression head
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters(scheme=MiddlewareScheme.Medium)
        self.heads_parameters = [RegressionHeadParameters()]
        # optimization settings
        self.optimizer_type = 'Adam'
        self.batch_size = 32
        self.replace_mse_with_huber_loss = False
        # supervised learning: no target network is needed
        self.create_target_network = False
class CILAgentParameters(AgentParameters):
    """Default parameter bundle for the conditional imitation learning agent."""
    def __init__(self):
        # a balanced replay keeps an even distribution of samples over the branches
        memory = BalancedExperienceReplayParameters()
        super().__init__(algorithm=CILAlgorithmParameters(),
                         exploration=EGreedyParameters(),
                         memory=memory,
                         networks={"main": CILNetworkParameters()})

    @property
    def path(self):
        return 'rl_coach.agents.cil_agent:CILAgent'
# Conditional Imitation Learning Agent: https://arxiv.org/abs/1710.02410
class CILAgent(ImitationAgent):
    """
    Conditional Imitation Learning Agent - https://arxiv.org/abs/1710.02410

    The network has one regression branch per high level command; at each step the state
    entry named by state_key_with_the_class_index selects the branch to use, and only
    the selected branch is trained toward the demonstrated action.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # the branch (high level command) used for the current decision
        self.current_high_level_control = 0

    def choose_action(self, curr_state):
        # remember which branch the current command selects before delegating
        self.current_high_level_control = curr_state[self.ap.algorithm.state_key_with_the_class_index]
        return super().choose_action(curr_state)

    def extract_action_values(self, prediction):
        # only the branch matching the current high level command is relevant
        return prediction[self.current_high_level_control].squeeze()

    def learn_from_batch(self, batch):
        """
        Train only the branch selected by each transition's high level command.

        Non-selected branches use the network's own predictions as targets (zero error),
        so their weights are effectively untouched by the update.

        :param batch: a Batch of demonstration transitions
        :return: (total_loss, losses, unclipped_grads)
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        # start from the current predictions so non-selected branches get a zero-error target
        target_values = self.networks['main'].online_network.predict({**batch.states(network_keys)})
        branch_to_update = batch.states([self.ap.algorithm.state_key_with_the_class_index])[self.ap.algorithm.state_key_with_the_class_index]
        for idx, branch in enumerate(branch_to_update):
            # overwrite only the selected branch's target with the demonstrated action
            target_values[branch][idx] = batch.actions()[idx]

        result = self.networks['main'].train_and_sync_networks({**batch.states(network_keys)}, target_values)
        total_loss, losses, unclipped_grads = result[:3]

        return total_loss, losses, unclipped_grads
import copy
from typing import Union
from collections import OrderedDict
import numpy as np
from rl_coach.agents.actor_critic_agent import ActorCriticAgent
from rl_coach.agents.agent import Agent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, DDPGVHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
AgentParameters, EmbedderScheme
from rl_coach.core_types import ActionInfo, EnvironmentSteps
from rl_coach.exploration_policies.ou_process import OUProcessParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import BoxActionSpace, GoalsSpace
class DDPGCriticNetworkParameters(NetworkParameters):
    """Network parameters for the DDPG critic (Q) network."""
    def __init__(self, use_batchnorm=False):
        """
        :param use_batchnorm: whether to apply batch normalization in the observation embedder.
        """
        super().__init__()
        # topology: observation + action embedders -> FC middleware -> V head
        embedders = {
            'observation': InputEmbedderParameters(batchnorm=use_batchnorm),
            'action': InputEmbedderParameters(scheme=EmbedderScheme.Shallow),
        }
        self.input_embedders_parameters = embedders
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [DDPGVHeadParameters()]
        # optimization settings
        self.optimizer_type = 'Adam'
        self.learning_rate = 0.001
        self.adam_optimizer_beta2 = 0.999
        self.optimizer_epsilon = 1e-8
        self.batch_size = 64
        # training-scheme settings
        self.async_training = False
        self.shared_optimizer = True
        self.create_target_network = True
        self.scale_down_gradients_by_number_of_workers_for_sync_training = False
        # self.l2_regularization = 1e-2
class DDPGActorNetworkParameters(NetworkParameters):
    """Network parameters for the DDPG actor (deterministic policy) network."""
    def __init__(self, use_batchnorm=False):
        """
        :param use_batchnorm: whether to apply batch normalization throughout the actor.
        """
        super().__init__()
        # topology: observation embedder -> FC middleware -> deterministic policy head
        self.input_embedders_parameters = {'observation': InputEmbedderParameters(batchnorm=use_batchnorm)}
        self.middleware_parameters = FCMiddlewareParameters(batchnorm=use_batchnorm)
        self.heads_parameters = [DDPGActorHeadParameters(batchnorm=use_batchnorm)]
        # optimization settings (the actor learns 10x slower than the critic)
        self.optimizer_type = 'Adam'
        self.learning_rate = 0.0001
        self.adam_optimizer_beta2 = 0.999
        self.optimizer_epsilon = 1e-8
        self.batch_size = 64
        # training-scheme settings
        self.async_training = False
        self.shared_optimizer = True
        self.create_target_network = True
        self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class DDPGAlgorithmParameters(AlgorithmParameters):
    """
    :param num_steps_between_copying_online_weights_to_target: (StepMethod)
        The number of steps between copying the online network weights to the target network weights.
    :param rate_for_copying_weights_to_target: (float)
        When copying the online network weights to the target network weights, a soft update will be used, which
        weight the new online network weights by rate_for_copying_weights_to_target
    :param num_consecutive_playing_steps: (StepMethod)
        The number of consecutive steps to act between every two training iterations
    :param use_target_network_for_evaluation: (bool)
        If set to True, the target network will be used for predicting the actions when choosing actions to act.
        Since the target network weights change more slowly, the predicted actions will be more consistent.
    :param action_penalty: (float)
        The amount by which to penalize the network on high action feature (pre-activation) values.
        This can prevent the actions features from saturating the TanH activation function, and therefore prevent the
        gradients from becoming very low.
    :param clip_critic_targets: (Tuple[float, float] or None)
        The range to clip the critic target to in order to prevent overestimation of the action values.
    :param use_non_zero_discount_for_terminal_states: (bool)
        If set to True, the discount factor will be used for terminal states to bootstrap the next predicted state
        values. If set to False, the terminal states reward will be taken as the target return for the network.
    """
    def __init__(self):
        super().__init__()
        # soft-update the target networks a little after every environment step
        self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)
        self.rate_for_copying_weights_to_target = 0.001
        self.num_consecutive_playing_steps = EnvironmentSteps(1)
        self.use_target_network_for_evaluation = False
        # 0 disables the action pre-activation penalty
        self.action_penalty = 0
        self.clip_critic_targets = None  # expected to be a tuple of the form (min_clip_value, max_clip_value) or None
        self.use_non_zero_discount_for_terminal_states = False
class DDPGAgentParameters(AgentParameters):
    """Bundles the algorithm, exploration, memory and network parameters of a DDPG agent."""
    def __init__(self, use_batchnorm=False):
        # actor first, then critic -- the ordering of the network wrappers is meaningful
        networks = OrderedDict()
        networks["actor"] = DDPGActorNetworkParameters(use_batchnorm=use_batchnorm)
        networks["critic"] = DDPGCriticNetworkParameters(use_batchnorm=use_batchnorm)
        super().__init__(algorithm=DDPGAlgorithmParameters(),
                         exploration=OUProcessParameters(),
                         memory=EpisodicExperienceReplayParameters(),
                         networks=networks)

    @property
    def path(self):
        return 'rl_coach.agents.ddpg_agent:DDPGAgent'
# Deep Deterministic Policy Gradients Network - https://arxiv.org/pdf/1509.02971.pdf
class DDPGAgent(ActorCriticAgent):
    """
    Deep Deterministic Policy Gradients agent (Lillicrap et al., 2015).

    Holds two network wrappers -- an actor that maps states to continuous actions and
    a critic that estimates Q(s, a) -- and trains the actor by ascending the critic's
    gradient with respect to the action.
    """

    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals used purely for dashboard logging
        self.q_values = self.register_signal("Q")
        self.TD_targets_signal = self.register_signal("TD targets")
        self.action_signal = self.register_signal("actions")

    def learn_from_batch(self, batch):
        """
        Run one DDPG update (critic TD regression + actor policy-gradient step) on a batch.

        :param batch: a Batch of transitions sampled from the replay memory
        :return: (total_loss, losses, unclipped_grads) as reported by the critic training step
        """
        actor = self.networks['actor']
        critic = self.networks['critic']

        actor_keys = self.ap.network_wrappers['actor'].input_embedders_parameters.keys()
        critic_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()

        # TD error = r + discount*max(q_st_plus_1) - q_st
        # a'(s_{t+1}) from the target actor and a(s_t) from the online actor, in a single run
        next_actions, actions_mean = actor.parallel_prediction([
            (actor.target_network, batch.next_states(actor_keys)),
            (actor.online_network, batch.states(actor_keys))
        ])

        critic_inputs = copy.copy(batch.next_states(critic_keys))
        critic_inputs['action'] = next_actions
        q_st_plus_1 = critic.target_network.predict(critic_inputs)[0]

        # calculate the bootstrapped TD targets while discounting terminal states according to
        # use_non_zero_discount_for_terminal_states
        if self.ap.algorithm.use_non_zero_discount_for_terminal_states:
            TD_targets = batch.rewards(expand_dims=True) + self.ap.algorithm.discount * q_st_plus_1
        else:
            TD_targets = batch.rewards(expand_dims=True) + \
                         (1.0 - batch.game_overs(expand_dims=True)) * self.ap.algorithm.discount * q_st_plus_1

        # clip the TD targets to prevent overestimation errors
        if self.ap.algorithm.clip_critic_targets:
            TD_targets = np.clip(TD_targets, *self.ap.algorithm.clip_critic_targets)

        self.TD_targets_signal.add_sample(TD_targets)

        # get the gradients of the critic output with respect to the action (at the online actor's action)
        critic_inputs = copy.copy(batch.states(critic_keys))
        critic_inputs['action'] = actions_mean
        action_gradients = critic.online_network.predict(
            critic_inputs, outputs=critic.online_network.gradients_wrt_inputs[1]['action'])

        # train the critic
        critic_inputs = copy.copy(batch.states(critic_keys))
        critic_inputs['action'] = batch.actions(len(batch.actions().shape) == 1)

        # also need the inputs for when applying gradients so batchnorm's update of running mean and stddev will work
        result = critic.train_and_sync_networks(critic_inputs, TD_targets, use_inputs_for_apply_gradients=True)
        total_loss, losses, unclipped_grads = result[:3]

        # apply the critic's action-gradients through the actor (negated: gradient ascent on Q)
        initial_feed_dict = {actor.online_network.gradients_weights_ph[0]: -action_gradients}
        gradients = actor.online_network.predict(batch.states(actor_keys),
                                                 outputs=actor.online_network.weighted_gradients[0],
                                                 initial_feed_dict=initial_feed_dict)

        # also need the inputs for when applying gradients so batchnorm's update of running mean and stddev will work
        if actor.has_global:
            actor.apply_gradients_to_global_network(gradients, additional_inputs=copy.copy(batch.states(critic_keys)))
            actor.update_online_network()
        else:
            actor.apply_gradients_to_online_network(gradients, additional_inputs=copy.copy(batch.states(critic_keys)))

        return total_loss, losses, unclipped_grads

    def train(self):
        # bypass ActorCriticAgent.train and use the generic Agent training loop
        return Agent.train(self)

    def choose_action(self, curr_state):
        """
        Select a continuous action with the actor, then query the critic for its Q value
        (logging only) and return both packed in an ActionInfo.

        :raises ValueError: if the action space is not continuous (Box or Goals)
        """
        if not isinstance(self.spaces.action, (BoxActionSpace, GoalsSpace)):
            raise ValueError("DDPG works only for continuous control problems")
        # convert to batch so we can run it through the network
        tf_input_state = self.prepare_batch_for_inference(curr_state, 'actor')
        if self.ap.algorithm.use_target_network_for_evaluation:
            actor_network = self.networks['actor'].target_network
        else:
            actor_network = self.networks['actor'].online_network

        action_values = actor_network.predict(tf_input_state).squeeze()

        action = self.exploration_policy.get_action(action_values)

        self.action_signal.add_sample(action)

        # get q value (for logging only)
        tf_input_state = self.prepare_batch_for_inference(curr_state, 'critic')
        action_batch = np.expand_dims(action, 0)
        if not isinstance(action, np.ndarray):
            # scalar action -> wrap it into a (1, 1) batch
            action_batch = np.array([[action]])
        tf_input_state['action'] = action_batch
        q_value = self.networks['critic'].online_network.predict(tf_input_state)[0]
        self.q_values.add_sample(q_value)

        action_info = ActionInfo(action=action,
                                 action_value=q_value)

        return action_info
from typing import Union
import numpy as np
from rl_coach.agents.ddpg_agent import DDPGAgent, DDPGAgentParameters, DDPGAlgorithmParameters
from rl_coach.core_types import RunPhase
from rl_coach.spaces import SpacesDefinition
class HACDDPGAlgorithmParameters(DDPGAlgorithmParameters):
    """
    :param time_limit: (int)
        Number of steps the agent may act while trying to reach its current goal.
    :param sub_goal_testing_rate: (float)
        Fraction of episodes used for testing the sub goals produced by the upper level agents.
    """
    def __init__(self):
        super().__init__()
        self.sub_goal_testing_rate = 0.5
        self.time_limit = 40
class HACDDPGAgentParameters(DDPGAgentParameters):
    """DDPG agent parameters with the algorithm swapped for the hierarchical (HAC) variant."""
    def __init__(self):
        super().__init__()
        # override the plain DDPG algorithm parameters set by the parent constructor
        self.algorithm = HACDDPGAlgorithmParameters()

    @property
    def path(self):
        return 'rl_coach.agents.hac_ddpg_agent:HACDDPGAgent'
# Hierarchical Actor Critic Generating Subgoals DDPG Agent - https://arxiv.org/pdf/1712.00948.pdf
class HACDDPGAgent(DDPGAgent):
    """
    DDPG agent used as one level of a Hierarchical Actor Critic (HAC) hierarchy.
    Upper levels emit sub-goals as their actions; lower levels are rewarded for reaching them.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        self.sub_goal_testing_rate = self.ap.algorithm.sub_goal_testing_rate
        self.graph_manager = None

    def choose_action(self, curr_state):
        # top level decides, for each of his generated sub-goals, for all the layers beneath him if this is a sub-goal
        # testing phase
        graph_manager = self.parent_level_manager.parent_graph_manager
        if self.ap.is_a_highest_level_agent:
            graph_manager.should_test_current_sub_goal = np.random.rand() < self.sub_goal_testing_rate

        if self.phase == RunPhase.TRAIN:
            if graph_manager.should_test_current_sub_goal:
                # act deterministically while testing a sub-goal (no exploration noise)
                self.exploration_policy.change_phase(RunPhase.TEST)
            else:
                self.exploration_policy.change_phase(self.phase)

        action_info = super().choose_action(curr_state)
        return action_info

    def update_transition_before_adding_to_replay_buffer(self, transition):
        """
        Rewrite the transition's desired goal, reward and game-over flag according to the
        HAC scheme before storing it in the replay buffer.
        """
        graph_manager = self.parent_level_manager.parent_graph_manager

        # deal with goals given from a higher level agent
        if not self.ap.is_a_highest_level_agent:
            transition.state['desired_goal'] = self.current_hrl_goal
            transition.next_state['desired_goal'] = self.current_hrl_goal
            # TODO: allow setting goals which are not part of the state. e.g. state-embedding using get_prediction
            self.distance_from_goal.add_sample(self.spaces.goal.distance_from_goal(
                self.current_hrl_goal, transition.next_state))
            goal_reward, sub_goal_reached = self.spaces.goal.get_reward_for_goal_and_state(
                self.current_hrl_goal, transition.next_state)
            transition.reward = goal_reward
            transition.game_over = transition.game_over or sub_goal_reached

        # each level tests its own generated sub goals
        if not self.ap.is_a_lowest_level_agent and graph_manager.should_test_current_sub_goal:
            # TODO-fixme
            # _, sub_goal_reached = self.parent_level_manager.environment.agents['agent_1'].spaces.goal.\
            #     get_reward_for_goal_and_state(transition.action, transition.next_state)
            _, sub_goal_reached = self.spaces.goal.get_reward_for_goal_and_state(
                transition.action, transition.next_state)

            sub_goal_is_missed = not sub_goal_reached

            if sub_goal_is_missed:
                # penalize a missed sub-goal with the negative time limit, as in the HAC paper
                transition.reward = -self.ap.algorithm.time_limit
        return transition

    def set_environment_parameters(self, spaces: SpacesDefinition):
        super().set_environment_parameters(spaces)

        if self.ap.is_a_highest_level_agent:
            # the rest of the levels already have an in_action_space set to be of type GoalsSpace, thus they will have
            # their GoalsSpace set to the in_action_space in agent.set_environment_parameters()
            self.spaces.goal = self.spaces.action
            self.spaces.goal.set_target_space(self.spaces.state[self.spaces.goal.goal_name])

        if not self.ap.is_a_highest_level_agent:
            self.spaces.reward.reward_success_threshold = self.spaces.goal.reward_type.goal_reaching_reward
import os
import pickle
from typing import Union, List
import numpy as np
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import DNDQHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, AgentParameters
from rl_coach.core_types import RunPhase, EnvironmentSteps, Episode, StateType
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters, MemoryGranularity
from rl_coach.schedules import ConstantSchedule
class NECNetworkParameters(NetworkParameters):
    """Network topology for NEC: one observation embedder, FC middleware and a DND Q head."""
    def __init__(self):
        super().__init__()
        self.optimizer_type = 'Adam'
        self.should_get_softmax_probabilities = False
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [DNDQHeadParameters()]
class NECAlgorithmParameters(AlgorithmParameters):
    r"""
    :param dnd_size: (int)
        Number of transitions stored in each DND. The overall number of stored transitions is
        therefore dnd_size x num_actions.
    :param l2_norm_added_delta: (float)
        Small constant added when computing the weight of each DND entry; this is the
        :math:`\delta` parameter defined in the paper.
    :param new_value_shift_coefficient: (float)
        If a new embedding added to the DND is already present, the stored value becomes a mix
        of the existing and the new value, with this coefficient as the mixing rate.
    :param number_of_knn: (int)
        Number of nearest neighbors retrieved for each DND query.
    :param DND_key_error_threshold: (float)
        Threshold used to decide whether a queried embedding already exists in the DND, since
        exact embedding matches are very rare.
    :param propagate_updates_to_DND: (bool)
        When True, the gradients are also backpropagated through the DND keys, which are then
        updated as if they were regular network weights.
    :param n_step: (int)
        Bootstrap horizon used when computing the state values that are stored in the DND.
    :param bootstrap_total_return_from_old_policy: (bool)
        When True, each state-action value is bootstrapped with the network value from when the
        state was first seen, rather than the latest, most up-to-date network value.
    """
    def __init__(self):
        super().__init__()
        # DND storage and lookup
        self.dnd_size = 500000
        self.DND_key_error_threshold = 0
        self.number_of_knn = 50
        self.l2_norm_added_delta = 0.001
        self.new_value_shift_coefficient = 0.1
        self.propagate_updates_to_DND = False
        # return estimation
        self.n_step = 100
        self.bootstrap_total_return_from_old_policy = True
        # scheduling
        self.num_consecutive_playing_steps = EnvironmentSteps(4)
class NECMemoryParameters(EpisodicExperienceReplayParameters):
    """Episodic experience replay for NEC, capped at 100k transitions."""
    def __init__(self):
        super().__init__()
        # bound the buffer by transition count rather than by number of episodes
        self.max_size = (MemoryGranularity.Transitions, 100000)
class NECAgentParameters(AgentParameters):
    """Parameter bundle for a NEC agent: e-greedy exploration over a DND-backed Q network."""
    def __init__(self):
        super().__init__(algorithm=NECAlgorithmParameters(),
                         exploration=EGreedyParameters(),
                         memory=NECMemoryParameters(),
                         networks={"main": NECNetworkParameters()})
        # fixed 10% epsilon while training, 1% while evaluating
        self.exploration.epsilon_schedule = ConstantSchedule(0.1)
        self.exploration.evaluation_epsilon = 0.01

    @property
    def path(self):
        return 'rl_coach.agents.nec_agent:NECAgent'
# Neural Episodic Control - https://arxiv.org/pdf/1703.01988.pdf
class NECAgent(ValueOptimizationAgent):
    """
    Neural Episodic Control agent. Q values are produced by a differentiable neural
    dictionary (DND) head; the state embeddings collected during each episode are
    written into the DND when the episode ends.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # embeddings collected over the current episode, inserted into the DND at episode end
        self.current_episode_state_embeddings = []
        self.training_started = False
        self.current_episode_buffer = \
            Episode(discount=self.ap.algorithm.discount,
                    n_step=self.ap.algorithm.n_step,
                    bootstrap_total_return_from_old_policy=self.ap.algorithm.bootstrap_total_return_from_old_policy)

    def learn_from_batch(self, batch):
        """
        Train the Q network on a batch, using the n-step discounted returns as targets
        for the actions actually taken. Training is skipped until the DND holds enough
        entries to answer k-NN queries.

        :return: (total_loss, losses, unclipped_grads), or (0, [], 0) if training was skipped
        """
        if not self.networks['main'].online_network.output_heads[0].DND.has_enough_entries(self.ap.algorithm.number_of_knn):
            # the DND cannot answer k-NN queries yet -> skip this training step
            return 0, [], 0

        if not self.training_started:
            self.training_started = True
            screen.log_title("Finished collecting initial entries in DND. Starting to train network...")

        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        TD_targets = self.networks['main'].online_network.predict(batch.states(network_keys))
        bootstrapped_return_from_old_policy = batch.n_step_discounted_rewards()

        # only update the action that we have actually done in this transition
        for i in range(batch.size):
            TD_targets[i, batch.actions()[i]] = bootstrapped_return_from_old_policy[i]

        # set the gradients to fetch for the DND update
        fetches = []
        head = self.networks['main'].online_network.output_heads[0]
        if self.ap.algorithm.propagate_updates_to_DND:
            fetches = [head.dnd_embeddings_grad, head.dnd_values_grad, head.dnd_indices]

        # train the neural network
        result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets, fetches)
        total_loss, losses, unclipped_grads = result[:3]

        # update the DND keys and values using the extracted gradients
        if self.ap.algorithm.propagate_updates_to_DND:
            embedding_gradients = np.swapaxes(result[-1][0], 0, 1)
            value_gradients = np.swapaxes(result[-1][1], 0, 1)
            indices = np.swapaxes(result[-1][2], 0, 1)
            head.DND.update_keys_and_values(batch.actions(), embedding_gradients, value_gradients, indices)

        return total_loss, losses, unclipped_grads

    def act(self):
        if self.phase == RunPhase.HEATUP:
            # get embedding in heatup (otherwise we get it through get_prediction)
            embedding = self.networks['main'].online_network.predict(
                self.prepare_batch_for_inference(self.curr_state, 'main'),
                outputs=self.networks['main'].online_network.state_embedding)
            self.current_episode_state_embeddings.append(embedding.squeeze())

        return super().act()

    def get_all_q_values_for_states(self, states: StateType, additional_outputs: List = None):
        # we need to store the state embeddings regardless if the action is random or not
        return self.get_prediction_and_update_embeddings(states)

    def get_all_q_values_for_states_and_softmax_probabilities(self, states: StateType):
        # get the actions q values and the state embedding
        embedding, actions_q_values, softmax_probabilities = self.networks['main'].online_network.predict(
            self.prepare_batch_for_inference(states, 'main'),
            outputs=[self.networks['main'].online_network.state_embedding,
                     self.networks['main'].online_network.output_heads[0].output,
                     self.networks['main'].online_network.output_heads[0].softmax]
        )
        if self.phase != RunPhase.TEST:
            # store the state embedding for inserting it to the DND later
            # NOTE(review): this stores embedding.squeeze() while get_prediction_and_update_embeddings
            # stores embedding[0].squeeze() -- confirm both yield the same shape for a single state
            self.current_episode_state_embeddings.append(embedding.squeeze())
        actions_q_values = actions_q_values[0][0]
        return actions_q_values, softmax_probabilities

    def get_prediction_and_update_embeddings(self, states):
        # get the actions q values and the state embedding
        embedding, actions_q_values = self.networks['main'].online_network.predict(
            self.prepare_batch_for_inference(states, 'main'),
            outputs=[self.networks['main'].online_network.state_embedding,
                     self.networks['main'].online_network.output_heads[0].output]
        )
        if self.phase != RunPhase.TEST:
            # store the state embedding for inserting it to the DND later
            self.current_episode_state_embeddings.append(embedding[0].squeeze())
        actions_q_values = actions_q_values[0][0]
        return actions_q_values

    def reset_internal_state(self):
        super().reset_internal_state()
        self.current_episode_state_embeddings = []
        self.current_episode_buffer = \
            Episode(discount=self.ap.algorithm.discount,
                    n_step=self.ap.algorithm.n_step,
                    bootstrap_total_return_from_old_policy=self.ap.algorithm.bootstrap_total_return_from_old_policy)

    def handle_episode_ended(self):
        super().handle_episode_ended()

        # get the last full episode that we have collected
        episode = self.call_memory('get_last_complete_episode')
        if episode is not None and self.phase != RunPhase.TEST:
            # one embedding must have been collected per transition
            assert len(self.current_episode_state_embeddings) == episode.length()
            discounted_rewards = episode.get_transitions_attribute('n_step_discounted_rewards')
            actions = episode.get_transitions_attribute('action')
            self.networks['main'].online_network.output_heads[0].DND.add(self.current_episode_state_embeddings,
                                                                         actions, discounted_rewards)

    def save_checkpoint(self, checkpoint_prefix):
        """Save the agent checkpoint, plus the DND contents pickled next to it."""
        super().save_checkpoint(checkpoint_prefix)
        with open(os.path.join(self.ap.task_parameters.checkpoint_save_dir, str(checkpoint_prefix) + '.dnd'), 'wb') as f:
            pickle.dump(self.networks['main'].online_network.output_heads[0].DND, f, pickle.HIGHEST_PROTOCOL)
from typing import Union
import numpy as np
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent, PolicyGradientRescaler
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import PolicyHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
AgentParameters
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace
class PolicyGradientNetworkParameters(NetworkParameters):
    """Observation embedder -> FC middleware -> policy head, trained asynchronously."""
    def __init__(self):
        super().__init__()
        self.async_training = True
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [PolicyHeadParameters()]
class PolicyGradientAlgorithmParameters(AlgorithmParameters):
    """
    :param policy_gradient_rescaler: (PolicyGradientRescaler)
        Which rescaler multiplies the log-probability of the action in the policy gradient loss.
        The most basic rescaler is the discounted return, but other rescalers exist that are
        intended to reduce the variance of the updates.
    :param apply_gradients_every_x_episodes: (int)
        Number of episodes between applying the accumulated gradients to the network. Gradients
        are computed every num_steps_between_gradient_updates steps, accumulated in internal
        accumulators, and only applied to the network once per this many episodes.
    :param beta_entropy: (float)
        Weight of the entropy regularization term: the entropy of the actions is added to the
        loss, scaled by this factor.
    :param num_steps_between_gradient_updates: (int)
        Number of steps between gradient computations over the collected data (called t_max in
        the A3C paper). The algorithm is on-policy, so only the steps collected between two
        gradient computations form the batch.
    """
    def __init__(self):
        super().__init__()
        self.num_steps_between_gradient_updates = 20000  # t_max in all the papers
        self.apply_gradients_every_x_episodes = 5
        self.policy_gradient_rescaler = PolicyGradientRescaler.FUTURE_RETURN_NORMALIZED_BY_TIMESTEP
        self.beta_entropy = 0
class PolicyGradientsAgentParameters(AgentParameters):
    """Vanilla policy-gradients parameters: per-action-space exploration, single-episode buffer."""
    def __init__(self):
        # exploration policy is picked according to the action space type
        exploration = {DiscreteActionSpace: CategoricalParameters(),
                       BoxActionSpace: AdditiveNoiseParameters()}
        super().__init__(algorithm=PolicyGradientAlgorithmParameters(),
                         exploration=exploration,
                         memory=SingleEpisodeBufferParameters(),
                         networks={"main": PolicyGradientNetworkParameters()})

    @property
    def path(self):
        return 'rl_coach.agents.policy_gradients_agent:PolicyGradientsAgent'
class PolicyGradientsAgent(PolicyOptimizationAgent):
    """
    REINFORCE-style policy gradients agent: accumulates gradients of the log-probability
    of the taken actions, weighted by a rescaled episode return.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # logging signals
        self.returns_mean = self.register_signal('Returns Mean')
        self.returns_variance = self.register_signal('Returns Variance')
        self.last_gradient_update_step_idx = 0

    def learn_from_batch(self, batch):
        """
        Rescale the discounted returns per the configured rescaler and accumulate the
        policy gradients for the batch.

        :param batch: a batch containing a list of episodes to learn from
        :return: (total_loss, losses, unclipped_grads)
        """
        # batch contains a list of episodes to learn from
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        total_returns = batch.n_step_discounted_rewards()
        for i in reversed(range(batch.size)):
            if self.policy_gradient_rescaler == PolicyGradientRescaler.TOTAL_RETURN:
                total_returns[i] = total_returns[0]
            elif self.policy_gradient_rescaler == PolicyGradientRescaler.FUTURE_RETURN:
                # just take the total return as it is
                pass
            elif self.policy_gradient_rescaler == PolicyGradientRescaler.FUTURE_RETURN_NORMALIZED_BY_EPISODE:
                # we can get a single transition episode while playing Doom Basic, causing the std to be 0
                if self.std_discounted_return != 0:
                    total_returns[i] = (total_returns[i] - self.mean_discounted_return) / self.std_discounted_return
                else:
                    total_returns[i] = 0
            elif self.policy_gradient_rescaler == PolicyGradientRescaler.FUTURE_RETURN_NORMALIZED_BY_TIMESTEP:
                total_returns[i] -= self.mean_return_over_multiple_episodes[i]
            else:
                screen.warning("WARNING: The requested policy gradient rescaler is not available")

        targets = total_returns
        actions = batch.actions()
        # exact type check (not isinstance): subclasses of DiscreteActionSpace are deliberately
        # treated as non-discrete here -- TODO confirm this was intentional before changing
        if type(self.spaces.action) != DiscreteActionSpace and len(actions.shape) < 2:
            # continuous actions are fed to the network as a 2D batch
            actions = np.expand_dims(actions, -1)

        self.returns_mean.add_sample(np.mean(total_returns))
        self.returns_variance.add_sample(np.std(total_returns))

        result = self.networks['main'].online_network.accumulate_gradients(
            {**batch.states(network_keys), 'output_0_0': actions}, targets
        )
        total_loss, losses, unclipped_grads = result[:3]

        return total_loss, losses, unclipped_grads
from typing import Union
import numpy as np
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import QHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, AgentParameters, NetworkParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters
from rl_coach.utils import last_sample
class NStepQNetworkParameters(NetworkParameters):
    """Q network for N-step Q learning: Adam, shared optimizer, async training, with a target network."""
    def __init__(self):
        super().__init__()
        self.optimizer_type = 'Adam'
        self.shared_optimizer = True
        self.async_training = True
        self.create_target_network = True
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [QHeadParameters()]
class NStepQAlgorithmParameters(AlgorithmParameters):
    """
    :param num_steps_between_copying_online_weights_to_target: (StepMethod)
        Number of steps between copying the online network weights to the target network.
    :param apply_gradients_every_x_episodes: (int)
        Number of episodes between applying the accumulated gradients to the network. Gradients
        are computed every num_steps_between_gradient_updates steps, accumulated internally, and
        only applied once per this many episodes.
    :param num_steps_between_gradient_updates: (int)
        Number of steps between gradient computations over the collected data (t_max in the A3C
        paper). The algorithm is on-policy, so only the steps collected between two gradient
        computations form the batch.
    :param targets_horizon: (str)
        Either 'N-Step' or '1-Step'; the bootstrap horizon for the network values. '1-Step'
        follows the regular one-step bootstrapped Q learning update. For more information see
        the original paper (https://arxiv.org/abs/1602.01783).
    """
    def __init__(self):
        super().__init__()
        self.targets_horizon = 'N-Step'
        self.num_steps_between_gradient_updates = 5  # t_max in all the papers
        self.apply_gradients_every_x_episodes = 1
        self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(10000)
class NStepQAgentParameters(AgentParameters):
    """N-step Q learning agent parameters: e-greedy exploration over a single-episode buffer."""
    def __init__(self):
        super().__init__(algorithm=NStepQAlgorithmParameters(),
                         exploration=EGreedyParameters(),
                         memory=SingleEpisodeBufferParameters(),
                         networks={"main": NStepQNetworkParameters()})

    @property
    def path(self):
        return 'rl_coach.agents.n_step_q_agent:NStepQAgent'
# N Step Q Learning Agent - https://arxiv.org/abs/1602.01783
class NStepQAgent(ValueOptimizationAgent, PolicyOptimizationAgent):
    """
    N-step Q learning agent. Acts like a value-based agent, but accumulates gradients
    over short on-policy rollouts like a policy optimization agent.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        self.last_gradient_update_step_idx = 0
        # logging signals
        self.q_values = self.register_signal('Q Values')
        self.value_loss = self.register_signal('Value Loss')

    def learn_from_batch(self, batch):
        """
        Build 1-step or N-step bootstrapped Q targets for an on-policy batch and
        accumulate the resulting gradients.

        :param batch: a batch of consecutive on-policy transitions
        :return: (total_loss, losses, unclipped_grads)
        :raises ValueError: if targets_horizon is neither '1-Step' nor 'N-Step'
        """
        # batch contains a list of episodes to learn from
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        # get the values for the current states
        state_value_head_targets = self.networks['main'].online_network.predict(batch.states(network_keys))

        # the targets for the state value estimator
        if self.ap.algorithm.targets_horizon == '1-Step':
            # 1-Step Q learning
            q_st_plus_1 = self.networks['main'].target_network.predict(batch.next_states(network_keys))

            for i in reversed(range(batch.size)):
                state_value_head_targets[i][batch.actions()[i]] = \
                    batch.rewards()[i] \
                    + (1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount * np.max(q_st_plus_1[i], 0)

        elif self.ap.algorithm.targets_horizon == 'N-Step':
            # N-Step Q learning: bootstrap R from the target network's value of the last
            # next-state, unless the rollout ended with a terminal state
            if batch.game_overs()[-1]:
                R = 0
            else:
                R = np.max(self.networks['main'].target_network.predict(last_sample(batch.next_states(network_keys))))

            for i in reversed(range(batch.size)):
                R = batch.rewards()[i] + self.ap.algorithm.discount * R
                state_value_head_targets[i][batch.actions()[i]] = R

        else:
            # was `assert True, ...`, which can never fire -- an invalid horizon would have
            # silently trained the network on its own raw predictions
            raise ValueError('The available values for targets_horizon are: 1-Step, N-Step')

        # add Q value samples for logging
        self.q_values.add_sample(state_value_head_targets)

        # train
        result = self.networks['main'].online_network.accumulate_gradients(batch.states(network_keys),
                                                                           [state_value_head_targets])

        # logging
        total_loss, losses, unclipped_grads = result[:3]
        self.value_loss.add_sample(losses[0])

        return total_loss, losses, unclipped_grads

    def train(self):
        # update the target network of every network that has a target network
        if any([network.has_target for network in self.networks.values()]) \
                and self._should_update_online_weights_to_target():
            for network in self.networks.values():
                network.update_target_network(self.ap.algorithm.rate_for_copying_weights_to_target)

            self.agent_logger.create_signal_value('Update Target Network', 1)
        else:
            self.agent_logger.create_signal_value('Update Target Network', 0, overwrite=False)

        return PolicyOptimizationAgent.train(self)
from typing import Union, List, Dict
import numpy as np
from rl_coach.core_types import EnvResponse, ActionInfo, RunPhase, PredictionType, ActionType, Transition
from rl_coach.saver import SaverCollection
class AgentInterface(object):
def __init__(self):
self._phase = RunPhase.HEATUP
self._parent = None
self.spaces = None
@property
def parent(self):
"""
Get the parent class of the agent
:return: the current phase
"""
return self._parent
@parent.setter
def parent(self, val):
"""
Change the parent class of the agent
:param val: the new parent
:return: None
"""
self._parent = val
@property
def phase(self) -> RunPhase:
"""
Get the phase of the agent
:return: the current phase
"""
return self._phase
@phase.setter
def phase(self, val: RunPhase):
"""
Change the phase of the agent
:param val: the new phase
:return: None
"""
self._phase = val
def reset_internal_state(self) -> None:
"""
Reset the episode parameters for the agent
:return: None
"""
raise NotImplementedError("")
def train(self) -> Union[float, List]:
"""
Train the agents network
:return: The loss of the training
"""
raise NotImplementedError("")
def act(self) -> ActionInfo:
"""
Get a decision of the next action to take.
The action is dependent on the current state which the agent holds from resetting the environment or from
the observe function.
:return: A tuple containing the actual action and additional info on the action
"""
raise NotImplementedError("")
def observe(self, env_response: EnvResponse) -> bool:
"""
Gets a response from the environment.
Processes this information for later use. For example, create a transition and store it in memory.
The action info (a class containing any info the agent wants to store regarding its action decision process) is
stored by the agent itself when deciding on the action.
:param env_response: a EnvResponse containing the response from the environment
:return: a done signal which is based on the agent knowledge. This can be different from the done signal from
the environment. For example, an agent can decide to finish the episode each time it gets some
intrinsic reward
"""
raise NotImplementedError("")
def save_checkpoint(self, checkpoint_prefix: str) -> None:
"""
Save the model of the agent to the disk. This can contain the network parameters, the memory of the agent, etc.
:param checkpoint_prefix: The prefix of the checkpoint file to save
:return: None
"""
raise NotImplementedError("")
def get_predictions(self, states: Dict, prediction_type: PredictionType) -> np.ndarray:
"""
Get a prediction from the agent with regard to the requested prediction_type. If the agent cannot predict this
type of prediction_type, or if there is more than possible way to do so, raise a ValueException.
:param states:
:param prediction_type:
:return: the agent's prediction
"""
raise NotImplementedError("")
def set_incoming_directive(self, action: ActionType) -> None:
    """
    Pass a higher level command (directive) to the agent.
    For example, a higher level agent can set the goal of the agent.

    Abstract on this interface - concrete agents must override it.
    :param action: the directive to pass to the agent
    :return: None
    :raises NotImplementedError: always, on this base interface
    """
    raise NotImplementedError("")
def collect_savers(self, parent_path_suffix: str) -> SaverCollection:
    """
    Collect all of agent savers

    Abstract on this interface - concrete agents must override it.
    :param parent_path_suffix: path suffix of the parent of the agent
        (could be name of level manager or composite agent)
    :return: collection of all agent savers
    :raises NotImplementedError: always, on this base interface
    """
    raise NotImplementedError("")
def handle_episode_ended(self) -> None:
    """
    Perform any bookkeeping required at the end of an episode: incrementing counters,
    refreshing values that depend on the full episode, updating logs, and so on.
    Called immediately after every episode terminates.

    Abstract on this interface - concrete agents must override it.
    :return: None
    :raises NotImplementedError: always, on this base interface
    """
    raise NotImplementedError("")
def run_off_policy_evaluation(self) -> None:
    """
    Evaluate the trained policy against a dataset using off-policy evaluation estimators.
    Only meaningful for off-policy RL algorithms.

    Abstract on this interface - concrete agents must override it.
    :return: None
    :raises NotImplementedError: always, on this base interface
    """
    raise NotImplementedError("")
from typing import Union
import numpy as np
from rl_coach.agents.dqn_agent import DQNAgentParameters, DQNAlgorithmParameters
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
class PALAlgorithmParameters(DQNAlgorithmParameters):
    """
    :param pal_alpha: (float)
        A factor that weights the amount by which the advantage learning update will be taken into account.
    :param persistent_advantage_learning: (bool)
        If set to True, the persistent mode of advantage learning will be used, which encourages the agent to take
        the same actions one after the other instead of changing actions.
    :param monte_carlo_mixing_rate: (float)
        The amount of monte carlo values to mix into the targets of the network. The monte carlo values are just the
        total discounted returns, and they can help reduce the time it takes for the network to update to the newly
        seen values, since it is not based on bootstrapping the current network values.
    """
    def __init__(self):
        super().__init__()
        # alpha in the PAL paper - strength of the advantage-learning correction
        self.pal_alpha = 0.9
        # False => regular Advantage Learning; True => Persistent Advantage Learning
        self.persistent_advantage_learning = False
        # fraction of the monte-carlo return mixed into each TD target
        self.monte_carlo_mixing_rate = 0.1
class PALAgentParameters(DQNAgentParameters):
    """Parameter bundle for the PAL agent: DQN defaults with a PAL algorithm and an episodic replay memory."""
    def __init__(self):
        super().__init__()
        self.algorithm = PALAlgorithmParameters()
        # episodic memory so that full-episode monte-carlo returns are available
        self.memory = EpisodicExperienceReplayParameters()

    @property
    def path(self):
        # import path used by Coach to locate and instantiate the agent class
        return 'rl_coach.agents.pal_agent:PALAgent'
# Persistent Advantage Learning - https://arxiv.org/pdf/1512.04860.pdf
class PALAgent(ValueOptimizationAgent):
    """Persistent Advantage Learning agent - https://arxiv.org/pdf/1512.04860.pdf"""
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # cache algorithm hyper-parameters that are used on every learn_from_batch call
        self.alpha = agent_parameters.algorithm.pal_alpha
        self.persistent = agent_parameters.algorithm.persistent_advantage_learning
        self.monte_carlo_mixing_rate = agent_parameters.algorithm.monte_carlo_mixing_rate

    def learn_from_batch(self, batch):
        """
        Build (P)AL-corrected TD targets for the batch and train the online network on them.
        :param batch: a Batch of transitions sampled from memory
        :return: (total_loss, losses, unclipped_grads) as returned by the network training call
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        # next state values
        q_st_plus_1_target, q_st_plus_1_online = self.networks['main'].parallel_prediction([
            (self.networks['main'].target_network, batch.next_states(network_keys)),
            (self.networks['main'].online_network, batch.next_states(network_keys))
        ])
        # double-DQN style action selection: argmax over the online network's next-state Q values
        selected_actions = np.argmax(q_st_plus_1_online, 1)
        v_st_plus_1_target = np.max(q_st_plus_1_target, 1)

        # current state values
        q_st_target, q_st_online = self.networks['main'].parallel_prediction([
            (self.networks['main'].target_network, batch.states(network_keys)),
            (self.networks['main'].online_network, batch.states(network_keys))
        ])
        v_st_target = np.max(q_st_target, 1)

        # calculate TD error
        # start from the online predictions so only the taken action's entry is modified per transition
        TD_targets = np.copy(q_st_online)
        total_returns = batch.n_step_discounted_rewards()
        for i in range(batch.size):
            # standard bootstrapped target for the taken action
            TD_targets[i, batch.actions()[i]] = batch.rewards()[i] + \
                (1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount * \
                q_st_plus_1_target[i][selected_actions[i]]
            # advantage gaps V(s) - Q(s, a) at the current and next states
            advantage_learning_update = v_st_target[i] - q_st_target[i, batch.actions()[i]]
            next_advantage_learning_update = v_st_plus_1_target[i] - q_st_plus_1_target[i, selected_actions[i]]
            # Persistent Advantage Learning or Regular Advantage Learning
            if self.persistent:
                TD_targets[i, batch.actions()[i]] -= self.alpha * min(advantage_learning_update, next_advantage_learning_update)
            else:
                TD_targets[i, batch.actions()[i]] -= self.alpha * advantage_learning_update

            # mixing monte carlo updates
            monte_carlo_target = total_returns[i]
            TD_targets[i, batch.actions()[i]] = (1 - self.monte_carlo_mixing_rate) * TD_targets[i, batch.actions()[i]] \
                + self.monte_carlo_mixing_rate * monte_carlo_target

        result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets)
        total_loss, losses, unclipped_grads = result[:3]

        return total_loss, losses, unclipped_grads
from typing import Union
import numpy as np
from rl_coach.agents.dqn_agent import DQNNetworkParameters, DQNAgentParameters
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.exploration_policies.bootstrapped import BootstrappedParameters
class BootstrappedDQNNetworkParameters(DQNNetworkParameters):
    """DQN network parameters with multiple copies of the Q head (one per bootstrap head)."""
    def __init__(self):
        super().__init__()
        # 10 bootstrap heads sharing a single torso
        self.heads_parameters[0].num_output_head_copies = 10
        # scale gradients from each head so the shared torso receives a comparable total magnitude
        self.heads_parameters[0].rescale_gradient_from_head_by_factor = 1.0/self.heads_parameters[0].num_output_head_copies
class BootstrappedDQNAgentParameters(DQNAgentParameters):
    """Parameter bundle for Bootstrapped DQN: bootstrapped exploration over a multi-head DQN network."""
    def __init__(self):
        super().__init__()
        self.exploration = BootstrappedParameters()
        self.network_wrappers = {"main": BootstrappedDQNNetworkParameters()}

    @property
    def path(self):
        # import path used by Coach to locate and instantiate the agent class
        return 'rl_coach.agents.bootstrapped_dqn_agent:BootstrappedDQNAgent'
# Bootstrapped DQN - https://arxiv.org/pdf/1602.04621.pdf
class BootstrappedDQNAgent(ValueOptimizationAgent):
    """Bootstrapped DQN agent - https://arxiv.org/pdf/1602.04621.pdf"""
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)

    def reset_internal_state(self):
        """Reset episode state and sample a new bootstrap head to act with for the coming episode."""
        super().reset_internal_state()
        self.exploration_policy.select_head()

    def learn_from_batch(self, batch):
        """
        Train every bootstrap head on the transitions its per-transition mask assigned to it.
        :param batch: a Batch of transitions; each transition carries a 'mask' entry in its info dict
        :return: (total_loss, losses, unclipped_grads) as returned by the network training call
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        next_states_online_values = self.networks['main'].online_network.predict(batch.next_states(network_keys))

        result = self.networks['main'].parallel_prediction([
            (self.networks['main'].target_network, batch.next_states(network_keys)),
            (self.networks['main'].online_network, batch.states(network_keys))
        ])
        # first num_q_heads outputs are the target network's next-state predictions,
        # the rest are the online network's current-state predictions
        q_st_plus_1 = result[:self.ap.exploration.architecture_num_q_heads]
        TD_targets = result[self.ap.exploration.architecture_num_q_heads:]

        # add Q value samples for logging
        # initialize with the current prediction so that we will
        # only update the action that we have actually done in this transition
        for i in range(batch.size):
            mask = batch[i].info['mask']
            for head_idx in range(self.ap.exploration.architecture_num_q_heads):
                # NOTE(review): this logs the full head prediction once per transition per head,
                # producing batch.size duplicate samples per head - confirm whether it was meant
                # to sit outside the transition loop
                self.q_values.add_sample(TD_targets[head_idx])
                # a head only learns from transitions its bernoulli mask assigned to it
                if mask[head_idx] == 1:
                    selected_action = np.argmax(next_states_online_values[head_idx][i], 0)
                    TD_targets[head_idx][i, batch.actions()[i]] = \
                        batch.rewards()[i] + (1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount \
                        * q_st_plus_1[head_idx][i][selected_action]

        result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets)
        total_loss, losses, unclipped_grads = result[:3]

        return total_loss, losses, unclipped_grads

    def observe(self, env_response):
        """Attach a bernoulli data-sharing mask (one bit per head) to the transition before storing it."""
        mask = np.random.binomial(1, self.ap.exploration.bootstrapped_data_sharing_probability,
                                  self.ap.exploration.architecture_num_q_heads)
        env_response.info['mask'] = mask
        return super().observe(env_response)
from copy import copy
from typing import Union
import numpy as np
from rl_coach.agents.dqn_agent import DQNAgentParameters, DQNNetworkParameters, DQNAlgorithmParameters
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.head_parameters import QuantileRegressionQHeadParameters
from rl_coach.core_types import StateType
from rl_coach.schedules import LinearSchedule
class QuantileRegressionDQNNetworkParameters(DQNNetworkParameters):
    """DQN network parameters with a quantile-regression Q head and the QR-DQN paper's optimizer settings."""
    def __init__(self):
        super().__init__()
        self.heads_parameters = [QuantileRegressionQHeadParameters()]
        # learning rate and Adam epsilon values taken from the QR-DQN paper
        self.learning_rate = 0.00005
        self.optimizer_epsilon = 0.01 / 32
class QuantileRegressionDQNAlgorithmParameters(DQNAlgorithmParameters):
    """
    :param atoms: (int)
        the number of atoms to predict for each action
    :param huber_loss_interval: (float)
        One of the huber loss parameters, and is referred to as :math:`\kappa` in the paper.
        It describes the interval [-k, k] in which the huber loss acts as a MSE loss.
    """
    def __init__(self):
        super().__init__()
        # number of quantiles predicted per action
        self.atoms = 200
        self.huber_loss_interval = 1  # called k in the paper
class QuantileRegressionDQNAgentParameters(DQNAgentParameters):
    """Parameter bundle for QR-DQN: quantile-regression algorithm, network, and exploration schedule."""
    def __init__(self):
        super().__init__()
        self.algorithm = QuantileRegressionDQNAlgorithmParameters()
        self.network_wrappers = {"main": QuantileRegressionDQNNetworkParameters()}
        # epsilon-greedy schedule decaying from 1 to 0.01 over 1M steps, as in the paper
        self.exploration.epsilon_schedule = LinearSchedule(1, 0.01, 1000000)
        self.exploration.evaluation_epsilon = 0.001

    @property
    def path(self):
        # import path used by Coach to locate and instantiate the agent class
        return 'rl_coach.agents.qr_dqn_agent:QuantileRegressionDQNAgent'
# Quantile Regression Deep Q Network - https://arxiv.org/pdf/1710.10044v1.pdf
class QuantileRegressionDQNAgent(ValueOptimizationAgent):
    """Quantile Regression Deep Q Network agent - https://arxiv.org/pdf/1710.10044v1.pdf"""
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # uniform weight over the atoms; Q value is the mean of the quantile values
        self.quantile_probabilities = np.ones(self.ap.algorithm.atoms) / float(self.ap.algorithm.atoms)

    def get_q_values(self, quantile_values):
        """Collapse per-action quantile values to Q values by averaging over the atoms dimension."""
        return np.dot(quantile_values, self.quantile_probabilities)

    # prediction's format is (batch,actions,atoms)
    def get_all_q_values_for_states(self, states: StateType):
        """Return per-action Q values for the given states, or None if the exploration policy does not need them."""
        if self.exploration_policy.requires_action_values():
            quantile_values = self.get_prediction(states)
            actions_q_values = self.get_q_values(quantile_values)
        else:
            actions_q_values = None
        return actions_q_values

    # prediction's format is (batch,actions,atoms)
    def get_all_q_values_for_states_and_softmax_probabilities(self, states: StateType):
        """Return (Q values, head softmax probabilities) for the given states, or (None, None) if not needed."""
        actions_q_values, softmax_probabilities = None, None
        if self.exploration_policy.requires_action_values():
            outputs = copy(self.networks['main'].online_network.outputs)
            outputs.append(self.networks['main'].online_network.output_heads[0].softmax)
            quantile_values, softmax_probabilities = self.get_prediction(states, outputs)
            actions_q_values = self.get_q_values(quantile_values)
        return actions_q_values, softmax_probabilities

    def learn_from_batch(self, batch):
        """
        Build quantile-regression targets from the target network and train the online network.
        :param batch: a Batch of transitions sampled from memory
        :return: (total_loss, losses, unclipped_grads) as returned by the network training call
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()

        # get the quantiles of the next states and current states
        next_state_quantiles, current_quantiles = self.networks['main'].parallel_prediction([
            (self.networks['main'].target_network, batch.next_states(network_keys)),
            (self.networks['main'].online_network, batch.states(network_keys))
        ])

        # add Q value samples for logging
        self.q_values.add_sample(self.get_q_values(current_quantiles))

        # get the optimal actions to take for the next states
        target_actions = np.argmax(self.get_q_values(next_state_quantiles), axis=1)

        # calculate the Bellman update
        batch_idx = list(range(batch.size))
        TD_targets = batch.rewards(True) + (1.0 - batch.game_overs(True)) * self.ap.algorithm.discount \
                     * next_state_quantiles[batch_idx, target_actions]

        # get the locations of the selected actions within the batch for indexing purposes
        actions_locations = [[b, a] for b, a in zip(batch_idx, batch.actions())]

        # calculate the cumulative quantile probabilities and reorder them to fit the sorted quantiles order
        cumulative_probabilities = np.array(range(self.ap.algorithm.atoms + 1)) / float(self.ap.algorithm.atoms)  # tau_i
        quantile_midpoints = 0.5*(cumulative_probabilities[1:] + cumulative_probabilities[:-1])  # tau^hat_i
        quantile_midpoints = np.tile(quantile_midpoints, (batch.size, 1))
        sorted_quantiles = np.argsort(current_quantiles[batch_idx, batch.actions()])
        for idx in range(batch.size):
            quantile_midpoints[idx, :] = quantile_midpoints[idx, sorted_quantiles[idx]]

        # train
        result = self.networks['main'].train_and_sync_networks({
            **batch.states(network_keys),
            'output_0_0': actions_locations,
            'output_0_1': quantile_midpoints,
        }, TD_targets)

        total_loss, losses, unclipped_grads = result[:3]

        return total_loss, losses, unclipped_grads
import copy
from collections import OrderedDict
from typing import Union
import numpy as np
from rl_coach.agents.actor_critic_agent import ActorCriticAgent
from rl_coach.agents.policy_optimization_agent import PolicyGradientRescaler
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import PPOHeadParameters, VHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, \
AgentParameters, DistributedTaskParameters
from rl_coach.core_types import EnvironmentSteps, Batch
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace
from rl_coach.utils import force_list
class PPOCriticNetworkParameters(NetworkParameters):
    """Network parameters for the PPO critic: tanh embedder/middleware with a single V head."""
    def __init__(self):
        super().__init__()
        self.input_embedders_parameters = {'observation': InputEmbedderParameters(activation_function='tanh')}
        self.middleware_parameters = FCMiddlewareParameters(activation_function='tanh')
        self.heads_parameters = [VHeadParameters()]
        self.async_training = True
        self.l2_regularization = 0
        # target network holds the previous value function for the mixed value targets
        self.create_target_network = True
        self.batch_size = 128
class PPOActorNetworkParameters(NetworkParameters):
    """Network parameters for the PPO actor: tanh embedder/middleware with a PPO policy head."""
    def __init__(self):
        super().__init__()
        self.input_embedders_parameters = {'observation': InputEmbedderParameters(activation_function='tanh')}
        self.middleware_parameters = FCMiddlewareParameters(activation_function='tanh')
        self.heads_parameters = [PPOHeadParameters()]
        self.optimizer_type = 'Adam'
        self.async_training = True
        self.l2_regularization = 0
        # target network holds the old policy used for the likelihood ratio / KL terms
        self.create_target_network = True
        self.batch_size = 128
class PPOAlgorithmParameters(AlgorithmParameters):
    """
    :param policy_gradient_rescaler: (PolicyGradientRescaler)
        This represents how the critic will be used to update the actor. The critic value function is typically used
        to rescale the gradients calculated by the actor. There are several ways for doing this, such as using the
        advantage of the action, or the generalized advantage estimation (GAE) value.
    :param gae_lambda: (float)
        The :math:`\lambda` value is used within the GAE function in order to weight different bootstrap length
        estimations. Typical values are in the range 0.9-1, and define an exponential decay over the different
        n-step estimations.
    :param target_kl_divergence: (float)
        The target kl divergence between the current policy distribution and the new policy. PPO uses a heuristic to
        bring the KL divergence to this value, by adding a penalty if the kl divergence is higher.
    :param initial_kl_coefficient: (float)
        The initial weight that will be given to the KL divergence between the current and the new policy in the
        regularization factor.
    :param high_kl_penalty_coefficient: (float)
        The penalty that will be given for KL divergence values which are higher than what was defined as the target.
    :param clip_likelihood_ratio_using_epsilon: (float)
        If not None, the likelihood ratio between the current and new policy in the PPO loss function will be
        clipped to the range [1-clip_likelihood_ratio_using_epsilon, 1+clip_likelihood_ratio_using_epsilon].
        This is typically used in the Clipped PPO version of PPO, and should be set to None in regular PPO
        implementations.
    :param value_targets_mix_fraction: (float)
        The targets for the value network are an exponential weighted moving average which uses this mix fraction to
        define how much of the new targets will be taken into account when calculating the loss.
        This value should be set to the range (0,1], where 1 means that only the new targets will be taken into account.
    :param estimate_state_value_using_gae: (bool)
        If set to True, the state value will be estimated using the GAE technique.
    :param use_kl_regularization: (bool)
        If set to True, the loss function will be regularized using the KL divergence between the current and new
        policy, to bound the change of the policy during the network update.
    :param beta_entropy: (float)
        An entropy regularization term can be added to the loss function in order to control exploration. This term
        is weighted using the :math:`\beta` value defined by beta_entropy.
    """
    def __init__(self):
        super().__init__()
        self.policy_gradient_rescaler = PolicyGradientRescaler.GAE
        self.gae_lambda = 0.96
        self.target_kl_divergence = 0.01
        self.initial_kl_coefficient = 1.0
        self.high_kl_penalty_coefficient = 1000
        self.clip_likelihood_ratio_using_epsilon = None
        self.value_targets_mix_fraction = 0.1
        self.estimate_state_value_using_gae = True
        self.use_kl_regularization = True
        self.beta_entropy = 0.01
        # collect this many environment steps before each training phase
        self.num_consecutive_playing_steps = EnvironmentSteps(5000)
        self.act_for_full_episodes = True
class PPOAgentParameters(AgentParameters):
    """Parameter bundle for the PPO agent: PPO algorithm, per-action-space exploration, episodic memory,
    and separate actor and critic networks."""
    def __init__(self):
        super().__init__(algorithm=PPOAlgorithmParameters(),
                         exploration={DiscreteActionSpace: CategoricalParameters(),
                                      BoxActionSpace: AdditiveNoiseParameters()},
                         memory=EpisodicExperienceReplayParameters(),
                         networks={"critic": PPOCriticNetworkParameters(), "actor": PPOActorNetworkParameters()})

    @property
    def path(self):
        # import path used by Coach to locate and instantiate the agent class
        return 'rl_coach.agents.ppo_agent:PPOAgent'
# Proximal Policy Optimization - https://arxiv.org/pdf/1707.06347.pdf
class PPOAgent(ActorCriticAgent):
    """Proximal Policy Optimization agent - https://arxiv.org/pdf/1707.06347.pdf"""
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals definition
        self.value_loss = self.register_signal('Value Loss')
        self.policy_loss = self.register_signal('Policy Loss')
        self.kl_divergence = self.register_signal('KL Divergence')
        # mean KL of the last training epoch; drives the adaptive KL penalty coefficient
        self.total_kl_divergence_during_training_process = 0.0
        self.unclipped_grads = self.register_signal('Grads (unclipped)')

    def fill_advantages(self, batch):
        """
        Compute an advantage estimate for every transition and store it in transition.info['advantage'].
        :param batch: the list of transitions collected during the playing phase
        """
        batch = Batch(batch)
        network_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()

        # * Found not to have any impact *
        # current_states_with_timestep = self.concat_state_and_timestep(batch)

        current_state_values = self.networks['critic'].online_network.predict(batch.states(network_keys)).squeeze()
        total_returns = batch.n_step_discounted_rewards()
        # calculate advantages
        advantages = []
        if self.policy_gradient_rescaler == PolicyGradientRescaler.A_VALUE:
            advantages = total_returns - current_state_values
        elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE:
            # get bootstraps
            episode_start_idx = 0
            advantages = np.array([])
            # current_state_values[batch.game_overs()] = 0
            for idx, game_over in enumerate(batch.game_overs()):
                if game_over:
                    # get advantages for the rollout
                    # terminal state is bootstrapped with a value of zero
                    value_bootstrapping = np.zeros((1,))
                    rollout_state_values = np.append(current_state_values[episode_start_idx:idx+1], value_bootstrapping)

                    rollout_advantages, _ = \
                        self.get_general_advantage_estimation_values(batch.rewards()[episode_start_idx:idx+1],
                                                                     rollout_state_values)
                    episode_start_idx = idx + 1
                    advantages = np.append(advantages, rollout_advantages)
        else:
            screen.warning("WARNING: The requested policy gradient rescaler is not available")

        # standardize
        # NOTE(review): np.std(advantages) can be zero for a degenerate batch, which would produce
        # NaN advantages - confirm upstream guarantees non-constant advantages
        advantages = (advantages - np.mean(advantages)) / np.std(advantages)

        # TODO: this will be problematic with a shared memory
        for transition, advantage in zip(self.memory.transitions, advantages):
            transition.info['advantage'] = advantage

        self.action_advantages.add_sample(advantages)

    def train_value_network(self, dataset, epochs):
        """
        Train the critic on the discounted-return targets for the given number of epochs.
        :param dataset: the list of transitions to train on
        :param epochs: the number of passes over the dataset
        :return: the mean value loss over all updates
        """
        loss = []
        batch = Batch(dataset)
        network_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()

        # * Found not to have any impact *
        # add a timestep to the observation
        # current_states_with_timestep = self.concat_state_and_timestep(dataset)

        mix_fraction = self.ap.algorithm.value_targets_mix_fraction
        total_returns = batch.n_step_discounted_rewards(True)
        for j in range(epochs):
            curr_batch_size = batch.size
            # first-order optimizers use mini-batches; LBFGS consumes the full dataset at once
            if self.networks['critic'].online_network.optimizer_type != 'LBFGS':
                curr_batch_size = self.ap.network_wrappers['critic'].batch_size
            for i in range(batch.size // curr_batch_size):
                # split to batches for first order optimization techniques
                current_states_batch = {
                    k: v[i * curr_batch_size:(i + 1) * curr_batch_size]
                    for k, v in batch.states(network_keys).items()
                }
                total_return_batch = total_returns[i * curr_batch_size:(i + 1) * curr_batch_size]
                old_policy_values = force_list(self.networks['critic'].target_network.predict(
                    current_states_batch).squeeze())
                if self.networks['critic'].online_network.optimizer_type != 'LBFGS':
                    targets = total_return_batch
                else:
                    # mix the new returns into the current predictions (exponential moving average of targets)
                    current_values = self.networks['critic'].online_network.predict(current_states_batch)
                    targets = current_values * (1 - mix_fraction) + total_return_batch * mix_fraction

                inputs = copy.copy(current_states_batch)
                # feed the old (target network) values as extra head inputs when the head expects them
                for input_index, input in enumerate(old_policy_values):
                    name = 'output_0_{}'.format(input_index)
                    if name in self.networks['critic'].online_network.inputs:
                        inputs[name] = input

                value_loss = self.networks['critic'].online_network.accumulate_gradients(inputs, targets)

                self.networks['critic'].apply_gradients_to_online_network()
                if isinstance(self.ap.task_parameters, DistributedTaskParameters):
                    self.networks['critic'].apply_gradients_to_global_network()

                self.networks['critic'].online_network.reset_accumulated_gradients()

                loss.append([value_loss[0]])
        loss = np.mean(loss, 0)
        return loss

    def concat_state_and_timestep(self, dataset):
        """Append the transition's timestep to each observation (currently unused - kept for experimentation)."""
        current_states_with_timestep = [np.append(transition.state['observation'], transition.info['timestep'])
                                        for transition in dataset]
        current_states_with_timestep = np.expand_dims(current_states_with_timestep, -1)
        return current_states_with_timestep

    def train_policy_network(self, dataset, epochs):
        """
        Train the actor with the PPO surrogate objective for the given number of epochs.
        :param dataset: the list of transitions (with filled advantages) to train on
        :param epochs: the number of passes over the dataset
        :return: the mean total loss of the last epoch
        """
        loss = []
        for j in range(epochs):
            loss = {
                'total_loss': [],
                'policy_losses': [],
                'unclipped_grads': [],
                'fetch_result': []
            }
            #shuffle(dataset)
            for i in range(len(dataset) // self.ap.network_wrappers['actor'].batch_size):
                batch = Batch(dataset[i * self.ap.network_wrappers['actor'].batch_size:
                                      (i + 1) * self.ap.network_wrappers['actor'].batch_size])

                network_keys = self.ap.network_wrappers['actor'].input_embedders_parameters.keys()

                advantages = batch.info('advantage')
                actions = batch.actions()
                if not isinstance(self.spaces.action, DiscreteActionSpace) and len(actions.shape) == 1:
                    actions = np.expand_dims(actions, -1)

                # get old policy probabilities and distribution
                old_policy = force_list(self.networks['actor'].target_network.predict(batch.states(network_keys)))

                # calculate gradients and apply on both the local policy network and on the global policy network
                fetches = [self.networks['actor'].online_network.output_heads[0].kl_divergence,
                           self.networks['actor'].online_network.output_heads[0].entropy]

                inputs = copy.copy(batch.states(network_keys))
                inputs['output_0_0'] = actions

                # old_policy_distribution needs to be represented as a list, because in the event of discrete controls,
                # it has just a mean. otherwise, it has both a mean and standard deviation
                for input_index, input in enumerate(old_policy):
                    inputs['output_0_{}'.format(input_index + 1)] = input

                total_loss, policy_losses, unclipped_grads, fetch_result =\
                    self.networks['actor'].online_network.accumulate_gradients(
                        inputs, [advantages], additional_fetches=fetches)

                self.networks['actor'].apply_gradients_to_online_network()
                if isinstance(self.ap.task_parameters, DistributedTaskParameters):
                    self.networks['actor'].apply_gradients_to_global_network()

                self.networks['actor'].online_network.reset_accumulated_gradients()

                loss['total_loss'].append(total_loss)
                loss['policy_losses'].append(policy_losses)
                loss['unclipped_grads'].append(unclipped_grads)
                loss['fetch_result'].append(fetch_result)

                self.unclipped_grads.add_sample(unclipped_grads)

            for key in loss.keys():
                loss[key] = np.mean(loss[key], 0)

            if self.ap.network_wrappers['critic'].learning_rate_decay_rate != 0:
                curr_learning_rate = self.networks['critic'].online_network.get_variable_value(self.ap.learning_rate)
                self.curr_learning_rate.add_sample(curr_learning_rate)
            else:
                curr_learning_rate = self.ap.network_wrappers['critic'].learning_rate

            # log training parameters
            screen.log_dict(
                OrderedDict([
                    ("Surrogate loss", loss['policy_losses'][0]),
                    ("KL divergence", loss['fetch_result'][0]),
                    ("Entropy", loss['fetch_result'][1]),
                    ("training epoch", j),
                    ("learning_rate", curr_learning_rate)
                ]),
                prefix="Policy training"
            )

        # the KL of the last epoch drives the adaptive penalty coefficient update
        self.total_kl_divergence_during_training_process = loss['fetch_result'][0]
        self.entropy.add_sample(loss['fetch_result'][1])
        self.kl_divergence.add_sample(loss['fetch_result'][0])
        return loss['total_loss']

    def update_kl_coefficient(self):
        """Adaptively raise or lower the KL penalty coefficient to steer KL toward the target value."""
        # John Schulman takes the mean kl divergence only over the last epoch which is strange but we will follow
        # his implementation for now because we know it works well
        screen.log_title("KL = {}".format(self.total_kl_divergence_during_training_process))

        # update kl coefficient
        kl_target = self.ap.algorithm.target_kl_divergence
        kl_coefficient = self.networks['actor'].online_network.get_variable_value(
            self.networks['actor'].online_network.output_heads[0].kl_coefficient)
        new_kl_coefficient = kl_coefficient
        if self.total_kl_divergence_during_training_process > 1.3 * kl_target:
            # kl too high => increase regularization
            new_kl_coefficient *= 1.5
        elif self.total_kl_divergence_during_training_process < 0.7 * kl_target:
            # kl too low => decrease regularization
            new_kl_coefficient /= 1.5

        # update the kl coefficient variable
        if kl_coefficient != new_kl_coefficient:
            self.networks['actor'].online_network.set_variable_value(
                self.networks['actor'].online_network.output_heads[0].assign_kl_coefficient,
                new_kl_coefficient,
                self.networks['actor'].online_network.output_heads[0].kl_coefficient_ph)
        screen.log_title("KL penalty coefficient change = {} -> {}".format(kl_coefficient, new_kl_coefficient))

    def post_training_commands(self):
        """After a training phase: adapt the KL coefficient (if enabled) and discard the collected transitions."""
        if self.ap.algorithm.use_kl_regularization:
            self.update_kl_coefficient()

        # clean memory
        self.call_memory('clean')

    def train(self):
        """
        Run one full PPO training phase: fill advantages, then train the critic and the actor
        on the collected transitions.
        :return: the concatenated critic and actor losses
        """
        loss = 0
        if self._should_train():
            for network in self.networks.values():
                network.set_is_training(True)

            for training_step in range(self.ap.algorithm.num_consecutive_training_steps):
                # copy the global/online weights into the local/target networks before training
                self.networks['actor'].sync()
                self.networks['critic'].sync()

                dataset = self.memory.transitions
                self.fill_advantages(dataset)

                # take only the requested number of steps
                dataset = dataset[:self.ap.algorithm.num_consecutive_playing_steps.num_steps]

                value_loss = self.train_value_network(dataset, 1)
                policy_loss = self.train_policy_network(dataset, 10)

                self.value_loss.add_sample(value_loss)
                self.policy_loss.add_sample(policy_loss)

            for network in self.networks.values():
                network.set_is_training(False)

            self.post_training_commands()
            self.training_iteration += 1
            self.update_log()  # should be done in order to update the data that has been accumulated * while not playing *
            return np.append(value_loss, policy_loss)

    def get_prediction(self, states):
        """Run the online actor network on the given states and return its raw outputs."""
        tf_input_state = self.prepare_batch_for_inference(states, "actor")
        return self.networks['actor'].online_network.predict(tf_input_state)
from typing import Union
import numpy as np
import scipy.signal
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent, PolicyGradientRescaler
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import PolicyHeadParameters, VHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, \
AgentParameters
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.exploration_policies.continuous_entropy import ContinuousEntropyParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace
from rl_coach.utils import last_sample
class ActorCriticAlgorithmParameters(AlgorithmParameters):
    """
    :param policy_gradient_rescaler: (PolicyGradientRescaler)
        The value that will be used to rescale the policy gradient
    :param apply_gradients_every_x_episodes: (int)
        The number of episodes to wait before applying the accumulated gradients to the network.
        The training iterations only accumulate gradients without actually applying them.
    :param beta_entropy: (float)
        The weight that will be given to the entropy regularization which is used in order to improve exploration.
    :param num_steps_between_gradient_updates: (int)
        Every num_steps_between_gradient_updates transitions will be considered as a single batch and use for
        accumulating gradients. This is also the number of steps used for bootstrapping according to the n-step formulation.
    :param gae_lambda: (float)
        If the policy gradient rescaler was defined as PolicyGradientRescaler.GAE, the generalized advantage estimation
        scheme will be used, in which case the lambda value controls the decay for the different n-step lengths.
    :param estimate_state_value_using_gae: (bool)
        If set to True, the state value targets for the V head will be estimated using the GAE scheme.
    """
    def __init__(self):
        super().__init__()
        self.policy_gradient_rescaler = PolicyGradientRescaler.A_VALUE
        self.apply_gradients_every_x_episodes = 5
        # no entropy regularization by default
        self.beta_entropy = 0
        self.num_steps_between_gradient_updates = 5000  # this is called t_max in all the papers
        self.gae_lambda = 0.96
        self.estimate_state_value_using_gae = False
class ActorCriticNetworkParameters(NetworkParameters):
    """Network parameters for A3C-style actor-critic: shared torso with a V head and a policy head."""
    def __init__(self):
        super().__init__()
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        # value head weighted at 0.5, policy head at 1.0
        self.heads_parameters = [VHeadParameters(loss_weight=0.5), PolicyHeadParameters(loss_weight=1.0)]
        self.optimizer_type = 'Adam'
        self.clip_gradients = 40.0
        self.async_training = True
class ActorCriticAgentParameters(AgentParameters):
    """Parameter bundle for the actor-critic agent: algorithm, per-action-space exploration,
    single-episode buffer, and a single shared network."""
    def __init__(self):
        super().__init__(algorithm=ActorCriticAlgorithmParameters(),
                         exploration={DiscreteActionSpace: CategoricalParameters(),
                                      BoxActionSpace: ContinuousEntropyParameters()},
                         memory=SingleEpisodeBufferParameters(),
                         networks={"main": ActorCriticNetworkParameters()})

    @property
    def path(self):
        # import path used by Coach to locate and instantiate the agent class
        return 'rl_coach.agents.actor_critic_agent:ActorCriticAgent'
# Actor Critic - https://arxiv.org/abs/1602.01783
class ActorCriticAgent(PolicyOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
    super().__init__(agent_parameters, parent)
    self.last_gradient_update_step_idx = 0
    # signals for logging/monitoring
    self.action_advantages = self.register_signal('Advantages')
    self.state_values = self.register_signal('Values')
    self.value_loss = self.register_signal('Value Loss')
    self.policy_loss = self.register_signal('Policy Loss')
# Discounting function used to calculate discounted returns.
def discount(self, x, gamma):
    """
    Compute discounted cumulative sums: out[i] = x[i] + gamma * out[i + 1], with out[len(x)] = 0.

    Implemented as an IIR filter applied to the time-reversed signal, which realizes exactly
    the recurrence above.
    :param x: array of per-step values (filtered along axis 0)
    :param gamma: the discount factor
    :return: an array of the same shape as x holding the discounted sums
    """
    reversed_signal = x[::-1]
    filtered = scipy.signal.lfilter([1], [1, -gamma], reversed_signal, axis=0)
    return filtered[::-1]
def get_general_advantage_estimation_values(self, rewards, values):
# values contain n+1 elements (t ... t+n+1), rewards contain n elements (t ... t + n)
bootstrap_extended_rewards = np.array(rewards.tolist() + [values[-1]])
# Approximation based calculation of GAE (mathematically correct only when Tmax = inf,
# although in practice works even in much smaller Tmax values, e.g. 20)
deltas = rewards + self.ap.algorithm.discount * values[1:] - values[:-1]
gae = self.discount(deltas, self.ap.algorithm.discount * self.ap.algorithm.gae_lambda)
if self.ap.algorithm.estimate_state_value_using_gae:
discounted_returns = np.expand_dims(gae + values[:-1], -1)
else:
discounted_returns = np.expand_dims(np.array(self.discount(bootstrap_extended_rewards,
self.ap.algorithm.discount)), 1)[:-1]
return gae, discounted_returns
def learn_from_batch(self, batch):
# batch contains a list of episodes to learn from
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# get the values for the current states
result = self.networks['main'].online_network.predict(batch.states(network_keys))
current_state_values = result[0]
self.state_values.add_sample(current_state_values)
# the targets for the state value estimator
num_transitions = batch.size
state_value_head_targets = np.zeros((num_transitions, 1))
# estimate the advantage function
action_advantages = np.zeros((num_transitions, 1))
if self.policy_gradient_rescaler == PolicyGradientRescaler.A_VALUE:
if batch.game_overs()[-1]:
R = 0
else:
R = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]
for i in reversed(range(num_transitions)):
R = batch.rewards()[i] + self.ap.algorithm.discount * R
state_value_head_targets[i] = R
action_advantages[i] = R - current_state_values[i]
elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE:
# get bootstraps
bootstrapped_value = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]
values = np.append(current_state_values, bootstrapped_value)
if batch.game_overs()[-1]:
values[-1] = 0
# get general discounted returns table
gae_values, state_value_head_targets = self.get_general_advantage_estimation_values(batch.rewards(), values)
action_advantages = np.vstack(gae_values)
else:
screen.warning("WARNING: The requested policy gradient rescaler is not available")
action_advantages = action_advantages.squeeze(axis=-1)
actions = batch.actions()
if not isinstance(self.spaces.action, DiscreteActionSpace) and len(actions.shape) < 2:
actions = np.expand_dims(actions, -1)
# train
result = self.networks['main'].online_network.accumulate_gradients({**batch.states(network_keys),
'output_1_0': actions},
[state_value_head_targets, action_advantages])
# logging
total_loss, losses, unclipped_grads = result[:3]
self.action_advantages.add_sample(action_advantages)
self.unclipped_grads.add_sample(unclipped_grads)
self.value_loss.add_sample(losses[0])
self.policy_loss.add_sample(losses[1])
return total_loss, losses, unclipped_grads
def get_prediction(self, states):
tf_input_state = self.prepare_batch_for_inference(states, "main")
return self.networks['main'].online_network.predict(tf_input_state)[1:] # index 0 is the state value | /rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/agents/actor_critic_agent.py | 0.935568 | 0.480052 | actor_critic_agent.py | pypi |
import redis
import pickle
import uuid
import time
from rl_coach.memories.backend.memory import MemoryBackend, MemoryBackendParameters
from rl_coach.core_types import Transition, Episode, EnvironmentSteps, EnvironmentEpisodes
class RedisPubSubMemoryBackendParameters(MemoryBackendParameters):
    """Parameters for the Redis Pub/Sub memory backend used in distributed Coach runs."""
    def __init__(self, redis_address: str="", redis_port: int=6379, channel: str="channel-{}".format(uuid.uuid4()),
                 orchestrator_params: dict=None, run_type='trainer', orchestrator_type: str = "kubernetes",
                 deployed: bool = False):
        """
        :param redis_address: hostname of the redis server ('' until one is deployed)
        :param redis_port: port of the redis server
        :param channel: pub/sub channel name. NOTE: the default value is generated once at
                        import time, so every instance relying on the default shares one channel
        :param orchestrator_params: extra orchestrator parameters (e.g. 'namespace')
        :param run_type: the role of this process (e.g. 'trainer')
        :param orchestrator_type: the orchestrator to deploy redis into ('kubernetes' only)
        :param deployed: True if redis is already deployed externally and should not be
                         managed by this backend (fixed annotation: this is a bool, not str)
        """
        self.redis_address = redis_address
        self.redis_port = redis_port
        self.channel = channel
        if not orchestrator_params:
            orchestrator_params = {}
        self.orchestrator_params = orchestrator_params
        self.run_type = run_type
        self.store_type = "redispubsub"
        self.orchestrator_type = orchestrator_type
        self.deployed = deployed
class RedisPubSubBackend(MemoryBackend):
    """
    A memory backend which transfers the experiences from the rollout to the training worker using Redis Pub/Sub in
    Coach when distributed mode is used.
    """

    def __init__(self, params: RedisPubSubMemoryBackendParameters):
        """
        :param params: The Redis parameters to be used with this Redis Pub/Sub instance.
        """
        self.params = params
        self.redis_connection = redis.Redis(self.params.redis_address, self.params.redis_port)
        # unique names so that multiple experiments can coexist in one orchestrator
        self.redis_server_name = 'redis-server-{}'.format(uuid.uuid4())
        self.redis_service_name = 'redis-service-{}'.format(uuid.uuid4())

    def store(self, obj):
        """
        Publish a pickled object on the configured channel.
        :param obj: The object to store in memory. The object is either a Transition or Episode type.
        """
        self.redis_connection.publish(self.params.channel, pickle.dumps(obj))

    def deploy(self):
        """
        Deploy the Redis Pub/Sub service in an orchestrator, unless an externally
        managed deployment is in use.
        """
        if not self.params.deployed:
            if self.params.orchestrator_type == 'kubernetes':
                self.deploy_kubernetes()
            # Wait till subscribe to the channel is possible or else it will cause delays in the trainer.
            time.sleep(10)

    def deploy_kubernetes(self):
        """
        Deploy the Redis Pub/Sub service in Kubernetes orchestrator.
        :return: True if both the deployment and the service were created, False otherwise
        """
        if 'namespace' not in self.params.orchestrator_params:
            self.params.orchestrator_params['namespace'] = "default"

        from kubernetes import client, config

        # a single-replica redis pod
        container = client.V1Container(
            name=self.redis_server_name,
            image='redis:4-alpine',
            resources=client.V1ResourceRequirements(
                limits={
                    "cpu": "8",
                    "memory": "4Gi"
                    # "nvidia.com/gpu": "0",
                }
            ),
        )
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={'app': self.redis_server_name}),
            spec=client.V1PodSpec(
                containers=[container]
            )
        )
        deployment_spec = client.V1DeploymentSpec(
            replicas=1,
            template=template,
            selector=client.V1LabelSelector(
                match_labels={'app': self.redis_server_name}
            )
        )

        deployment = client.V1Deployment(
            api_version='apps/v1',
            kind='Deployment',
            metadata=client.V1ObjectMeta(name=self.redis_server_name, labels={'app': self.redis_server_name}),
            spec=deployment_spec
        )

        config.load_kube_config()
        api_client = client.AppsV1Api()
        try:
            print(self.params.orchestrator_params)
            api_client.create_namespaced_deployment(self.params.orchestrator_params['namespace'], deployment)
        except client.rest.ApiException as e:
            # BUGFIX: the exception was passed as a second print() argument, leaving %s unformatted
            print("Got exception: %s\n while creating redis-server" % e)
            return False

        # a cluster-internal service exposing the redis port
        core_v1_api = client.CoreV1Api()
        service = client.V1Service(
            api_version='v1',
            kind='Service',
            metadata=client.V1ObjectMeta(
                name=self.redis_service_name
            ),
            spec=client.V1ServiceSpec(
                selector={'app': self.redis_server_name},
                ports=[client.V1ServicePort(
                    protocol='TCP',
                    port=6379,
                    target_port=6379
                )]
            )
        )

        try:
            core_v1_api.create_namespaced_service(self.params.orchestrator_params['namespace'], service)
            # point the shared params at the newly created in-cluster service
            self.params.redis_address = '{}.{}.svc'.format(
                self.redis_service_name, self.params.orchestrator_params['namespace']
            )
            self.params.redis_port = 6379
            return True
        except client.rest.ApiException as e:
            print("Got exception: %s\n while creating a service for redis-server" % e)
            return False

    def undeploy(self):
        """
        Undeploy the Redis Pub/Sub service in an orchestrator.
        """
        if self.params.deployed:
            # externally managed deployment - nothing to tear down
            return

        # BUGFIX: the kubernetes client was imported twice in this method
        from kubernetes import client

        api_client = client.AppsV1Api()
        delete_options = client.V1DeleteOptions()
        try:
            api_client.delete_namespaced_deployment(self.redis_server_name, self.params.orchestrator_params['namespace'], delete_options)
        except client.rest.ApiException as e:
            print("Got exception: %s\n while deleting redis-server" % e)

        api_client = client.CoreV1Api()
        try:
            api_client.delete_namespaced_service(self.redis_service_name, self.params.orchestrator_params['namespace'], delete_options)
        except client.rest.ApiException as e:
            # BUGFIX: the message previously said "redis-server" while deleting the service
            print("Got exception: %s\n while deleting redis-service" % e)

    def sample(self, size):
        """Random-access sampling is not supported by a pub/sub backend."""
        pass

    def fetch(self, num_consecutive_playing_steps=None):
        """
        :param num_consecutive_playing_steps: The number steps to fetch.
        :return: a generator yielding the received transitions
        """
        return RedisSub(redis_address=self.params.redis_address, redis_port=self.params.redis_port, channel=self.params.channel).run(num_consecutive_playing_steps)

    def subscribe(self, agent):
        """
        :param agent: The agent in use (currently unused).
        :return: a RedisSub subscribed to this backend's channel
        """
        redis_sub = RedisSub(redis_address=self.params.redis_address, redis_port=self.params.redis_port, channel=self.params.channel)
        return redis_sub

    def get_endpoint(self):
        """Return the redis server address and port as a dict."""
        return {'redis_address': self.params.redis_address,
                'redis_port': self.params.redis_port}
class RedisSub(object):
    """Subscriber side of the Redis Pub/Sub channel: unpickles published Transition /
    Episode objects and yields transitions until enough steps or episodes are seen."""
    def __init__(self, redis_address: str = "localhost", redis_port: int=6379, channel: str = "PubsubChannel"):
        super().__init__()
        self.redis_connection = redis.Redis(redis_address, redis_port)
        self.pubsub = self.redis_connection.pubsub()
        self.channel = channel
        # NOTE: pubsub.subscribe() returns None; the attribute is kept for backward compatibility
        self.subscriber = self.pubsub.subscribe(self.channel)

    def run(self, num_consecutive_playing_steps):
        """
        Yield transitions from the channel until the requested amount is reached.
        :param num_consecutive_playing_steps: The number steps to fetch, either as
               EnvironmentSteps (count transitions) or EnvironmentEpisodes (count episodes)
        """
        transitions = 0
        episodes = 0
        steps = 0
        for message in self.pubsub.listen():
            if message and 'data' in message:
                try:
                    obj = pickle.loads(message['data'])
                    if isinstance(obj, Transition):
                        transitions += 1
                        if obj.game_over:
                            episodes += 1
                        yield obj
                    elif isinstance(obj, Episode):
                        episodes += 1
                        transitions += len(obj.transitions)
                        yield from obj.transitions
                except Exception:
                    # non-pickle payloads (e.g. the subscribe confirmation message) are skipped
                    continue
            if isinstance(num_consecutive_playing_steps, EnvironmentSteps):
                steps = transitions
            elif isinstance(num_consecutive_playing_steps, EnvironmentEpisodes):
                steps = episodes
            if steps >= num_consecutive_playing_steps.num_steps:
                break
import os
import pickle
import numpy as np
try:
import annoy
from annoy import AnnoyIndex
except ImportError:
from rl_coach.logger import failed_imports
failed_imports.append("annoy")
class AnnoyDictionary(object):
    """A fixed-capacity key -> value store with approximate nearest-neighbor lookup
    over the keys, backed by an Annoy index.

    New (key, value) pairs are buffered and written into the index in batches
    (Annoy indexes must be rebuilt to add items). When full, the least recently
    used entry is overwritten.
    """
    def __init__(self, dict_size, key_width, new_value_shift_coefficient=0.1, batch_size=100, key_error_threshold=0.01,
                 num_neighbors=50, override_existing_keys=True, rebuild_on_every_update=False):
        """
        :param dict_size: maximum number of entries
        :param key_width: dimensionality of the key embeddings
        :param new_value_shift_coefficient: step size for updating the value of an existing key
        :param batch_size: initial number of buffered additions that triggers an index rebuild
        :param key_error_threshold: keys closer than this distance are considered the same key
        :param num_neighbors: number of trees built in the annoy index (also used as the build parameter)
        :param override_existing_keys: if True, adding an existing key updates its value in place
        :param rebuild_on_every_update: if True, rebuild the annoy index after every add() call
        """
        self.rebuild_on_every_update = rebuild_on_every_update
        self.max_size = dict_size
        self.curr_size = 0
        self.new_value_shift_coefficient = new_value_shift_coefficient
        self.num_neighbors = num_neighbors
        self.override_existing_keys = override_existing_keys

        self.index = AnnoyIndex(key_width, metric='euclidean')
        self.index.set_seed(1)

        # dense storage mirroring the annoy index, indexed by leaf slot
        self.embeddings = np.zeros((dict_size, key_width))
        self.values = np.zeros(dict_size)
        self.additional_data = [None] * dict_size
        # timestamps used to pick the least-recently-used slot for eviction
        self.lru_timestamps = np.zeros(dict_size)
        self.current_timestamp = 0.0

        # keys that are in this distance will be considered as the same key
        self.key_error_threshold = key_error_threshold

        self.initial_update_size = batch_size
        self.min_update_size = self.initial_update_size

        self.key_dimension = key_width
        self.value_dimension = 1
        self._reset_buffer()

        # number of entries present in the currently built annoy index
        self.built_capacity = 0

    def add(self, keys, values, additional_data=None, force_rebuild_tree=False):
        """Add (or update) a batch of (key, value, additional_data) entries.

        Existing keys (within key_error_threshold) are updated in place when
        override_existing_keys is set; new entries are buffered and flushed into
        the annoy index once enough of them accumulate.
        """
        if not additional_data:
            additional_data = [None] * len(keys)

        # Adds new embeddings and values to the dictionary
        indices = []
        indices_to_remove = []
        for i in range(keys.shape[0]):
            index = self._lookup_key_index(keys[i])
            if index and self.override_existing_keys:
                # update existing value
                # (index is a nested list from the knn lookup, e.g. [[slot]], so numpy
                # fancy indexing is used for values/timestamps and index[0][0] for lists)
                self.values[index] += self.new_value_shift_coefficient * (values[i] - self.values[index])
                self.additional_data[index[0][0]] = additional_data[i]
                self.lru_timestamps[index] = self.current_timestamp
                indices_to_remove.append(i)
            else:
                # add new
                if self.curr_size >= self.max_size:
                    # find the LRU entry
                    index = np.argmin(self.lru_timestamps)
                else:
                    index = self.curr_size
                    self.curr_size += 1
                self.lru_timestamps[index] = self.current_timestamp
                indices.append(index)

        # drop the entries that were handled as in-place updates (reverse order keeps indices valid)
        for i in reversed(indices_to_remove):
            keys = np.delete(keys, i, 0)
            values = np.delete(values, i, 0)
            del additional_data[i]

        self.buffered_keys = np.vstack((self.buffered_keys, keys))
        self.buffered_values = np.vstack((self.buffered_values, values))
        self.buffered_indices = self.buffered_indices + indices
        self.buffered_additional_data = self.buffered_additional_data + additional_data

        # rebuild once enough additions are buffered; the threshold grows with the
        # dictionary size (2% of current size) to amortize the rebuild cost
        if len(self.buffered_indices) >= self.min_update_size:
            self.min_update_size = max(self.initial_update_size, int(self.curr_size * 0.02))
            self._rebuild_index()
        elif force_rebuild_tree or self.rebuild_on_every_update:
            self._rebuild_index()

        self.current_timestamp += 1

    # Returns the stored embeddings and values of the closest embeddings
    def query(self, keys, k):
        """Return (embeddings, values, indices, additional_data) of the k nearest
        stored entries for each query key, updating their LRU timestamps."""
        if not self.has_enough_entries(k):
            # this will only happen when the DND is not yet populated with enough entries, which is only during heatup
            # these values won't be used and therefore they are meaningless
            return [0.0], [0.0], [0], [None]

        _, indices = self._get_k_nearest_neighbors_indices(keys, k)
        embeddings = []
        values = []
        additional_data = []
        for ind in indices:
            self.lru_timestamps[ind] = self.current_timestamp
            embeddings.append(self.embeddings[ind])
            values.append(self.values[ind])
            curr_additional_data = []
            for sub_ind in ind:
                curr_additional_data.append(self.additional_data[sub_ind])
            additional_data.append(curr_additional_data)

        self.current_timestamp += 1

        return embeddings, values, indices, additional_data

    def has_enough_entries(self, k):
        """True once at least k entries exist both in storage and in the built index."""
        return self.curr_size > k and (self.built_capacity > k)

    def sample_embeddings(self, num_embeddings):
        """Return num_embeddings stored embeddings sampled uniformly (with replacement)."""
        return self.embeddings[np.random.choice(self.curr_size, num_embeddings)]

    def _get_k_nearest_neighbors_indices(self, keys, k):
        """Query the annoy index for the k nearest slots of each key.
        :return: (distances, indices), each a list with one sub-list per query key
        """
        distances = []
        indices = []
        for key in keys:
            index, distance = self.index.get_nns_by_vector(key, k, include_distances=True)
            distances.append(distance)
            indices.append(index)
        return distances, indices

    def _rebuild_index(self):
        """Flush the buffered entries into the dense storage and rebuild the annoy
        index from scratch (annoy cannot add items to a built index)."""
        self.index.unbuild()
        self.embeddings[self.buffered_indices] = self.buffered_keys
        self.values[self.buffered_indices] = np.squeeze(self.buffered_values)
        for i, data in zip(self.buffered_indices, self.buffered_additional_data):
            self.additional_data[i] = data
        for idx, key in zip(self.buffered_indices, self.buffered_keys):
            self.index.add_item(idx, key)

        self._reset_buffer()

        self.index.build(self.num_neighbors)
        self.built_capacity = self.curr_size

    def _reset_buffer(self):
        """Clear the pending-additions buffer."""
        self.buffered_keys = np.zeros((0, self.key_dimension))
        self.buffered_values = np.zeros((0, self.value_dimension))
        self.buffered_indices = []
        self.buffered_additional_data = []

    def _lookup_key_index(self, key):
        """Return the (nested) index of an existing entry matching key within
        key_error_threshold, or None if no such entry exists."""
        distance, index = self._get_k_nearest_neighbors_indices([key], 1)
        if distance != [[]] and distance[0][0] <= self.key_error_threshold:
            return index
        return None
class QDND(object):
    """A differentiable neural dictionary of Q values (as in Neural Episodic Control):
    one AnnoyDictionary per discrete action, mapping state embeddings to Q values."""
    def __init__(self, dict_size, key_width, num_actions, new_value_shift_coefficient=0.1, key_error_threshold=0.01,
                 learning_rate=0.01, num_neighbors=50, return_additional_data=False, override_existing_keys=False,
                 rebuild_on_every_update=False):
        """
        :param dict_size: maximum number of entries per action dictionary
        :param key_width: dimensionality of the state embeddings
        :param num_actions: number of discrete actions (one dictionary each)
        :param new_value_shift_coefficient: step size for updating the value of an existing key
        :param key_error_threshold: keys closer than this distance are considered the same key
        :param learning_rate: step size used when applying key/value gradients
        :param num_neighbors: number of neighbors to retrieve (and annoy trees to build)
        :param return_additional_data: if True, query() also returns the per-entry additional data
        :param override_existing_keys: if True, adding an existing key updates its value in place
        :param rebuild_on_every_update: if True, rebuild the annoy index after every add
        """
        self.dict_size = dict_size
        self.key_width = key_width
        self.num_actions = num_actions
        self.new_value_shift_coefficient = new_value_shift_coefficient
        self.key_error_threshold = key_error_threshold
        self.learning_rate = learning_rate
        self.num_neighbors = num_neighbors
        self.return_additional_data = return_additional_data
        self.override_existing_keys = override_existing_keys
        self.dicts = []
        # create a dict for each action
        for a in range(num_actions):
            new_dict = AnnoyDictionary(dict_size, key_width, new_value_shift_coefficient,
                                       key_error_threshold=key_error_threshold, num_neighbors=num_neighbors,
                                       override_existing_keys=override_existing_keys,
                                       rebuild_on_every_update=rebuild_on_every_update)
            self.dicts.append(new_dict)

    def add(self, embeddings, actions, values, additional_data=None):
        """Route each (embedding, value) pair to the dictionary of its action.

        :param embeddings: state embeddings, one per sample
        :param actions: the action taken for each sample
        :param values: the value observed for each sample
        :param additional_data: optional per-sample payload stored alongside the value
        :return: True (always)
        """
        # add a new set of embeddings and values to each of the underlining dictionaries
        embeddings = np.array(embeddings)
        actions = np.array(actions)
        values = np.array(values)
        for a in range(self.num_actions):
            idx = np.where(actions == a)
            curr_action_embeddings = embeddings[idx]
            curr_action_values = np.expand_dims(values[idx], -1)
            if additional_data:
                curr_additional_data = []
                for i in idx[0]:
                    curr_additional_data.append(additional_data[i])
            else:
                curr_additional_data = None
            self.dicts[a].add(curr_action_embeddings, curr_action_values, curr_additional_data)

        return True

    def query(self, embeddings, action, k):
        """Return the k nearest stored (embedding, value, index[, additional_data])
        of the given action's dictionary, for each query embedding."""
        # query for nearest neighbors to the given embeddings
        dnd_embeddings = []
        dnd_values = []
        dnd_indices = []
        dnd_additional_data = []
        for i in range(len(embeddings)):
            embedding, value, indices, additional_data = self.dicts[action].query([embeddings[i]], k)
            dnd_embeddings.append(embedding[0])
            dnd_values.append(value[0])
            dnd_indices.append(indices[0])
            dnd_additional_data.append(additional_data[0])

        if self.return_additional_data:
            return dnd_embeddings, dnd_values, dnd_indices, dnd_additional_data
        else:
            return dnd_embeddings, dnd_values, dnd_indices

    def has_enough_entries(self, k):
        """True only if every per-action dictionary has more than k built entries."""
        # check if each of the action dictionaries has at least k entries
        for a in range(self.num_actions):
            if not self.dicts[a].has_enough_entries(k):
                return False
        return True

    def update_keys_and_values(self, actions, key_gradients, value_gradients, indices):
        """Apply a gradient-descent step directly to the stored keys and values.

        NOTE: this mutates the dense storage only; the annoy index is not rebuilt here,
        so nearest-neighbor lookups reflect the old keys until the next rebuild.
        """
        # Update DND keys and values
        for batch_action, batch_keys, batch_values, batch_indices in zip(actions, key_gradients, value_gradients, indices):
            # Update keys (embeddings) and values in DND
            for i, index in enumerate(batch_indices):
                self.dicts[batch_action].embeddings[index, :] -= self.learning_rate * batch_keys[i, :]
                self.dicts[batch_action].values[index] -= self.learning_rate * batch_values[i]

    def sample_embeddings(self, num_embeddings):
        """Sample num_embeddings stored embeddings, split evenly across actions
        (any remainder is drawn from one random action)."""
        num_actions = len(self.dicts)
        embeddings = []
        num_embeddings_per_action = int(num_embeddings/num_actions)
        for action in range(num_actions):
            embeddings.append(self.dicts[action].sample_embeddings(num_embeddings_per_action))
        embeddings = np.vstack(embeddings)

        # the numbers did not divide nicely, let's just randomly sample some more embeddings
        if num_embeddings_per_action * num_actions < num_embeddings:
            action = np.random.randint(0, num_actions)
            extra_embeddings = self.dicts[action].sample_embeddings(num_embeddings -
                                                                   num_embeddings_per_action * num_actions)
            embeddings = np.vstack([embeddings, extra_embeddings])

        return embeddings

    def clean(self):
        """Discard all stored entries by recreating the per-action dictionaries."""
        # create a new dict for each action
        self.dicts = []
        for a in range(self.num_actions):
            new_dict = AnnoyDictionary(self.dict_size, self.key_width, self.new_value_shift_coefficient,
                                       key_error_threshold=self.key_error_threshold, num_neighbors=self.num_neighbors)
            self.dicts.append(new_dict)
def load_dnd(model_dir):
    """Load the latest QDND checkpoint (*.srs file) from model_dir and rebuild its
    annoy indexes.

    The annoy index itself is not restored by pickle, so it is reconstructed here
    from the stored embeddings of each per-action dictionary.

    :param model_dir: directory containing checkpoint files named '<id>_*.srs'
    :return: the restored QDND object
    :raises ValueError: if no checkpoint file is found in model_dir
    """
    latest_checkpoint_id = -1
    latest_checkpoint = ''

    # find the checkpoint file with the highest id
    for fname in os.listdir(model_dir):
        path = os.path.join(model_dir, fname)
        if os.path.isdir(path) or fname.split('.')[-1] != 'srs':
            continue
        checkpoint_id = int(fname.split('_')[0])
        if checkpoint_id > latest_checkpoint_id:
            latest_checkpoint = fname
            latest_checkpoint_id = checkpoint_id

    if latest_checkpoint_id == -1:
        raise ValueError("No DND checkpoint (*.srs) files were found in {}".format(model_dir))

    # NOTE: pickle should only be used on trusted checkpoint files
    with open(os.path.join(model_dir, str(latest_checkpoint)), 'rb') as f:
        DND = pickle.load(f)

    for a in range(DND.num_actions):
        action_dict = DND.dicts[a]
        # rebuild the index using the dictionary's own key width and tree count
        # (generalized from the previously hard-coded 512 / 50), matching
        # AnnoyDictionary._rebuild_index
        action_dict.index = AnnoyIndex(action_dict.key_dimension, metric='euclidean')
        action_dict.index.set_seed(1)
        for idx, key in zip(range(action_dict.curr_size), action_dict.embeddings[:action_dict.curr_size]):
            action_dict.index.add_item(idx, key)
        action_dict.index.build(action_dict.num_neighbors)

    return DND
import operator
import random
from enum import Enum
from typing import List, Tuple, Any, Union
import numpy as np
from rl_coach.core_types import Transition
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters, ExperienceReplay
from rl_coach.schedules import Schedule, ConstantSchedule
class BalancedExperienceReplayParameters(ExperienceReplayParameters):
    """Parameters for the class-balanced experience replay buffer."""
    def __init__(self):
        super().__init__()
        self.max_size = (MemoryGranularity.Transitions, 1000000)
        self.allow_duplicates_in_batch_sampling = False
        # must be overridden by the user with the actual number of classes (>= 2)
        self.num_classes = 0
        # the key under which each transition's state dict stores its class index
        self.state_key_with_the_class_index = 'class'

    @property
    def path(self):
        return 'rl_coach.memories.non_episodic.balanced_experience_replay:BalancedExperienceReplay'
"""
A replay buffer which allows sampling batches which are balanced in terms of the classes that are sampled
"""
class BalancedExperienceReplay(ExperienceReplay):
def __init__(self, max_size: Tuple[MemoryGranularity, int], allow_duplicates_in_batch_sampling: bool=True,
num_classes: int=0, state_key_with_the_class_index: Any='class'):
"""
:param max_size: the maximum number of transitions or episodes to hold in the memory
:param allow_duplicates_in_batch_sampling: allow having the same transition multiple times in a batch
:param num_classes: the number of classes in the replayed data
:param state_key_with_the_class_index: the class index is assumed to be a value in the state dictionary.
this parameter determines the key to retrieve the class index value
"""
super().__init__(max_size, allow_duplicates_in_batch_sampling)
self.current_class_to_sample_from = 0
self.num_classes = num_classes
self.state_key_with_the_class_index = state_key_with_the_class_index
self.transitions = [[] for _ in range(self.num_classes)]
self.transitions_order = []
if self.num_classes < 2:
raise ValueError("The number of classes for a balanced replay buffer should be at least 2. "
"The number of classes that were defined are: {}".format(self.num_classes))
def store(self, transition: Transition, lock: bool=True) -> None:
"""
Store a new transition in the memory.
:param transition: a transition to store
:param lock: if true, will lock the readers writers lock. this can cause a deadlock if an inheriting class
locks and then calls store with lock = True
:return: None
"""
# Calling super.store() so that in case a memory backend is used, the memory backend can store this transition.
super().store(transition)
if lock:
self.reader_writer_lock.lock_writing_and_reading()
self._num_transitions += 1
if self.state_key_with_the_class_index not in transition.state.keys():
raise ValueError("The class index was not present in the state of the transition under the given key ({})"
.format(self.state_key_with_the_class_index))
class_idx = transition.state[self.state_key_with_the_class_index]
if class_idx >= self.num_classes:
raise ValueError("The given class index is outside the defined number of classes for the replay buffer. "
"The given class was: {} and the number of classes defined is: {}"
.format(class_idx, self.num_classes))
self.transitions[class_idx].append(transition)
self.transitions_order.append(class_idx)
self._enforce_max_length()
if lock:
self.reader_writer_lock.release_writing_and_reading()
def sample(self, size: int) -> List[Transition]:
"""
Sample a batch of transitions form the replay buffer. If the requested size is larger than the number
of samples available in the replay buffer then the batch will return empty.
:param size: the size of the batch to sample
:return: a batch (list) of selected transitions from the replay buffer
"""
self.reader_writer_lock.lock_writing()
if size % self.num_classes != 0:
raise ValueError("Sampling batches from a balanced replay buffer should be done only using batch sizes "
"which are a multiple of the number of classes. The number of classes defined is: {} "
"and the batch size requested is: {}".format(self.num_classes, size))
batch_size_from_each_class = size // self.num_classes
if self.allow_duplicates_in_batch_sampling:
transitions_idx = [np.random.randint(len(class_transitions), size=batch_size_from_each_class)
for class_transitions in self.transitions]
else:
for class_idx, class_transitions in enumerate(self.transitions):
if self.num_transitions() < batch_size_from_each_class:
raise ValueError("The replay buffer cannot be sampled since there are not enough transitions yet. "
"There are currently {} transitions for class {}"
.format(len(class_transitions), class_idx))
transitions_idx = [np.random.choice(len(class_transitions), size=batch_size_from_each_class, replace=False)
for class_transitions in self.transitions]
batch = []
for class_idx, class_transitions_idx in enumerate(transitions_idx):
batch += [self.transitions[class_idx][i] for i in class_transitions_idx]
self.reader_writer_lock.release_writing()
return batch
def remove_transition(self, transition_index: int, lock: bool=True) -> None:
raise ValueError("It is not possible to remove specific transitions with a balanced replay buffer")
def get_transition(self, transition_index: int, lock: bool=True) -> Union[None, Transition]:
raise ValueError("It is not possible to access specific transitions with a balanced replay buffer")
def _enforce_max_length(self) -> None:
"""
Make sure that the size of the replay buffer does not pass the maximum size allowed.
If it passes the max size, the oldest transition in the replay buffer will be removed.
This function does not use locks since it is only called internally
:return: None
"""
granularity, size = self.max_size
if granularity == MemoryGranularity.Transitions:
while size != 0 and self.num_transitions() > size:
self._num_transitions -= 1
del self.transitions[self.transitions_order[0]][0]
del self.transitions_order[0]
else:
raise ValueError("The granularity of the replay buffer can only be set in terms of transitions")
def clean(self, lock: bool=True) -> None:
"""
Clean the memory by removing all the episodes
:return: None
"""
if lock:
self.reader_writer_lock.lock_writing_and_reading()
self.transitions = [[] for _ in range(self.num_classes)]
self.transitions_order = []
self._num_transitions = 0
if lock:
self.reader_writer_lock.release_writing_and_reading() | /rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/memories/non_episodic/balanced_experience_replay.py | 0.864368 | 0.439326 | balanced_experience_replay.py | pypi |
import operator
import random
from enum import Enum
from typing import List, Tuple, Any
import numpy as np
from rl_coach.core_types import Transition
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters, ExperienceReplay
from rl_coach.schedules import Schedule, ConstantSchedule
class PrioritizedExperienceReplayParameters(ExperienceReplayParameters):
    """Parameters for the proportional prioritized experience replay buffer."""
    def __init__(self):
        super().__init__()
        self.max_size = (MemoryGranularity.Transitions, 1000000)
        # prioritization exponent (0 = uniform sampling, 1 = fully proportional)
        self.alpha = 0.6
        # importance-sampling exponent schedule
        self.beta = ConstantSchedule(0.4)
        # small constant keeping every transition's priority strictly positive
        self.epsilon = 1e-6

    @property
    def path(self):
        return 'rl_coach.memories.non_episodic.prioritized_experience_replay:PrioritizedExperienceReplay'
class SegmentTree(object):
    """
    A tree which can be used as a min/max heap or a sum tree
    Add or update item value - O(log N)
    Sampling an item - O(log N)
    """
    class Operation(Enum):
        MAX = {"operator": max, "initial_value": -float("inf")}
        MIN = {"operator": min, "initial_value": float("inf")}
        SUM = {"operator": operator.add, "initial_value": 0}

    def __init__(self, size: int, operation: Operation):
        """
        :param size: the number of leaves. must be a positive power of 2
        :param operation: the aggregation applied by internal nodes (MIN / MAX / SUM)
        :raises ValueError: if size is not a positive power of 2
        """
        self.next_leaf_idx_to_write = 0
        self.size = size
        if not (size > 0 and size & (size - 1) == 0):
            raise ValueError("A segment tree size must be a positive power of 2. The given size is {}".format(self.size))
        self.operation = operation
        # complete binary tree stored flat: internal nodes in [0, size-2],
        # leaves in [size-1, 2*size-2], initialized to the operation's identity
        self.tree = np.ones(2 * size - 1) * self.operation.value['initial_value']
        self.data = [None] * size

    def _propagate(self, node_idx: int) -> None:
        """
        Propagate an update of a node's value to its parent node
        :param node_idx: the index of the node that was updated
        :return: None
        """
        parent = (node_idx - 1) // 2
        self.tree[parent] = self.operation.value['operator'](self.tree[parent * 2 + 1], self.tree[parent * 2 + 2])
        if parent != 0:
            self._propagate(parent)

    def _retrieve(self, root_node_idx: int, val: float) -> int:
        """
        Retrieve the first node that has a value larger than val and is a child of the node at index idx.
        NOTE: this descent assumes SUM semantics and is only meaningful for a sum tree
        (it is used by get_element_by_partial_sum).
        :param root_node_idx: the index of the root node to search from
        :param val: the value to query for
        :return: the index of the resulting node
        """
        left = 2 * root_node_idx + 1
        right = left + 1
        if left >= len(self.tree):
            return root_node_idx
        if val <= self.tree[left]:
            return self._retrieve(left, val)
        else:
            return self._retrieve(right, val - self.tree[left])

    def total_value(self) -> float:
        """
        Return the total value of the tree according to the tree operation. For SUM for example, this will return
        the total sum of the tree. for MIN, this will return the minimal value
        :return: the total value of the tree
        """
        return self.tree[0]

    def add(self, val: float, data: Any) -> None:
        """
        Add a new value to the tree with data assigned to it. Once all leaves are
        used, writing wraps around and overwrites the oldest-written leaf.
        :param val: the new value to add to the tree
        :param data: the data that should be assigned to this value
        :return: None
        """
        self.data[self.next_leaf_idx_to_write] = data
        self.update(self.next_leaf_idx_to_write, val)
        self.next_leaf_idx_to_write += 1
        if self.next_leaf_idx_to_write >= self.size:
            self.next_leaf_idx_to_write = 0

    def update(self, leaf_idx: int, new_val: float) -> None:
        """
        Update the value of the node at index idx
        :param leaf_idx: the index of the leaf to update (0-based among the leaves)
        :param new_val: the new value of the node
        :raises ValueError: if leaf_idx is out of range
        :return: None
        """
        node_idx = leaf_idx + self.size - 1
        if not 0 <= node_idx < len(self.tree):
            # BUGFIX: the message used to say "left index" instead of "leaf index"
            raise ValueError("The given leaf index ({}) can not be found in the tree. The available leaves are: 0-{}"
                             .format(leaf_idx, self.size - 1))
        self.tree[node_idx] = new_val
        self._propagate(node_idx)

    def get_element_by_partial_sum(self, val: float) -> Tuple[int, float, Any]:
        """
        Given a value between 0 and the tree sum, return the object which this value is in it's range.
        For example, if we have 3 leaves: 10, 20, 30, and val=35, this will return the 3rd leaf, by accumulating
        leaves by their order until getting to 35. This allows sampling leaves according to their proportional
        probability.
        :param val: a value within the range 0 and the tree sum
        :return: the index of the resulting leaf in the tree, its stored (unnormalized) value and
                 the object itself
        """
        node_idx = self._retrieve(0, val)
        leaf_idx = node_idx - self.size + 1
        data_value = self.tree[node_idx]
        data = self.data[leaf_idx]
        return leaf_idx, data_value, data

    def __str__(self):
        """Return the tree rendered level by level (root first)."""
        result = ""
        start = 0
        size = 1
        while size <= self.size:
            result += "{}\n".format(self.tree[start:(start + size)])
            start += size
            size *= 2
        return result
class PrioritizedExperienceReplay(ExperienceReplay):
    """
    This is the proportional sampling variant of the prioritized experience replay as described
    in https://arxiv.org/pdf/1511.05952.pdf.

    Three segment trees are maintained over the transition priorities:
    a sum tree for proportional sampling, a min tree for computing the maximal
    importance-sampling weight, and a max tree for tracking the maximal priority
    to assign to newly stored transitions.
    """
    def __init__(self, max_size: Tuple[MemoryGranularity, int], alpha: float=0.6, beta: Schedule=ConstantSchedule(0.4),
                 epsilon: float=1e-6, allow_duplicates_in_batch_sampling: bool=True):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        :param alpha: the alpha prioritization coefficient
        :param beta: the beta parameter used for importance sampling
        :param epsilon: a small value added to the priority of each transition
        :param allow_duplicates_in_batch_sampling: allow having the same transition multiple times in a batch
        """
        if max_size[0] != MemoryGranularity.Transitions:
            raise ValueError("Prioritized Experience Replay currently only support setting the memory size in "
                             "transitions granularity.")
        # round the requested size up to the next power of 2 so the segment trees are complete binary trees
        self.power_of_2_size = 1
        while self.power_of_2_size < max_size[1]:
            self.power_of_2_size *= 2
        super().__init__((MemoryGranularity.Transitions, self.power_of_2_size), allow_duplicates_in_batch_sampling)
        self.sum_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.SUM)
        self.min_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.MIN)
        self.max_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.MAX)
        self.alpha = alpha
        self.beta = beta
        self.epsilon = epsilon
        # new transitions are stored with the maximal priority seen so far, so each of them
        # has a good chance of being sampled at least once before its priority decays
        self.maximal_priority = 1.0

    def _update_priority(self, leaf_idx: int, error: float) -> None:
        """
        Update the priority of a given transition, using its index in the tree and its error
        :param leaf_idx: the index of the transition leaf in the tree
        :param error: the new error value (must be non-negative)
        :return: None
        """
        if error < 0:
            raise ValueError("The priorities must be non-negative values")
        # epsilon keeps the priority strictly positive so every transition remains sample-able
        priority = (error + self.epsilon)
        # sum/min trees hold the exponentiated priority p^alpha; the max tree holds the raw priority
        self.sum_tree.update(leaf_idx, priority ** self.alpha)
        self.min_tree.update(leaf_idx, priority ** self.alpha)
        self.max_tree.update(leaf_idx, priority)
        # the max tree root is the current maximal raw priority in the memory
        self.maximal_priority = self.max_tree.total_value()

    def update_priorities(self, indices: List[int], error_values: List[float]) -> None:
        """
        Update the priorities of a batch of transitions using their indices and their new TD error terms
        :param indices: the indices of the transitions to update (the leaf indices that sample()
                        attaches to each transition as info['idx'])
        :param error_values: the new error values
        :return: None
        """
        self.reader_writer_lock.lock_writing_and_reading()

        if len(indices) != len(error_values):
            raise ValueError("The number of indexes requested for update don't match the number of error values given")
        for transition_idx, error in zip(indices, error_values):
            self._update_priority(transition_idx, error)

        self.reader_writer_lock.release_writing_and_reading()

    def sample(self, size: int) -> List[Transition]:
        """
        Sample a batch of transitions form the replay buffer. If the requested size is larger than the number
        of samples available in the replay buffer then the batch will return empty.
        :param size: the size of the batch to sample
        :return: a batch (list) of selected transitions from the replay buffer
        """
        # block writers while sampling, since priorities/trees must not change mid-sample
        self.reader_writer_lock.lock_writing()

        if self.num_transitions() >= size:
            # split the tree leaves to equal segments and sample one transition from each segment
            batch = []
            segment_size = self.sum_tree.total_value() / size

            # get the maximum weight in the memory
            min_probability = self.min_tree.total_value() / self.sum_tree.total_value()  # min P(j) = min p^a / sum(p^a)
            max_weight = (min_probability * self.num_transitions()) ** -self.beta.current_value  # max wi

            # sample a batch
            for i in range(size):
                segment_start = segment_size * i
                segment_end = segment_size * (i + 1)

                # sample leaf and calculate its weight
                val = random.uniform(segment_start, segment_end)
                leaf_idx, priority, transition = self.sum_tree.get_element_by_partial_sum(val)
                priority /= self.sum_tree.total_value()  # P(j) = p^a / sum(p^a)
                weight = (self.num_transitions() * priority) ** -self.beta.current_value  # (N * P(j)) ^ -beta
                normalized_weight = weight / max_weight  # wj = ((N * P(j)) ^ -beta) / max wi

                # attach the tree index (for later priority updates) and the normalized
                # importance sampling weight to the sampled transition
                transition.info['idx'] = leaf_idx
                transition.info['weight'] = normalized_weight

                batch.append(transition)

            # anneal beta one schedule step per sampled batch
            self.beta.step()
        else:
            # NOTE(review): raising here happens while the writing lock is still held -
            # confirm that callers recover/release correctly in this path
            raise ValueError("The replay buffer cannot be sampled since there are not enough transitions yet. "
                             "There are currently {} transitions".format(self.num_transitions()))

        self.reader_writer_lock.release_writing()

        return batch

    def store(self, transition: Transition, lock=True) -> None:
        """
        Store a new transition in the memory.
        :param transition: a transition to store
        :param lock: if True, take the reader/writer lock around the tree updates
        :return: None
        """
        # Calling super.store() so that in case a memory backend is used, the memory backend can store this transition.
        super().store(transition)
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()

        # new transitions enter all three trees with the maximal priority seen so far
        transition_priority = self.maximal_priority
        self.sum_tree.add(transition_priority ** self.alpha, transition)
        self.min_tree.add(transition_priority ** self.alpha, transition)
        self.max_tree.add(transition_priority, transition)
        # NOTE(review): super().store() is called twice in this method (once above, once here with
        # lock=False); if ExperienceReplay.store() appends the transition on every call, it would be
        # stored twice - confirm that the first call only forwards to the memory backend.
        super().store(transition, False)

        if lock:
            self.reader_writer_lock.release_writing_and_reading()

    def clean(self, lock=True) -> None:
        """
        Clean the memory by removing all the episodes
        :param lock: if True, take the reader/writer lock around the cleanup
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()
        super().clean(lock=False)
        # rebuild the (now empty) segment trees at the same capacity
        self.sum_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.SUM)
        self.min_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.MIN)
        self.max_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.MAX)

        if lock:
            self.reader_writer_lock.release_writing_and_reading()
# Coach
[](https://circleci.com/gh/NervanaSystems/workflows/coach/tree/master)
[](https://github.com/NervanaSystems/coach/blob/master/LICENSE)
[](https://nervanasystems.github.io/coach/)
[](https://doi.org/10.5281/zenodo.1134898)
<p align="center"><img src="img/coach_logo.png" alt="Coach Logo" width="200"/></p>
Coach is a Python reinforcement learning framework containing implementations of many state-of-the-art algorithms.
It exposes a set of easy-to-use APIs for experimenting with new RL algorithms, and allows simple integration of new environments to solve.
Basic RL components (algorithms, environments, neural network architectures, exploration policies, ...) are well decoupled, so that extending and reusing existing components is fairly painless.
Training an agent to solve an environment is as easy as running:
```bash
coach -p CartPole_DQN -r
```
<img src="img/fetch_slide.gif" alt="Fetch Slide"/> <img src="img/pendulum.gif" alt="Pendulum"/> <img src="img/starcraft.gif" width = "281" height ="200" alt="Starcraft"/>
<br>
<img src="img/doom_deathmatch.gif" alt="Doom Deathmatch"/> <img src="img/carla.gif" alt="CARLA"/> <img src="img/montezuma.gif" alt="MontezumaRevenge" width = "164" height ="200"/>
<br>
<img src="img/doom_health.gif" alt="Doom Health Gathering"/> <img src="img/minitaur.gif" alt="PyBullet Minitaur" width = "249" height ="200"/> <img src="img/ant.gif" alt="Gym Extensions Ant"/>
<br><br>
* [Release 0.8.0](https://ai.intel.com/reinforcement-learning-coach-intel/) (initial release)
* [Release 0.9.0](https://ai.intel.com/reinforcement-learning-coach-carla-qr-dqn/)
* [Release 0.10.0](https://ai.intel.com/introducing-reinforcement-learning-coach-0-10-0/)
* [Release 0.11.0](https://ai.intel.com/rl-coach-data-science-at-scale)
* [Release 0.12.0](https://github.com/NervanaSystems/coach/releases/tag/v0.12.0)
* [Release 1.0.0](https://www.intel.ai/rl-coach-new-release) (current release)
## Table of Contents
- [Benchmarks](#benchmarks)
- [Installation](#installation)
- [Getting Started](#getting-started)
* [Tutorials and Documentation](#tutorials-and-documentation)
* [Basic Usage](#basic-usage)
* [Running Coach](#running-coach)
* [Running Coach Dashboard (Visualization)](#running-coach-dashboard-visualization)
* [Distributed Multi-Node Coach](#distributed-multi-node-coach)
* [Batch Reinforcement Learning](#batch-reinforcement-learning)
- [Supported Environments](#supported-environments)
- [Supported Algorithms](#supported-algorithms)
- [Citation](#citation)
- [Contact](#contact)
- [Disclaimer](#disclaimer)
## Benchmarks
One of the main challenges when building a research project, or a solution based on a published algorithm, is getting a concrete and reliable baseline that reproduces the algorithm's results, as reported by its authors. To address this problem, we are releasing a set of [benchmarks](benchmarks) that show Coach reliably reproduces many state-of-the-art algorithm results.
## Installation
Note: Coach has only been tested on Ubuntu 16.04 LTS, and with Python 3.5.
For some information on installing on Ubuntu 17.10 with Python 3.6.3, please refer to the following issue: https://github.com/NervanaSystems/coach/issues/54
In order to install coach, there are a few prerequisites required. This will setup all the basics needed to get the user going with running Coach on top of [OpenAI Gym](https://github.com/openai/gym) environments:
```
# General
sudo -E apt-get install python3-pip cmake zlib1g-dev python3-tk python-opencv -y
# Boost libraries
sudo -E apt-get install libboost-all-dev -y
# Scipy requirements
sudo -E apt-get install libblas-dev liblapack-dev libatlas-base-dev gfortran -y
# PyGame
sudo -E apt-get install libsdl-dev libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev
libsmpeg-dev libportmidi-dev libavformat-dev libswscale-dev -y
# Dashboard
sudo -E apt-get install dpkg-dev build-essential python3.5-dev libjpeg-dev libtiff-dev libsdl1.2-dev libnotify-dev
freeglut3 freeglut3-dev libsm-dev libgtk2.0-dev libgtk-3-dev libwebkitgtk-dev libgtk-3-dev libwebkitgtk-3.0-dev
libgstreamer-plugins-base1.0-dev -y
# Gym
sudo -E apt-get install libav-tools libsdl2-dev swig cmake -y
```
We recommend installing coach in a virtualenv:
```
sudo -E pip3 install virtualenv
virtualenv -p python3 coach_env
. coach_env/bin/activate
```
Finally, install coach using pip:
```
pip3 install rl_coach
```
Or alternatively, for a development environment, install coach from the cloned repository:
```
cd coach
pip3 install -e .
```
If a GPU is present, Coach's pip package will install tensorflow-gpu, by default. If a GPU is not present, an [Intel-Optimized TensorFlow](https://software.intel.com/en-us/articles/intel-optimized-tensorflow-wheel-now-available), will be installed.
In addition to OpenAI Gym, several other environments were tested and are supported. Please follow the instructions in the Supported Environments section below in order to install more environments.
## Getting Started
### Tutorials and Documentation
[Jupyter notebooks demonstrating how to run Coach from command line or as a library, implement an algorithm, or integrate an environment](https://github.com/NervanaSystems/coach/tree/master/tutorials).
[Framework documentation, algorithm description and instructions on how to contribute a new agent/environment](https://nervanasystems.github.io/coach/).
### Basic Usage
#### Running Coach
To allow reproducing results in Coach, we defined a mechanism called _preset_.
There are several available presets under the `presets` directory.
To list all the available presets use the `-l` flag.
To run a preset, use:
```bash
coach -r -p <preset_name>
```
For example:
* CartPole environment using Policy Gradients (PG):
```bash
coach -r -p CartPole_PG
```
* Basic level of Doom using Dueling network and Double DQN (DDQN) algorithm:
```bash
coach -r -p Doom_Basic_Dueling_DDQN
```
Some presets apply to a group of environment levels, like the entire Atari or Mujoco suites for example.
To use these presets, the requested level should be defined using the `-lvl` flag.
For example:
* Pong using the Neural Episodic Control (NEC) algorithm:
```bash
coach -r -p Atari_NEC -lvl pong
```
There are several types of agents that can benefit from running them in a distributed fashion with multiple workers in parallel. Each worker interacts with its own copy of the environment but updates a shared network, which improves the data collection speed and the stability of the learning process.
To specify the number of workers to run, use the `-n` flag.
For example:
* Breakout using Asynchronous Advantage Actor-Critic (A3C) with 8 workers:
```bash
coach -r -p Atari_A3C -lvl breakout -n 8
```
It is easy to create new presets for different levels or environments by following the same pattern as in presets.py
More usage examples can be found [here](https://github.com/NervanaSystems/coach/blob/master/tutorials/0.%20Quick%20Start%20Guide.ipynb).
#### Running Coach Dashboard (Visualization)
Training an agent to solve an environment can be tricky, at times.
In order to debug the training process, Coach outputs several signals, per trained algorithm, in order to track algorithmic performance.
While Coach trains an agent, a csv file containing the relevant training signals will be saved to the 'experiments' directory. Coach's dashboard can then be used to dynamically visualize the training signals, and track algorithmic behavior.
To use it, run:
```bash
dashboard
```
<img src="img/dashboard.gif" alt="Coach Design" style="width: 800px;"/>
### Distributed Multi-Node Coach
As of release 0.11.0, Coach supports horizontal scaling for training RL agents on multiple nodes. In release 0.11.0 this was tested on the ClippedPPO and DQN agents.
For usage instructions please refer to the documentation [here](https://nervanasystems.github.io/coach/dist_usage.html).
### Batch Reinforcement Learning
Training and evaluating an agent from a dataset of experience, where no simulator is available, is supported in Coach.
There are [example](https://github.com/NervanaSystems/coach/blob/master/rl_coach/presets/CartPole_DDQN_BatchRL.py) [presets](https://github.com/NervanaSystems/coach/blob/master/rl_coach/presets/Acrobot_DDQN_BCQ_BatchRL.py) and a [tutorial](https://github.com/NervanaSystems/coach/blob/master/tutorials/4.%20Batch%20Reinforcement%20Learning.ipynb).
## Supported Environments
* *OpenAI Gym:*
Installed by default by Coach's installer
* *ViZDoom:*
Follow the instructions described in the ViZDoom repository -
https://github.com/mwydmuch/ViZDoom
Additionally, Coach assumes that the environment variable VIZDOOM_ROOT points to the ViZDoom installation directory.
* *Roboschool:*
Follow the instructions described in the roboschool repository -
https://github.com/openai/roboschool
* *GymExtensions:*
Follow the instructions described in the GymExtensions repository -
https://github.com/Breakend/gym-extensions
Additionally, add the installation directory to the PYTHONPATH environment variable.
* *PyBullet:*
Follow the instructions described in the [Quick Start Guide](https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA) (basically just - 'pip install pybullet')
* *CARLA:*
Download release 0.8.4 from the CARLA repository -
https://github.com/carla-simulator/carla/releases
Install the python client and dependencies from the release tarball:
```
pip3 install -r PythonClient/requirements.txt
pip3 install PythonClient
```
Create a new CARLA_ROOT environment variable pointing to CARLA's installation directory.
A simple CARLA settings file (```CarlaSettings.ini```) is supplied with Coach, and is located in the ```environments``` directory.
* *Starcraft:*
Follow the instructions described in the PySC2 repository -
https://github.com/deepmind/pysc2
* *DeepMind Control Suite:*
Follow the instructions described in the DeepMind Control Suite repository -
https://github.com/deepmind/dm_control
## Supported Algorithms
<img src="docs_raw/source/_static/img/algorithms.png" alt="Coach Design" style="width: 800px;"/>
### Value Optimization Agents
* [Deep Q Network (DQN)](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) ([code](rl_coach/agents/dqn_agent.py))
* [Double Deep Q Network (DDQN)](https://arxiv.org/pdf/1509.06461.pdf) ([code](rl_coach/agents/ddqn_agent.py))
* [Dueling Q Network](https://arxiv.org/abs/1511.06581)
* [Mixed Monte Carlo (MMC)](https://arxiv.org/abs/1703.01310) ([code](rl_coach/agents/mmc_agent.py))
* [Persistent Advantage Learning (PAL)](https://arxiv.org/abs/1512.04860) ([code](rl_coach/agents/pal_agent.py))
* [Categorical Deep Q Network (C51)](https://arxiv.org/abs/1707.06887) ([code](rl_coach/agents/categorical_dqn_agent.py))
* [Quantile Regression Deep Q Network (QR-DQN)](https://arxiv.org/pdf/1710.10044v1.pdf) ([code](rl_coach/agents/qr_dqn_agent.py))
* [N-Step Q Learning](https://arxiv.org/abs/1602.01783) | **Multi Worker Single Node** ([code](rl_coach/agents/n_step_q_agent.py))
* [Neural Episodic Control (NEC)](https://arxiv.org/abs/1703.01988) ([code](rl_coach/agents/nec_agent.py))
* [Normalized Advantage Functions (NAF)](https://arxiv.org/abs/1603.00748.pdf) | **Multi Worker Single Node** ([code](rl_coach/agents/naf_agent.py))
* [Rainbow](https://arxiv.org/abs/1710.02298) ([code](rl_coach/agents/rainbow_dqn_agent.py))
### Policy Optimization Agents
* [Policy Gradients (PG)](http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf) | **Multi Worker Single Node** ([code](rl_coach/agents/policy_gradients_agent.py))
* [Asynchronous Advantage Actor-Critic (A3C)](https://arxiv.org/abs/1602.01783) | **Multi Worker Single Node** ([code](rl_coach/agents/actor_critic_agent.py))
* [Deep Deterministic Policy Gradients (DDPG)](https://arxiv.org/abs/1509.02971) | **Multi Worker Single Node** ([code](rl_coach/agents/ddpg_agent.py))
* [Proximal Policy Optimization (PPO)](https://arxiv.org/pdf/1707.06347.pdf) ([code](rl_coach/agents/ppo_agent.py))
* [Clipped Proximal Policy Optimization (CPPO)](https://arxiv.org/pdf/1707.06347.pdf) | **Multi Worker Single Node** ([code](rl_coach/agents/clipped_ppo_agent.py))
* [Generalized Advantage Estimation (GAE)](https://arxiv.org/abs/1506.02438) ([code](rl_coach/agents/actor_critic_agent.py#L86))
* [Sample Efficient Actor-Critic with Experience Replay (ACER)](https://arxiv.org/abs/1611.01224) | **Multi Worker Single Node** ([code](rl_coach/agents/acer_agent.py))
* [Soft Actor-Critic (SAC)](https://arxiv.org/abs/1801.01290) ([code](rl_coach/agents/soft_actor_critic_agent.py))
* [Twin Delayed Deep Deterministic Policy Gradient (TD3)](https://arxiv.org/pdf/1802.09477.pdf) ([code](rl_coach/agents/td3_agent.py))
### General Agents
* [Direct Future Prediction (DFP)](https://arxiv.org/abs/1611.01779) | **Multi Worker Single Node** ([code](rl_coach/agents/dfp_agent.py))
### Imitation Learning Agents
* Behavioral Cloning (BC) ([code](rl_coach/agents/bc_agent.py))
* [Conditional Imitation Learning](https://arxiv.org/abs/1710.02410) ([code](rl_coach/agents/cil_agent.py))
### Hierarchical Reinforcement Learning Agents
* [Hierarchical Actor Critic (HAC)](https://arxiv.org/abs/1712.00948.pdf) ([code](rl_coach/agents/hac_ddpg_agent.py))
### Memory Types
* [Hindsight Experience Replay (HER)](https://arxiv.org/abs/1707.01495.pdf) ([code](rl_coach/memories/episodic/episodic_hindsight_experience_replay.py))
* [Prioritized Experience Replay (PER)](https://arxiv.org/abs/1511.05952) ([code](rl_coach/memories/non_episodic/prioritized_experience_replay.py))
### Exploration Techniques
* E-Greedy ([code](rl_coach/exploration_policies/e_greedy.py))
* Boltzmann ([code](rl_coach/exploration_policies/boltzmann.py))
* Ornstein–Uhlenbeck process ([code](rl_coach/exploration_policies/ou_process.py))
* Normal Noise ([code](rl_coach/exploration_policies/additive_noise.py))
* Truncated Normal Noise ([code](rl_coach/exploration_policies/truncated_normal.py))
* [Bootstrapped Deep Q Network](https://arxiv.org/abs/1602.04621) ([code](rl_coach/agents/bootstrapped_dqn_agent.py))
* [UCB Exploration via Q-Ensembles (UCB)](https://arxiv.org/abs/1706.01502) ([code](rl_coach/exploration_policies/ucb.py))
* [Noisy Networks for Exploration](https://arxiv.org/abs/1706.10295) ([code](rl_coach/exploration_policies/parameter_noise.py))
## Citation
If you used Coach for your work, please use the following citation:
```
@misc{caspi_itai_2017_1134899,
author = {Caspi, Itai and
Leibovich, Gal and
Novik, Gal and
Endrawis, Shadi},
title = {Reinforcement Learning Coach},
month = dec,
year = 2017,
doi = {10.5281/zenodo.1134899},
url = {https://doi.org/10.5281/zenodo.1134899}
}
```
## Contact
We'd be happy to get any questions or contributions through GitHub issues and PRs.
Please make sure to take a look [here](CONTRIBUTING.md) before filing an issue or proposing a PR.
The Coach development team can also be contacted over [email](mailto:coach@intel.com)
## Disclaimer
Coach is released as a reference code for research purposes. It is not an official Intel product, and the level of quality and support may not be as expected from an official product.
Additional algorithms and environments are planned to be added to the framework. Feedback and contributions from the open source and RL research communities are more than welcome.
| /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/README.md | 0.777215 | 0.964556 | README.md | pypi |
from typing import Any, Dict, List
class Saver(object):
    """
    Abstract base for checkpoint saver objects. Concrete savers know how to persist
    and restore their parameters to/from a path, and how to merge with another saver
    that reports the same path.
    """
    @property
    def path(self):
        """
        Relative path used for saving/loading. Two savers that report the same path
        must be merge-able into a single saver.
        """
        raise NotImplementedError

    def save(self, sess: Any, save_path: str) -> List[str]:
        """
        Persist this saver's state under save_path.
        :param sess: active session for session-based frameworks (e.g. TF)
        :param save_path: full checkpoint path (typically directory plus self.path plus checkpoint count)
        :return: list of all file paths that were written
        """
        raise NotImplementedError

    def restore(self, sess: Any, restore_path: str) -> None:
        """
        Load this saver's state from restore_path.
        :param sess: active session for session-based frameworks (e.g. TF)
        :param restore_path: full checkpoint path to load from
        """
        raise NotImplementedError

    def merge(self, other: 'Saver') -> None:
        """
        Fold another saver (with the same path) into this one.
        :param other: saver to be merged into self
        """
        raise NotImplementedError
class SaverCollection(object):
    """
    A collection of Saver objects keyed by their (unique) path. Adding a saver whose
    path already exists merges the two savers into one. This allows, for example, a
    saver that writes a generic key/value file shared by all networks to use a common
    path, so all such savers collapse into a single saver handling every network.

    NOTE: If two savers have the same path, the respective saver class must support merging them
    into a single saver that saves/restores all merged parameters.
    """
    def __init__(self, saver: Saver = None):
        """
        :param saver: optional initial saver for the collection
        """
        self._saver_dict = dict()  # type: Dict[str, Saver]
        if saver is not None:
            self._saver_dict[saver.path] = saver

    def add(self, saver: Saver):
        """
        Add a saver to the collection; if one with the same path exists, merge into it.
        :param saver: new saver to be added to collection
        """
        existing = self._saver_dict.get(saver.path)
        if existing is None:
            self._saver_dict[saver.path] = saver
        else:
            existing.merge(saver)

    def update(self, other: 'SaverCollection'):
        """
        Merge all savers of another collection into this one.
        :param other: saver collection to update self with.
        """
        for member in other:
            self.add(member)

    def save(self, sess: Any, save_path: str) -> List[str]:
        """
        Call save on every saver in the collection.
        :param sess: active session for session-based frameworks (e.g. TF)
        :param save_path: checkpoint path prefix; each saver writes under this prefix
            extended with its own path (see _full_path), so every saved file path starts
            with this prefix
        :return: list of all saved paths
        """
        saved_paths = list()
        for member in self:
            saved_paths.extend(member.save(sess, self._full_path(save_path, member)))
        return saved_paths

    def restore(self, sess: Any, restore_path: str) -> None:
        """
        Call restore on every saver in the collection.
        :param sess: active session for session-based frameworks (e.g. TF)
        :param restore_path: checkpoint path prefix to restore from.
        """
        for member in self:
            member.restore(sess, self._full_path(restore_path, member))

    def __iter__(self):
        """
        Iterate over the savers in the collection.
        :return: saver iterator
        """
        return iter(self._saver_dict.values())

    @staticmethod
    def _full_path(path_prefix: str, saver: Saver) -> str:
        """
        Build the full save path by appending the saver's own path to the prefix.
        :param path_prefix: prefix of the path
        :param saver: saver object to get unique path extension from
        :return: full path
        """
        return path_prefix if saver.path == "" else "{}.{}".format(path_prefix, saver.path)
import argparse
import os
import matplotlib
import matplotlib.pyplot as plt
from rl_coach.dashboard_components.signals_file import SignalsFile
class FigureMaker(object):
    """
    Builds a grid of per-environment signal plots from a root directory of Coach
    experiments, where each environment directory holds experiment sub-directories
    containing a csv file of logged signals.
    """
    def __init__(self, path, cols, smoothness, signal_to_plot, x_axis, color):
        """
        :param path: root directory of the experiments (one sub-directory per environment)
        :param cols: number of subplot columns in the figure grid
        :param smoothness: number of consecutive episodes to average over
        :param signal_to_plot: name of the csv signal plotted on the y axis
        :param x_axis: name of the csv signal used as the x axis
        :param color: matplotlib color spec for this set of curves
        """
        self.experiments_path = path
        self.environments = self.list_environments()
        self.cols = cols
        # ceil division so every environment gets a subplot cell
        self.rows = int((len(self.environments) + cols - 1) / cols)
        self.smoothness = smoothness
        self.signal_to_plot = signal_to_plot
        self.x_axis = x_axis
        self.color = color
        params = {
            'axes.labelsize': 8,
            'font.size': 10,
            'legend.fontsize': 14,
            'xtick.labelsize': 8,
            'ytick.labelsize': 8,
            'text.usetex': False,
            'figure.figsize': [16, 30]
        }
        matplotlib.rcParams.update(params)

    def list_environments(self):
        """
        List environment directories under the experiments root, keeping only those whose
        most recent experiment directory contains a csv file.
        :return: list of (environment_name, csv_file_path) tuples
        """
        environments = sorted(e.name for e in os.scandir(self.experiments_path) if e.is_dir())
        return self.filter_environments(environments)

    def filter_environments(self, environments):
        """
        Filter out environments whose latest experiment directory has no csv file.
        :param environments: environment directory names to inspect
        :return: list of (environment_name, csv_file_path) tuples
        """
        filtered_environments = []
        for environment in environments:
            path = os.path.join(self.experiments_path, environment)
            experiments = [e.name for e in os.scandir(path) if e.is_dir()]
            # take only the last updated experiment directory
            last_experiment_dir = max([os.path.join(path, root) for root in experiments], key=os.path.getctime)
            # make sure there is a csv file inside it; stop at the first csv found, so an
            # experiment directory with several csv files is not added (and plotted) twice
            for file_path in os.listdir(last_experiment_dir):
                full_file_path = os.path.join(last_experiment_dir, file_path)
                if os.path.isfile(full_file_path) and file_path.endswith('.csv'):
                    filtered_environments.append((environment, full_file_path))
                    break
        return filtered_environments

    def plot_figures(self, prev_subplot_map=None):
        """
        Plot the selected signal for every environment into a subplot grid.
        :param prev_subplot_map: optional mapping of environment name -> subplot index from a
                                 previous call; when given, curves are overlaid on the matching
                                 subplots and environments not in the map are skipped
        :return: the mapping of environment name -> subplot index used for this figure
        """
        subplot_map = {}
        for idx, (environment, full_file_path) in enumerate(self.environments):
            # extract a short environment name from the directory name
            # (assumes names of the form '...level_<Name>-...Deterministic...' - TODO confirm)
            environment = environment.split('level')[1].split('-')[1].split('Deterministic')[0][1:]
            if prev_subplot_map:
                # skip on environments which were not plotted before
                if environment not in prev_subplot_map.keys():
                    continue
                subplot_idx = prev_subplot_map[environment]
            else:
                subplot_idx = idx + 1
            print(environment)
            axis = plt.subplot(self.rows, self.cols, subplot_idx)
            subplot_map[environment] = subplot_idx
            signals = SignalsFile(full_file_path)
            signals.change_averaging_window(self.smoothness, force=True, signals=[self.signal_to_plot])
            steps = signals.bokeh_source.data[self.x_axis]
            rewards = signals.bokeh_source.data[self.signal_to_plot]
            yloc = plt.MaxNLocator(4)
            axis.yaxis.set_major_locator(yloc)
            axis.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
            plt.title(environment, fontsize=10, y=1.08)
            plt.plot(steps, rewards, self.color, linewidth=0.8)
            plt.subplots_adjust(hspace=2.0, wspace=0.4)
        return subplot_map

    def save_pdf(self, name):
        """Save the current figure as <name>.pdf."""
        plt.savefig(name + ".pdf", bbox_inches='tight')

    def show_figures(self):
        """Display the current figure interactively."""
        plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # required: the original default of None crashed below with AttributeError on
    # args.paths.split(",") when -p was omitted
    parser.add_argument('-p', '--paths',
                        help="(string) Root directory of the experiments",
                        required=True,
                        type=str)
    parser.add_argument('-c', '--cols',
                        help="(int) Number of plot columns",
                        default=6,
                        type=int)
    parser.add_argument('-s', '--smoothness',
                        help="(int) Number of consequent episodes to average over",
                        default=100,
                        type=int)
    parser.add_argument('-sig', '--signal',
                        help="(str) The name of the signal to plot",
                        default='Evaluation Reward',
                        type=str)
    parser.add_argument('-x', '--x_axis',
                        help="(str) The meaning of the x axis",
                        default='Total steps',
                        type=str)
    parser.add_argument('-pdf', '--pdf',
                        help="(str) A name of a pdf to save to",
                        default='atari',
                        type=str)
    args = parser.parse_args()

    # multiple comma-separated roots are overlaid on the same subplots, each in its own color
    paths = args.paths.split(",")
    subplot_map = None
    for idx, path in enumerate(paths):
        maker = FigureMaker(path, cols=args.cols, smoothness=args.smoothness, signal_to_plot=args.signal,
                            x_axis=args.x_axis, color='C{}'.format(idx))
        subplot_map = maker.plot_figures(subplot_map)
    plt.legend(paths)
    maker.save_pdf(args.pdf)
    maker.show_figures()
import inspect
import json
import os
import sys
import types
from collections import OrderedDict
from enum import Enum
from typing import Dict, List, Union
from rl_coach.core_types import TrainingSteps, EnvironmentSteps, GradientClippingMethod, RunPhase, \
SelectedPhaseOnlyDumpFilter, MaxDumpFilter
from rl_coach.filters.filter import NoInputFilter
from rl_coach.logger import screen
class Frameworks(Enum):
    """Deep learning frameworks available as Coach backends."""
    tensorflow = "TensorFlow"
    mxnet = "MXNet"
class EmbedderScheme(Enum):
    """Named complexity presets for the input embedder scheme (Empty/Shallow/Medium/Deep)."""
    Empty = "Empty"
    Shallow = "Shallow"
    Medium = "Medium"
    Deep = "Deep"
class MiddlewareScheme(Enum):
    """Named complexity presets for the middleware scheme (Empty/Shallow/Medium/Deep)."""
    Empty = "Empty"
    Shallow = "Shallow"
    Medium = "Medium"
    Deep = "Deep"
class EmbeddingMergerType(Enum):
    """How multiple input embeddings are merged into a single representation."""
    Concat = 0
    Sum = 1
    # currently disabled merger types:
    #ConcatDepthWise = 2
    #Multiply = 3
class RunType(Enum):
    """The role a Coach process plays in a run (orchestrator / trainer / rollout worker)."""
    ORCHESTRATOR = "orchestrator"
    TRAINER = "trainer"
    ROLLOUT_WORKER = "rollout-worker"

    def __str__(self):
        # format as the plain string value (e.g. "trainer") rather than "RunType.TRAINER"
        return self.value
class DeviceType(Enum):
    """The kind of compute device (CPU or GPU)."""
    CPU = 'cpu'
    GPU = 'gpu'
class Device(object):
    """
    A handle identifying a single compute device: a type (CPU/GPU) plus an ordinal
    index used to distinguish between multiple GPUs.
    """
    def __init__(self, device_type: DeviceType, index: int=0):
        """
        :param device_type: type of device (CPU/GPU)
        :param index: index of device (only used if device type is GPU)
        """
        self._device_type = device_type
        self._index = index

    @property
    def device_type(self):
        # read-only view of the device kind
        return self._device_type

    @property
    def index(self):
        # read-only view of the device ordinal
        return self._index

    def __str__(self):
        # concatenate the type's string form with the index
        return "%s%s" % (self._device_type, self._index)

    def __repr__(self):
        return self.__str__()
# DistributedCoachSynchronizationType provides the synchronization type for distributed Coach.
# The default value is None, which means the algorithm or preset cannot be used with distributed Coach.
class DistributedCoachSynchronizationType(Enum):
    """How the trainer and the distributed rollout workers synchronize policies and experiences."""
    # In SYNC mode, the trainer waits for all the experiences to be gathered from distributed rollout workers before
    # training a new policy and the rollout workers wait for a new policy before gathering experiences.
    SYNC = "sync"

    # In ASYNC mode, the trainer doesn't wait for any set of experiences to be gathered from distributed rollout workers
    # and the rollout workers continously gather experiences loading new policies, whenever they become available.
    ASYNC = "async"
def iterable_to_items(obj):
    """
    Return (key, value)-style pairs for the given container: dict-style items for
    mappings, or (index, element) pairs for lists.
    :param obj: a mapping (dict / OrderedDict / MappingProxyType) or a list
    :return: an iterable of (key, value) pairs
    :raises ValueError: if obj is neither a mapping nor a list
    """
    # OrderedDict is a dict subclass, so a single isinstance check covers it as well
    if isinstance(obj, (dict, types.MappingProxyType)):
        return obj.items()
    if isinstance(obj, list):
        return enumerate(obj)
    raise ValueError("The given object is not a dict or a list")
def unfold_dict_or_list(obj: Union[Dict, List, OrderedDict]):
    """
    Recursively unfolds all the parameters in dictionaries and lists
    :param obj: a dictionary or list to unfold
    :return: the unfolded parameters dictionary (an OrderedDict)
    """
    parameters = OrderedDict()
    items = iterable_to_items(obj)
    for k, v in items:
        if isinstance(v, dict) or isinstance(v, list) or isinstance(v, OrderedDict):
            # containers are unfolded recursively, except tensorflow objects which are kept as-is
            if 'tensorflow.' not in str(v.__class__):
                parameters[k] = unfold_dict_or_list(v)
            elif 'tensorflow.' in str(v.__class__):
                # (complement of the check above - always taken when reached)
                parameters[k] = v
        elif hasattr(v, '__dict__'):
            # a generic object - unfold its attribute dictionary
            sub_params = v.__dict__
            if '__objclass__' not in sub_params.keys():
                try:
                    parameters[k] = unfold_dict_or_list(sub_params)
                except RecursionError:
                    # cyclic/self references - fall back to the raw attribute dict
                    parameters[k] = sub_params
                # record the class name alongside the unfolded attributes
                parameters[k]['__class__'] = v.__class__.__name__
            else:
                # unfolding this type of object will result in infinite recursion
                parameters[k] = sub_params
        else:
            parameters[k] = v
    # sort plain dicts for a deterministic printout; ordered inputs (OrderedDict/list) keep their order
    if not isinstance(obj, OrderedDict) and not isinstance(obj, list):
        parameters = OrderedDict(sorted(parameters.items()))
    return parameters
class Parameters(object):
    """
    Base class for all parameter containers in Coach. New attributes may only be created
    inside a constructor (or explicitly via register_var); assigning to an unknown attribute
    anywhere else raises TypeError, which catches misspelled parameter names early.
    """
    def __setattr__(self, key, value):
        # sys._getframe(1) is the caller's frame; creating *new* attributes is only allowed
        # when the calling function is named __init__ (any constructor in the hierarchy)
        caller_name = sys._getframe(1).f_code.co_name
        if caller_name != '__init__' and not hasattr(self, key):
            raise TypeError("Parameter '{}' does not exist in {}. Parameters are only to be defined in a constructor of"
                            " a class inheriting from Parameters. In order to explicitly register a new parameter "
                            "outside of a constructor use register_var().".
                            format(key, self.__class__))
        object.__setattr__(self, key, value)

    @property
    def path(self):
        """'module_path:ClassName' locator of the class this parameters object parameterizes."""
        if hasattr(self, 'parameterized_class_name'):
            # [:-3] strips the file extension before re-appending '.py' (no-op for plain .py files)
            module_path = os.path.relpath(inspect.getfile(self.__class__), os.getcwd())[:-3] + '.py'
            return ':'.join([module_path, self.parameterized_class_name])
        else:
            raise ValueError("The parameters class does not have an attached class it parameterizes. "
                             "The self.parameterized_class_name should be set to the parameterized class.")

    def register_var(self, key, value):
        """
        Explicitly register a new parameter outside of a constructor.
        :param key: name of the new parameter (must not already exist)
        :param value: initial value for the new parameter
        """
        if hasattr(self, key):
            raise TypeError("Cannot register an already existing parameter '{}'. ".format(key))
        object.__setattr__(self, key, value)

    def __str__(self):
        # pretty-print as: "ClassName" followed by a JSON dump of the recursively unfolded parameters
        result = "\"{}\" {}\n".format(self.__class__.__name__,
                                      json.dumps(unfold_dict_or_list(self.__dict__), indent=4, default=repr))
        return result
class AlgorithmParameters(Parameters):
    """
    Default values for the algorithm-specific hyper-parameters shared by all agents.
    Concrete agents subclass this and override the relevant defaults.
    """
    def __init__(self):
        # Architecture parameters
        self.use_accumulated_reward_as_measurement = False

        # Agent parameters
        # how many environment steps to act between consecutive training phases
        self.num_consecutive_playing_steps = EnvironmentSteps(1)
        self.num_consecutive_training_steps = 1  # TODO: update this to TrainingSteps
        # if True, actions during heatup are taken by the network rather than randomly
        self.heatup_using_network_decisions = False
        # the reward discount factor
        self.discount = 0.99
        self.apply_gradients_every_x_episodes = 5
        # 0 steps means the target network is not updated on a step-count basis
        self.num_steps_between_copying_online_weights_to_target = TrainingSteps(0)
        # 1.0 copies the online weights fully; fractional values presumably blend
        # old and new target weights (soft update) - verify in the agent implementation
        self.rate_for_copying_weights_to_target = 1.0
        self.load_memory_from_file_path = None
        self.store_transitions_only_when_episodes_are_terminated = False

        # HRL / HER related params
        self.in_action_space = None

        # distributed agents params
        self.share_statistics_between_workers = True

        # n-step returns
        self.n_step = -1  # calculate the total return (no bootstrap, by default)

        # Distributed Coach params
        self.distributed_coach_synchronization_type = None

        # Should the workers wait for full episode
        self.act_for_full_episodes = False

        # Support for parameter noise
        self.supports_parameter_noise = False

        # Override, in retrospective, all the episode rewards with the last reward in the episode
        # (sometimes useful for sparse, end of the episode, rewards problems)
        self.override_episode_rewards_with_the_last_transition_reward = False

        # Filters - TODO consider creating a FilterParameters class and initialize the filters with it
        self.update_pre_network_filters_state_on_train = False
        self.update_pre_network_filters_state_on_inference = True
class PresetValidationParameters(Parameters):
    """
    Parameters controlling how (and whether) a preset is exercised by the
    automated reward / trace test suites.
    """
    def __init__(self,
                 test=False,
                 min_reward_threshold=0,
                 max_episodes_to_achieve_reward=1,
                 num_workers=1,
                 reward_test_level=None,
                 test_using_a_trace_test=True,
                 trace_test_levels=None,
                 trace_max_env_steps=5000,
                 read_csv_tries=200):
        """
        :param test:
            A flag which specifies if the preset should be tested as part of the validation process.
        :param min_reward_threshold:
            The minimum reward that the agent should pass after max_episodes_to_achieve_reward episodes when the
            preset is run.
        :param max_episodes_to_achieve_reward:
            The maximum number of episodes that the agent should train using the preset in order to achieve the
            reward specified by min_reward_threshold.
        :param num_workers:
            The number of workers that should be used when running this preset in the test suite for validation.
        :param reward_test_level:
            The environment level or levels, given by a list of strings, that should be tested as part of the
            reward tests suite.
        :param test_using_a_trace_test:
            A flag that specifies if the preset should be run as part of the trace tests suite.
        :param trace_test_levels:
            The environment level or levels, given by a list of strings, that should be tested as part of the
            trace tests suite.
        :param trace_max_env_steps:
            An integer representing the maximum number of environment steps to run when running this preset as part
            of the trace tests suite.
        :param read_csv_tries:
            The number of retries to attempt for reading the experiment csv file, before declaring failure.
        """
        super().__init__()
        # setting a seed will only work for non-parallel algorithms. Parallel algorithms add uncontrollable noise in
        # the form of different workers starting at different times, and getting different assignments of CPU
        # time from the OS.
        # Testing parameters
        self.test = test
        self.min_reward_threshold = min_reward_threshold
        self.max_episodes_to_achieve_reward = max_episodes_to_achieve_reward
        self.num_workers = num_workers
        self.reward_test_level = reward_test_level
        self.test_using_a_trace_test = test_using_a_trace_test
        self.trace_test_levels = trace_test_levels
        self.trace_max_env_steps = trace_max_env_steps
        self.read_csv_tries = read_csv_tries
class NetworkParameters(Parameters):
    """
    Parameters describing the structure, optimizer and training setup of a single
    neural network (one NetworkWrapper) used by an agent.
    """
    def __init__(self,
                 force_cpu=False,
                 async_training=False,
                 shared_optimizer=True,
                 scale_down_gradients_by_number_of_workers_for_sync_training=True,
                 clip_gradients=None,
                 gradients_clipping_method=GradientClippingMethod.ClipByGlobalNorm,
                 l2_regularization=0,
                 learning_rate=0.00025,
                 learning_rate_decay_rate=0,
                 learning_rate_decay_steps=0,
                 input_embedders_parameters=None,
                 embedding_merger_type=EmbeddingMergerType.Concat,
                 middleware_parameters=None,
                 heads_parameters=None,
                 use_separate_networks_per_head=False,
                 optimizer_type='Adam',
                 optimizer_epsilon=0.0001,
                 adam_optimizer_beta1=0.9,
                 adam_optimizer_beta2=0.99,
                 rms_prop_optimizer_decay=0.9,
                 batch_size=32,
                 replace_mse_with_huber_loss=False,
                 create_target_network=False,
                 tensorflow_support=True,
                 softmax_temperature=1):
        """
        :param force_cpu:
            Force the neural networks to run on the CPU even if a GPU is available
        :param async_training:
            If set to True, asynchronous training will be used, meaning that each workers will progress in its own
            speed, while not waiting for the rest of the workers to calculate their gradients.
        :param shared_optimizer:
            If set to True, a central optimizer which will be shared with all the workers will be used for applying
            gradients to the network. Otherwise, each worker will have its own optimizer with its own internal
            parameters that will only be affected by the gradients calculated by that worker
        :param scale_down_gradients_by_number_of_workers_for_sync_training:
            If set to True, in synchronous training, the gradients of each worker will be scaled down by the
            number of workers. This essentially means that the gradients applied to the network are the average
            of the gradients over all the workers.
        :param clip_gradients:
            A value that will be used for clipping the gradients of the network. If set to None, no gradient clipping
            will be applied. Otherwise, the gradients will be clipped according to the gradients_clipping_method.
        :param gradients_clipping_method:
            A gradient clipping method, defined by a GradientClippingMethod enum, and that will be used to clip the
            gradients of the network. This will only be used if the clip_gradients value is defined as a value other
            than None.
        :param l2_regularization:
            A L2 regularization weight that will be applied to the network weights while calculating the loss function
        :param learning_rate:
            The learning rate for the network
        :param learning_rate_decay_rate:
            If this value is larger than 0, an exponential decay will be applied to the network learning rate.
            The rate of the decay is defined by this parameter, and the number of training steps the decay will be
            applied is defined by learning_rate_decay_steps. Notice that both parameters should be defined in order
            for this to work correctly.
        :param learning_rate_decay_steps:
            If the learning_rate_decay_rate of the network is larger than 0, an exponential decay will be applied to
            the network learning rate. The number of steps the decay will be applied is defined by this parameter.
            Notice that both this parameter, as well as learning_rate_decay_rate should be defined in order for the
            learning rate decay to work correctly.
        :param input_embedders_parameters:
            A dictionary mapping between input names and input embedders (InputEmbedderParameters) to use for the
            network. Each of the keys is an input name as returned from the environment in the state.
            For example, if the environment returns a state containing 'observation' and 'measurements', then
            the keys for the input embedders dictionary can be either 'observation' to use the observation as input,
            'measurements' to use the measurements as input, or both.
            The embedder type will be automatically selected according to the input type. Vector inputs will
            produce a fully connected embedder, and image inputs will produce a convolutional embedder.
            If None (the default), an empty dictionary is used.
        :param embedding_merger_type:
            The type of embedding merging to use, given by one of the EmbeddingMergerType enum values.
            This will be used to merge the outputs of all the input embedders into a single embbeding.
        :param middleware_parameters:
            The parameters of the middleware to use, given by a MiddlewareParameters object.
            Each network will have only a single middleware embedder which will take the merged embeddings from the
            input embedders and pass them through more neural network layers.
        :param heads_parameters:
            A list of heads for the network given by their corresponding HeadParameters.
            Each network can have one or multiple network heads, where each one will take the output of the middleware
            and make some additional computation on top of it. Additionally, each head calculates a weighted loss value,
            and the loss values from all the heads will be summed later on.
            If None (the default), an empty list is used.
        :param use_separate_networks_per_head:
            A flag that allows using different copies of the input embedders and middleware for each one of the heads.
            Regularly, the heads will have a shared input, but in the case where use_separate_networks_per_head is set
            to True, each one of the heads will get a different input.
        :param optimizer_type:
            A string specifying the optimizer type to use for updating the network. The available optimizers are
            Adam, RMSProp and LBFGS.
        :param optimizer_epsilon:
            An internal optimizer parameter used for Adam and RMSProp.
        :param adam_optimizer_beta1:
            An beta1 internal optimizer parameter used for Adam. It will be used only if Adam was selected as the
            optimizer for the network.
        :param adam_optimizer_beta2:
            An beta2 internal optimizer parameter used for Adam. It will be used only if Adam was selected as the
            optimizer for the network.
        :param rms_prop_optimizer_decay:
            The decay value for the RMSProp optimizer, which will be used only in case the RMSProp optimizer was
            selected for this network.
        :param batch_size:
            The batch size to use when updating the network.
        :param replace_mse_with_huber_loss:
            If set to True, a Huber loss is used in place of the mean squared error loss where applicable.
        :param create_target_network:
            If this flag is set to True, an additional copy of the network will be created and initialized with the
            same weights as the online network. It can then be queried, and its weights can be synced from the
            online network at will.
        :param tensorflow_support:
            A flag which specifies if the network is supported by the TensorFlow framework.
        :param softmax_temperature:
            If a softmax is present in the network head output, use this temperature
        """
        super().__init__()
        # mutable defaults ({} / []) in the signature would be evaluated once and shared
        # between every NetworkParameters instance - create fresh containers instead
        if input_embedders_parameters is None:
            input_embedders_parameters = {}
        if heads_parameters is None:
            heads_parameters = []
        self.framework = Frameworks.tensorflow
        self.sess = None

        # hardware parameters
        self.force_cpu = force_cpu

        # distributed training options
        self.async_training = async_training
        self.shared_optimizer = shared_optimizer
        self.scale_down_gradients_by_number_of_workers_for_sync_training = scale_down_gradients_by_number_of_workers_for_sync_training

        # regularization
        self.clip_gradients = clip_gradients
        self.gradients_clipping_method = gradients_clipping_method
        self.l2_regularization = l2_regularization

        # learning rate
        self.learning_rate = learning_rate
        self.learning_rate_decay_rate = learning_rate_decay_rate
        self.learning_rate_decay_steps = learning_rate_decay_steps

        # structure
        self.input_embedders_parameters = input_embedders_parameters
        self.embedding_merger_type = embedding_merger_type
        self.middleware_parameters = middleware_parameters
        self.heads_parameters = heads_parameters
        self.use_separate_networks_per_head = use_separate_networks_per_head
        self.optimizer_type = optimizer_type
        self.replace_mse_with_huber_loss = replace_mse_with_huber_loss
        self.create_target_network = create_target_network

        # Framework support
        self.tensorflow_support = tensorflow_support

        # Hyper-Parameter values
        self.optimizer_epsilon = optimizer_epsilon
        self.adam_optimizer_beta1 = adam_optimizer_beta1
        self.adam_optimizer_beta2 = adam_optimizer_beta2
        self.rms_prop_optimizer_decay = rms_prop_optimizer_decay
        self.batch_size = batch_size
        self.softmax_temperature = softmax_temperature
class NetworkComponentParameters(Parameters):
    """
    Base parameters class for a single network component (embedder / middleware / head).
    """
    def __init__(self, dense_layer):
        """
        :param dense_layer: the dense layer class to use for building this component
        """
        # call the base constructor for consistency with the other Parameters subclasses
        super().__init__()
        self.dense_layer = dense_layer
class VisualizationParameters(Parameters):
    """
    Parameters controlling rendering, logging to csv / TensorBoard, and video dumping.
    """
    def __init__(self,
                 print_networks_summary=False,
                 dump_csv=True,
                 dump_signals_to_csv_every_x_episodes=5,
                 dump_gifs=False,
                 dump_mp4=False,
                 video_dump_methods=None,
                 dump_in_episode_signals=False,
                 dump_parameters_documentation=True,
                 render=False,
                 native_rendering=False,
                 max_fps_for_human_control=10,
                 tensorboard=False,
                 add_rendered_image_to_env_response=False):
        """
        :param print_networks_summary:
            If set to True, a summary of all the networks structure will be printed at the beginning of the experiment
        :param dump_csv:
            If set to True, the logger will dump logs to a csv file once in every dump_signals_to_csv_every_x_episodes
            episodes. The logs can be later used to visualize the training process using Coach Dashboard.
        :param dump_signals_to_csv_every_x_episodes:
            Defines the number of episodes between writing new data to the csv log files. Lower values can affect
            performance, as writing to disk may take time, and it is done synchronously.
        :param dump_gifs:
            If set to True, GIF videos of the environment will be stored into the experiment directory according to
            the filters defined in video_dump_methods.
        :param dump_mp4:
            If set to True, MP4 videos of the environment will be stored into the experiment directory according to
            the filters defined in video_dump_methods.
        :param dump_in_episode_signals:
            If set to True, csv files will be dumped for each episode for inspecting different metrics within the
            episode. This means that for each step in each episode, different metrics such as the reward, the
            future return, etc. will be saved. Setting this to True may affect performance severely, and therefore
            this should be used only for debugging purposes.
        :param dump_parameters_documentation:
            If set to True, a json file containing all the agent parameters will be saved in the experiment directory.
            This may be very useful for inspecting the values defined for each parameters and making sure that all
            the parameters are defined as expected.
        :param render:
            If set to True, the environment render function will be called for each step, rendering the image of the
            environment. This may affect the performance of training, and is highly dependent on the environment.
            By default, Coach uses PyGame to render the environment image instead of the environment specific rendered.
            To change this, use the native_rendering flag.
        :param native_rendering:
            If set to True, the environment native renderer will be used for rendering the environment image.
            In some cases this can be slower than rendering using PyGame through Coach, but in other cases the
            environment opens its native renderer by default, so rendering with PyGame is an unnecessary overhead.
        :param max_fps_for_human_control:
            The maximum number of frames per second used while playing the environment as a human. This only has
            effect while using the --play flag for Coach.
        :param tensorboard:
            If set to True, TensorBoard summaries will be stored in the experiment directory. This can later be
            loaded in TensorBoard in order to visualize the training process.
        :param video_dump_methods:
            A list of dump methods that will be used as filters for deciding when to save videos.
            The filters in the list will be checked one after the other until the first dump method that returns
            false for should_dump() in the environment class. This list will only be used if dump_mp4 or dump_gif are
            set to True.
        :param add_rendered_image_to_env_response:
            Some environments have a different observation compared to the one displayed while rendering.
            For some cases it can be useful to pass the rendered image to the agent for visualization purposes.
            If this flag is set to True, the rendered image will be added to the environment EnvResponse object,
            which will be passed to the agent and allow using those images.
        """
        super().__init__()
        # the default (dump evaluation episodes with a new max reward) is created here
        # rather than in the signature, to avoid a shared mutable default argument
        if video_dump_methods is None:
            video_dump_methods = [SelectedPhaseOnlyDumpFilter(RunPhase.TEST), MaxDumpFilter()]
        self.print_networks_summary = print_networks_summary
        self.dump_csv = dump_csv
        self.dump_gifs = dump_gifs
        self.dump_mp4 = dump_mp4
        self.dump_signals_to_csv_every_x_episodes = dump_signals_to_csv_every_x_episodes
        self.dump_in_episode_signals = dump_in_episode_signals
        self.dump_parameters_documentation = dump_parameters_documentation
        self.render = render
        self.native_rendering = native_rendering
        self.max_fps_for_human_control = max_fps_for_human_control
        self.tensorboard = tensorboard
        # note: stored under a different name than the constructor argument
        self.video_dump_filters = video_dump_methods
        self.add_rendered_image_to_env_response = add_rendered_image_to_env_response
class AgentParameters(Parameters):
    """
    The top-level parameters object for a single agent, aggregating the algorithm,
    exploration, memory, network and visualization parameter sets.
    """
    def __init__(self, algorithm: AlgorithmParameters, exploration: 'ExplorationParameters', memory: 'MemoryParameters',
                 networks: Dict[str, NetworkParameters], visualization: VisualizationParameters=None):
        """
        :param algorithm:
            A class inheriting AlgorithmParameters.
            The parameters used for the specific algorithm used by the agent.
            These parameters can be later referenced in the agent implementation through self.ap.algorithm.
        :param exploration:
            Either a class inheriting ExplorationParameters or a dictionary mapping between action
            space types and their corresponding ExplorationParameters. If a dictionary was used,
            when the agent will be instantiated, the correct exploration policy parameters will be used
            according to the real type of the environment action space.
            These parameters will be used to instantiate the exploration policy.
        :param memory:
            A class inheriting MemoryParameters. It defines all the parameters used by the memory module.
        :param networks:
            A dictionary mapping between network names and their corresponding network parameters, defined
            as a class inheriting NetworkParameters. Each element will be used in order to instantiate
            a NetworkWrapper class, and all the network wrappers will be stored in the agent under
            self.network_wrappers. self.network_wrappers is a dict mapping between the network name that
            was given in the networks dict, and the instantiated network wrapper.
        :param visualization:
            A class inheriting VisualizationParameters and defining various parameters that can be
            used for visualization purposes, such as printing to the screen, rendering, and saving videos.
            If None (the default), a fresh VisualizationParameters instance is created.
        """
        super().__init__()
        # a VisualizationParameters() instance in the signature would be evaluated once
        # and shared between all AgentParameters instances (mutable default argument) -
        # create a fresh one per instance instead
        if visualization is None:
            visualization = VisualizationParameters()
        self.visualization = visualization
        self.algorithm = algorithm
        self.exploration = exploration
        self.memory = memory
        self.network_wrappers = networks
        self.input_filter = None
        self.output_filter = None
        self.pre_network_filter = NoInputFilter()
        self.full_name_id = None
        self.name = None
        self.is_a_highest_level_agent = True
        self.is_a_lowest_level_agent = True
        self.task_parameters = None
        self.is_batch_rl_training = False

    @property
    def path(self):
        # the module:class locator of the agent class these parameters instantiate
        return 'rl_coach.agents.agent:Agent'
class TaskParameters(Parameters):
    """
    Parameters describing a single run of Coach: the framework, experiment paths,
    checkpointing behavior and hardware setup.
    """
    def __init__(self, framework_type: Frameworks=Frameworks.tensorflow, evaluate_only: int=None, use_cpu: bool=False,
                 experiment_path='/tmp', seed=None, checkpoint_save_secs=None, checkpoint_restore_dir=None,
                 checkpoint_restore_path=None, checkpoint_save_dir=None, export_onnx_graph: bool=False,
                 apply_stop_condition: bool=False, num_gpu: int=1):
        """
        :param framework_type: deep learning framework type. currently only tensorflow is supported
        :param evaluate_only: if not None, the task will be used only for evaluating the model for the given number of steps.
            A value of 0 means that task will be evaluated for an infinite number of steps.
        :param use_cpu: use the cpu for this task
        :param experiment_path: the path to the directory which will store all the experiment outputs
        :param seed: a seed to use for the random numbers generator
        :param checkpoint_save_secs: the number of seconds between each checkpoint saving
        :param checkpoint_restore_dir:
            [DEPRECATED - will be removed in one of the next releases - switch to checkpoint_restore_path]
            the dir to restore the checkpoints from
        :param checkpoint_restore_path: the path to restore the checkpoints from
        :param checkpoint_save_dir: the directory to store the checkpoints in
        :param export_onnx_graph: If set to True, this will export an onnx graph each time a checkpoint is saved
        :param apply_stop_condition: If set to True, this will apply the stop condition defined by reaching a target success rate
        :param num_gpu: number of GPUs to use
        """
        # call the base constructor for consistency with the other Parameters subclasses
        super().__init__()
        self.framework_type = framework_type
        self.task_index = 0  # TODO: not really needed
        self.evaluate_only = evaluate_only
        self.use_cpu = use_cpu
        self.experiment_path = experiment_path
        self.checkpoint_save_secs = checkpoint_save_secs
        if checkpoint_restore_dir:
            # the deprecated argument takes precedence, with a warning (typo in the
            # original message - 'DEPECRATED' - fixed here)
            screen.warning('TaskParameters.checkpoint_restore_dir is DEPRECATED and will be removed in one of the next '
                           'releases. Please switch to using TaskParameters.checkpoint_restore_path, with your '
                           'directory path. ')
            self.checkpoint_restore_path = checkpoint_restore_dir
        else:
            self.checkpoint_restore_path = checkpoint_restore_path
        self.checkpoint_save_dir = checkpoint_save_dir
        self.seed = seed
        self.export_onnx_graph = export_onnx_graph
        self.apply_stop_condition = apply_stop_condition
        self.num_gpu = num_gpu
class DistributedTaskParameters(TaskParameters):
    """
    Task parameters for a single process in a distributed (parameter server / worker)
    TensorFlow run.
    """
    def __init__(self, framework_type: Frameworks, parameters_server_hosts: str, worker_hosts: str, job_type: str,
                 task_index: int, evaluate_only: int=None, num_tasks: int=None,
                 num_training_tasks: int=None, use_cpu: bool=False, experiment_path=None, dnd=None,
                 shared_memory_scratchpad=None, seed=None, checkpoint_save_secs=None, checkpoint_restore_path=None,
                 checkpoint_save_dir=None, export_onnx_graph: bool=False, apply_stop_condition: bool=False):
        """
        :param framework_type: deep learning framework type. currently only tensorflow is supported
        :param evaluate_only: if not None, the task will be used only for evaluating the model for the given number of steps.
            A value of 0 means that task will be evaluated for an infinite number of steps.
        :param parameters_server_hosts: comma-separated list of hostname:port pairs to which the parameter servers are
            assigned
        :param worker_hosts: comma-separated list of hostname:port pairs to which the workers are assigned
        :param job_type: the job type - either ps (short for parameters server) or worker
        :param task_index: the index of the process
        :param num_tasks: the number of total tasks that are running (not including the parameters server)
        :param num_training_tasks: the number of tasks that are training (not including the parameters server)
        :param use_cpu: use the cpu for this task
        :param experiment_path: the path to the directory which will store all the experiment outputs
        :param dnd: an external DND to use for NEC. This is a workaround needed for a shared DND not using the scratchpad.
        :param seed: a seed to use for the random numbers generator
        :param checkpoint_save_secs: the number of seconds between each checkpoint saving
        :param checkpoint_restore_path: the path to restore the checkpoints from
        :param checkpoint_save_dir: the directory to store the checkpoints in
        :param export_onnx_graph: If set to True, this will export an onnx graph each time a checkpoint is saved
        :param apply_stop_condition: If set to True, this will apply the stop condition defined by reaching a target success rate
        """
        super().__init__(framework_type=framework_type, evaluate_only=evaluate_only, use_cpu=use_cpu,
                         experiment_path=experiment_path, seed=seed, checkpoint_save_secs=checkpoint_save_secs,
                         checkpoint_restore_path=checkpoint_restore_path, checkpoint_save_dir=checkpoint_save_dir,
                         export_onnx_graph=export_onnx_graph, apply_stop_condition=apply_stop_condition)
        self.parameters_server_hosts = parameters_server_hosts
        self.worker_hosts = worker_hosts
        self.job_type = job_type
        self.task_index = task_index
        self.num_tasks = num_tasks
        self.num_training_tasks = num_training_tasks
        self.device = None  # the replicated device which will be used for the global parameters
        self.worker_target = None
        self.dnd = dnd
        self.shared_memory_scratchpad = shared_memory_scratchpad
import numpy as np
import contextlib
with contextlib.redirect_stdout(None):
import pygame
from pygame.locals import HWSURFACE, DOUBLEBUF
class Renderer(object):
    """
    A thin pygame-based window used for rendering environment frames and collecting
    keyboard events.
    """
    def __init__(self):
        self.size = (1, 1)                  # window size in pixels (set by create_screen)
        self.screen = None                  # the pygame display surface, once created
        self.clock = pygame.time.Clock()
        self.display = pygame.display
        self.fps = 30
        self.pressed_keys = []              # keys currently held down
        self.is_open = False

    def create_screen(self, width, height):
        """
        Creates a pygame window
        :param width: the width of the window
        :param height: the height of the window
        :return: None
        """
        self.size = (width, height)
        self.screen = self.display.set_mode(self.size, HWSURFACE | DOUBLEBUF)
        self.display.set_caption("Coach")
        self.is_open = True

    def normalize_image(self, image):
        """
        Normalize image values to be between 0 and 255
        :param image: 2D/3D array containing an image with arbitrary values
        :return: the input image with values rescaled to 0-255
        """
        image_min, image_max = image.min(), image.max()
        if image_max == image_min:
            # a constant image has no dynamic range - return all zeros instead of
            # dividing by zero (which would produce NaNs)
            return np.zeros_like(image, dtype=np.float64)
        return 255.0 * (image - image_min) / (image_max - image_min)

    def render_image(self, image):
        """
        Render the given image to the pygame window
        :param image: a grayscale or color image in an arbitrary size. assumes that the channels are the last axis
        :return: None
        """
        if self.is_open:
            if len(image.shape) == 2:
                # grayscale -> replicate to 3 channels (channels-first, transposed below)
                image = np.stack([image] * 3)
            if len(image.shape) == 3:
                if image.shape[0] == 3 or image.shape[0] == 1:
                    # channels-first -> channels-last
                    image = np.transpose(image, (1, 2, 0))
            # pygame surfaces are indexed (width, height), so swap the image axes
            surface = pygame.surfarray.make_surface(image.swapaxes(0, 1))
            surface = pygame.transform.scale(surface, self.size)
            self.screen.blit(surface, (0, 0))
            self.display.flip()
            self.clock.tick()
            self.get_events()

    def get_events(self):
        """
        Get all the window events in the last tick and respond accordingly
        :return: None
        """
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                self.pressed_keys.append(event.key)
                # esc pressed
                if event.key == pygame.K_ESCAPE:
                    self.close()
            elif event.type == pygame.KEYUP:
                if event.key in self.pressed_keys:
                    self.pressed_keys.remove(event.key)
            elif event.type == pygame.QUIT:
                self.close()

    def get_key_names(self, key_ids):
        """
        Get the key name for each key index in the list
        :param key_ids: a list of key id's
        :return: a list of key names corresponding to the key id's
        """
        return [pygame.key.name(key_id) for key_id in key_ids]

    def close(self):
        """
        Close the pygame window
        :return: None
        """
        self.is_open = False
        pygame.quit()
import math
import os
import time

from rl_coach.base_parameters import TaskParameters, DistributedCoachSynchronizationType
from rl_coach.checkpoint import CheckpointStateFile, CheckpointStateReader
from rl_coach.core_types import EnvironmentSteps, RunPhase
from rl_coach.data_stores.data_store import SyncFiles
def wait_for(wait_func, data_store=None, timeout=10):
    """
    Block until wait_func() returns True.

    The condition is polled once per iteration with a 10 second sleep between polls,
    so the total wait is up to ``timeout * 10`` seconds.

    :param wait_func: a no-argument callable returning True when the wait is over
    :param data_store: optional data store to refresh (load_from_store) before each poll
    :param timeout: the number of polls to perform before giving up
    :raises ValueError: if the condition is still False after the final poll
    """
    for i in range(timeout):
        if data_store:
            data_store.load_from_store()
        if wait_func():
            return
        time.sleep(10)

    # one last time
    if wait_func():
        return

    # report the actual wall-clock wait: each of the `timeout` polls slept 10 seconds
    raise ValueError((
        'Waited {timeout_seconds} seconds, but condition timed out'
    ).format(
        timeout_seconds=timeout * 10,
    ))
def wait_for_trainer_ready(checkpoint_dir, data_store=None, timeout=10):
    """
    Block until the trainer signals readiness by dropping its ready file into the
    checkpoint directory.

    :param checkpoint_dir: the directory the trainer writes its ready file to
    :param data_store: optional data store to refresh before each poll
    :param timeout: the number of polls to perform before giving up
    """
    ready_file = os.path.join(checkpoint_dir, SyncFiles.TRAINER_READY.value)
    wait_for(lambda: os.path.exists(ready_file), data_store, timeout)
def rollout_worker(graph_manager, data_store, num_workers, task_parameters):
    """
    Wait for the first checkpoint, then repeatedly act in the environment with the
    latest policy published by the trainer.

    :param graph_manager: the graph manager to run rollouts with
    :param data_store: the data store used for syncing policies from the trainer
    :param num_workers: the total number of rollout workers sharing the playing steps
    :param task_parameters: the task parameters used to create the graph
    """
    # the trainer publishes its checkpoints to the path this worker restores from
    checkpoint_dir = task_parameters.checkpoint_restore_path
    wait_for_trainer_ready(checkpoint_dir, data_store)
    if (
        graph_manager.agent_params.algorithm.distributed_coach_synchronization_type
        == DistributedCoachSynchronizationType.SYNC
    ):
        # in SYNC mode the worker must block until a new policy arrives
        timeout = float("inf")
    else:
        timeout = None

    # this could probably be moved up into coach.py
    graph_manager.create_graph(task_parameters)

    data_store.load_policy(graph_manager, require_new_policy=False, timeout=60)

    with graph_manager.phase_context(RunPhase.TRAIN):
        # this worker should play its fraction of the total playing steps per rollout,
        # rounded up so that the workers jointly cover all the steps
        total_play_steps = graph_manager.agent_params.algorithm.num_consecutive_playing_steps.num_steps
        act_steps = math.ceil(total_play_steps / num_workers)

        for i in range(int(graph_manager.improve_steps.num_steps / act_steps)):
            if data_store.end_of_policies():
                break

            graph_manager.act(
                EnvironmentSteps(act_steps),
                wait_for_full_episodes=graph_manager.agent_params.algorithm.act_for_full_episodes,
            )
            data_store.load_policy(graph_manager, require_new_policy=True, timeout=timeout)
from typing import List, Tuple
import numpy as np
from rl_coach.core_types import EnvironmentSteps
class Schedule(object):
    """
    Base class for value schedules. A schedule exposes a ``current_value`` that is
    updated once per call to step().
    """
    def __init__(self, initial_value: float):
        """
        :param initial_value: the value the schedule starts from
        """
        self.initial_value = initial_value
        self.current_value = initial_value

    def step(self):
        """Advance the schedule by a single step. Subclasses must override this."""
        raise NotImplementedError("")
class ConstantSchedule(Schedule):
    """
    A schedule that keeps its initial value forever.
    """
    def __init__(self, initial_value: float):
        """
        :param initial_value: the constant value to hold
        """
        super().__init__(initial_value)

    def step(self):
        # nothing to update - the value never changes
        pass
class LinearSchedule(Schedule):
    """
    Linearly interpolates the value from an initial value to a final value over a
    fixed number of steps. Works for both decreasing and increasing schedules.
    """
    def __init__(self, initial_value: float, final_value: float, decay_steps: int):
        """
        :param initial_value: the initial value
        :param final_value: the final value
        :param decay_steps: the number of steps that are required to decay the initial value to the final value
        """
        super().__init__(initial_value)
        self.final_value = final_value
        self.decay_steps = decay_steps
        # the per-step change: positive for a decreasing schedule, negative for an increasing one
        self.decay_delta = (initial_value - final_value) / float(decay_steps)

    def step(self):
        """Move the current value one step closer to the final value, clipping at the bounds."""
        self.current_value -= self.decay_delta
        if self.final_value < self.initial_value:
            # decreasing schedule - never drop below the final value
            self.current_value = np.clip(self.current_value, self.final_value, self.initial_value)
        elif self.final_value > self.initial_value:
            # increasing schedule - never exceed the final value
            self.current_value = np.clip(self.current_value, self.initial_value, self.final_value)
class PieceWiseSchedule(Schedule):
    """
    A schedule which consists of multiple sub-schedules, where each one is used for a defined number of steps
    """
    def __init__(self, schedules: List[Tuple[Schedule, EnvironmentSteps]]):
        """
        :param schedules: a list of schedules to apply serially. Each element of the list should be a tuple of
                          2 elements - a schedule and the number of steps to run it in terms of EnvironmentSteps
        """
        # start from the first sub-schedule's initial value
        super().__init__(schedules[0][0].initial_value)
        self.schedules = schedules
        self.current_schedule = schedules[0]  # the active (schedule, num_steps) tuple
        self.current_schedule_idx = 0  # index of the active sub-schedule
        self.current_schedule_step_count = 0  # steps taken within the active sub-schedule

    def step(self):
        # advance the active sub-schedule first
        self.current_schedule[0].step()
        # switch to the next sub-schedule once the active one has exhausted its step
        # budget; the last sub-schedule keeps running indefinitely
        if self.current_schedule_idx < len(self.schedules) - 1 \
                and self.current_schedule_step_count >= self.current_schedule[1].num_steps:
            self.current_schedule_idx += 1
            self.current_schedule = self.schedules[self.current_schedule_idx]
            self.current_schedule_step_count = 0
        # expose the active sub-schedule's value, then count this step against its budget
        self.current_value = self.current_schedule[0].current_value
        self.current_schedule_step_count += 1
class ExponentialSchedule(Schedule):
    """
    An exponential schedule which decays (or grows) the value by a constant
    multiplicative coefficient on every step, clipped at a final value.
    """
    def __init__(self, initial_value: float, final_value: float, decay_coefficient: float):
        """
        :param initial_value: the initial value
        :param final_value: the final value
        :param decay_coefficient: the multiplicative coefficient applied on every step
        """
        super().__init__(initial_value)  # sets both initial_value and current_value
        self.final_value = final_value
        self.decay_coefficient = decay_coefficient
        self.current_step = 0
        # a coefficient < 1 shrinks the value, so the final value must be the smaller one
        if decay_coefficient < 1 and final_value > initial_value:
            raise ValueError("The final value should be lower than the initial value when the decay coefficient < 1")
        # a coefficient > 1 grows the value, so the final value must be the larger one
        if decay_coefficient > 1 and initial_value > final_value:
            raise ValueError("The final value should be higher than the initial value when the decay coefficient > 1")

    def step(self):
        """Multiply the current value by the coefficient, clipping at the bounds."""
        self.current_value *= self.decay_coefficient
        if self.final_value < self.initial_value:
            # decreasing schedule - never drop below the final value
            self.current_value = np.clip(self.current_value, self.final_value, self.initial_value)
        elif self.final_value > self.initial_value:
            # increasing schedule - never exceed the final value
            self.current_value = np.clip(self.current_value, self.initial_value, self.final_value)
        self.current_step += 1
import copy
import datetime
import os
import sys
import time
from itertools import cycle
from os import listdir
from os.path import isfile, join, isdir
from bokeh.layouts import row, column, Spacer, ToolbarBox
from bokeh.models import ColumnDataSource, Range1d, LinearAxis, Legend, \
WheelZoomTool, CrosshairTool, ResetTool, SaveTool, Toolbar, PanTool, BoxZoomTool, \
Toggle
from bokeh.models.callbacks import CustomJS
from bokeh.models.widgets import RadioButtonGroup, MultiSelect, Button, Select, Slider, Div, CheckboxGroup
from bokeh.plotting import figure
from rl_coach.dashboard_components.globals import signals_files, x_axis_labels, x_axis_options, show_spinner, hide_spinner, \
dialog, FolderType, RunType, add_directory_csv_files, doc, display_boards, layouts, \
crcolor, crx, cry, color_resolution, crRGBs, rgb_to_hex, x_axis
from rl_coach.dashboard_components.signals_files_group import SignalsFilesGroup
from rl_coach.dashboard_components.signals_file import SignalsFile
def update_axis_range(name, range_placeholder):
    """Fit the given axis range to the signals currently shown on that axis.

    :param name: the axis name ('default' / 'secondary'), or one of the x axis
        options, in which case the range is fitted to that x-axis signal
    :param range_placeholder: the bokeh Range1d object to update in place
    """
    max_val = -float('inf')
    min_val = float('inf')
    selected_signal = None
    if name in x_axis_options:
        selected_signal = name
    for signals_file in signals_files.values():
        curr_min_val, curr_max_val = signals_file.get_range_of_selected_signals_on_axis(name, selected_signal)
        max_val = max(max_val, curr_max_val)
        min_val = min(min_val, curr_min_val)
    # no selected signals on this axis -> leave the current range untouched
    if min_val != float('inf'):
        # renamed from 'range' to avoid shadowing the builtin
        if min_val == max_val:
            # constant data: use an arbitrary span so the line is not drawn on the edge
            span = 5
        else:
            span = max_val - min_val
        # pad by 10% on each side
        range_placeholder.start = min_val - 0.1 * span
        range_placeholder.end = max_val + 0.1 * span
# update axes ranges
def update_y_axis_ranges():
    """Refit both Y axes (main and secondary) to the selected signals."""
    for axis_name, axis_range in (('default', plot.y_range),
                                  ('secondary', plot.extra_y_ranges['secondary'])):
        update_axis_range(axis_name, axis_range)
def update_x_axis_ranges():
    """Refit the X axis to the currently selected x-axis signal."""
    current_x_signal = x_axis[0]
    update_axis_range(current_x_signal, plot.x_range)
def get_all_selected_signals():
    """Collect the selected signals from every loaded signals file."""
    return [signal
            for signals_file in signals_files.values()
            for signal in signals_file.get_selected_signals()]
# update legend using the legend text dictionary
def update_legend():
    """Rebuild the bokeh legend to match the currently selected signals.

    The legend items are only reassigned when the content actually changed,
    since assigning ``bokeh_legend.items`` forces a full (slow) plot redraw.
    """
    selected_signals = get_all_selected_signals()
    max_line_length = 50
    items = []
    for signal in selected_signals:
        # mark which y axis the signal is attached to
        side_sign = "◀" if signal.axis == 'default' else "▶"
        signal_name = side_sign + " " + signal.full_name
        # bokeh legend does not respect a max_width parameter so we split the text manually to lines of constant width
        signal_name = [signal_name[n:n + max_line_length] for n in range(0, len(signal_name), max_line_length)]
        for idx, substr in enumerate(signal_name):
            if idx == 0:
                lines = [signal.line]
                if signal.show_bollinger_bands:
                    lines.append(signal.bands)
                items.append((substr, lines))
            else:
                # continuation lines of a wrapped label carry no renderers
                items.append((substr, []))
    # BUGFIX: zip() truncates to the shorter list, so a pure length change with
    # a matching prefix (e.g. deselecting the last signal, or adding one) was
    # previously not detected - compare the lengths explicitly as well
    if len(bokeh_legend.items) != len(items) \
            or any([legend_item.renderers != item[1] for legend_item, item in zip(bokeh_legend.items, items)]) \
            or any([legend_item.label != item[0] for legend_item, item in zip(bokeh_legend.items, items)]):
        bokeh_legend.items = items  # this step takes a long time because it is redrawing the plot
        # the visible=false => visible=true is a hack to make the legend render again
        bokeh_legend.visible = False
        bokeh_legend.visible = True
# select lines to display
def select_data(args, old, new):
    """Callback for the data MultiSelect: show/hide the selected file's signals."""
    if selected_file is None:
        return
    show_spinner("Updating the signal selection...")
    chosen = set(new)
    for signal_name in selected_file.signals.keys():
        selected_file.set_signal_selection(signal_name, signal_name in chosen)
    # refit both axes to the new selection
    update_y_axis_ranges()
    update_x_axis_ranges()
    # redraw the legend to match
    update_legend()
    hide_spinner()
# add new lines to the plot
def plot_signals(signals_file, signals):
    """Create a line renderer on the shared plot for each given signal."""
    for signal in signals:
        signal.line = plot.line('index', signal.name, source=signals_file.bokeh_source,
                                line_color=signal.color, line_width=2)
def open_file_dialog():
    # blocking tkinter file chooser; returns the selected path, or '' on cancel
    return dialog.getFileDialog()
def open_directory_dialog():
    # blocking tkinter directory chooser; returns the selected path, or '' on cancel
    return dialog.getDirDialog()
# will create a group from the files
def create_files_group_signal(files):
    """Load the given csv files as a single SignalsFilesGroup and make it the selected file."""
    global selected_file
    group = SignalsFilesGroup(files, plot)
    signals_files[group.filename] = group
    new_names = [group.filename]
    # replace the empty placeholder entry on first load, append afterwards
    if files_selector.options[0] == "":
        files_selector.options = new_names
    else:
        files_selector.options = files_selector.options + new_names
    files_selector.value = new_names[0]
    selected_file = group
# load files from disk as a group
def load_file():
    """Prompt the user for a single csv file and load it into the dashboard."""
    chosen = open_file_dialog()
    show_spinner("Loading file...")
    if not chosen:
        # the dialog was cancelled
        hide_spinner()
        return
    display_boards()
    create_files_signal([chosen])
    change_selected_signals_in_data_selector([""])
    hide_spinner()
# classify the folder as containing a single file, multiple files or only folders
def classify_folder(dir_path):
    """Classify dir_path by its csv content, recursing through a lone sub directory."""
    csv_files = [f for f in listdir(dir_path)
                 if isfile(join(dir_path, f)) and f.endswith('.csv')]
    csv_dirs = [d for d in listdir(dir_path)
                if isdir(join(dir_path, d)) and any(f.endswith(".csv") for f in os.listdir(join(dir_path, d)))]
    if len(csv_files) == 1:
        return FolderType.SINGLE_FILE
    if len(csv_files) > 1:
        return FolderType.MULTIPLE_FILES
    if len(csv_dirs) == 1:
        # exactly one csv-bearing sub dir - classify by its content instead
        return classify_folder(join(dir_path, csv_dirs[0]))
    if len(csv_dirs) > 1:
        return FolderType.MULTIPLE_FOLDERS
    return FolderType.EMPTY
# finds if this is single-threaded or multi-threaded
def get_run_type(dir_path):
    """Map the folder structure of dir_path to a RunType.

    Note: when the folder is empty (or a nested classification is unresolved),
    the FolderType value itself is returned - callers such as
    display_directory_group rely on comparing the result to FolderType.EMPTY.
    """
    folder_type = classify_folder(dir_path)
    if folder_type == FolderType.SINGLE_FILE:
        folder_type = RunType.SINGLE_FOLDER_SINGLE_FILE
    elif folder_type == FolderType.MULTIPLE_FILES:
        folder_type = RunType.SINGLE_FOLDER_MULTIPLE_FILES
    elif folder_type == FolderType.MULTIPLE_FOLDERS:
        # folder contains sub dirs -> we assume we can classify the folder using only the first sub dir
        sub_dirs = [d for d in listdir(dir_path) if isdir(join(dir_path, d))]
        # checking only the first folder in the root dir for its type, since we assume that all sub dirs will share the
        # same structure (i.e. if one is a result of multi-threaded run, so will all the other).
        folder_type = classify_folder(os.path.join(dir_path, sub_dirs[0]))
        if folder_type == FolderType.SINGLE_FILE:
            folder_type = RunType.MULTIPLE_FOLDERS_SINGLE_FILES
        elif folder_type == FolderType.MULTIPLE_FILES:
            folder_type = RunType.MULTIPLE_FOLDERS_MULTIPLE_FILES
    return folder_type
# create a signal file from the directory path according to the directory underlying structure
def handle_dir(dir_path, run_type):
    """Load dir_path into the dashboard according to its detected run type."""
    csv_paths = add_directory_csv_files(dir_path)
    if run_type == RunType.SINGLE_FOLDER_SINGLE_FILE:
        create_files_signal(csv_paths, use_dir_name=True)
    elif run_type in (RunType.SINGLE_FOLDER_MULTIPLE_FILES,
                      RunType.MULTIPLE_FOLDERS_SINGLE_FILES):
        create_files_group_signal(csv_paths)
    elif run_type == RunType.MULTIPLE_FOLDERS_MULTIPLE_FILES:
        # group the sub directories themselves, one group member per sub dir
        sub_dirs = [d for d in listdir(dir_path) if isdir(join(dir_path, d))]
        create_files_group_signal([os.path.join(dir_path, d) for d in sub_dirs])
# load directory from disk as a group
def load_directory_group():
    """Prompt the user for a directory and load it as a signals group."""
    chosen_dir = open_directory_dialog()
    show_spinner("Loading directories group...")
    if not chosen_dir:
        # the dialog was cancelled
        hide_spinner()
        return
    display_directory_group(chosen_dir)
def display_directory_group(directory):
    """Load `directory` into the dashboard, blocking until it contains csv files.

    Auto refresh is paused for the duration of the load so a periodic reload
    cannot race with the initial file parsing.
    """
    pause_auto_update()
    display_boards()
    show_spinner("Loading directories group...")
    # poll every 10s until the experiment starts writing csv files
    while get_run_type(directory) == FolderType.EMPTY:
        show_spinner("Waiting for experiment directory to get populated...")
        sys.stdout.write("Waiting for experiment directory to get populated...\r")
        time.sleep(10)
    handle_dir(directory, get_run_type(directory))
    change_selected_signals_in_data_selector([""])
    resume_auto_update_according_to_toggle()
    hide_spinner()
def create_files_signal(files, use_dir_name=False):
    """Load each given csv file as its own SignalsFile and select the first one."""
    global selected_file
    loaded = []
    for file_path in files:
        sf = SignalsFile(str(file_path), plot=plot, use_dir_name=use_dir_name)
        signals_files[sf.filename] = sf
        loaded.append(sf)
    names = [sf.filename for sf in loaded]
    # replace the empty placeholder entry on first load, append afterwards
    if files_selector.options[0] == "":
        files_selector.options = names
    else:
        files_selector.options = files_selector.options + names
    files_selector.value = names[0]
    selected_file = loaded[0]
    # update x axis according to the file's default x-axis (which is the index, and thus the first column)
    axis_idx = x_axis_options.index(loaded[0].csv.columns[0])
    change_x_axis(axis_idx)
    x_axis_selector.active = axis_idx
def display_files(files):
    """Load the given csv file paths into the dashboard (auto refresh paused meanwhile)."""
    pause_auto_update()
    display_boards()
    show_spinner("Loading files...")
    create_files_signal(files)
    # clear the signal selection for the newly loaded files
    change_selected_signals_in_data_selector([""])
    resume_auto_update_according_to_toggle()
    hide_spinner()
def unload_file():
    """Remove the currently selected file from the dashboard.

    Hides its signals, drops it from the loaded-files registry and moves the
    files selector to the first remaining file (or clears it if none remain).
    """
    global selected_file
    if selected_file is None:
        return
    selected_file.hide_all_signals()
    del signals_files[selected_file.filename]
    data_selector.options = [""]
    filenames_list = copy.copy(files_selector.options)
    filenames_list.remove(selected_file.filename)
    if len(filenames_list) == 0:
        # keep an empty placeholder entry so the selector is never option-less
        filenames_list = [""]
    files_selector.options = filenames_list
    if files_selector.options[0] != "":
        # select the first remaining file (previously done via a redundant
        # itertools.cycle + next, which is equivalent to taking element 0)
        files_selector.value = filenames_list[0]
    else:
        files_selector.value = None
    update_legend()
    refresh_info.text = ""
    if len(signals_files) == 0:
        selected_file = None
# reload the selected csv file
def reload_all_files(force=False):
    """Reload every loaded file whose backing csv changed on disk (all files when forced)."""
    pause_auto_update()
    for signals_file in signals_files.values():
        if force or signals_file.file_was_modified_on_disk():
            show_spinner("Updating files from the disk...")
            signals_file.load()
            hide_spinner()
    refresh_info.text = "Last Update: " + str(datetime.datetime.now()).split(".")[0]
    resume_auto_update_according_to_toggle()
# unselect the currently selected signals and then select the requested signals in the data selector
def change_selected_signals_in_data_selector(selected_signals):
    """Replace the data selector's selection with `selected_signals`.

    The remove/re-insert dance below forces bokeh to notice the change;
    keep the statement order exactly as-is.
    """
    # the default bokeh way is not working due to a bug since Bokeh 0.12.6 (https://github.com/bokeh/bokeh/issues/6501)
    # remove the data selection callback before updating the selector
    data_selector.remove_on_change('value', select_data)
    for value in list(data_selector.value):
        if value in data_selector.options:
            index = data_selector.options.index(value)
            data_selector.options.remove(value)
            data_selector.value.remove(value)
            data_selector.options.insert(index, value)
    data_selector.value = selected_signals
    # add back the data selection callback
    data_selector.on_change('value', select_data)
# change data options according to the selected file
def change_data_selector(args, old, new):
    """Callback for the files selector: make `new` the active file and refresh the widgets."""
    global selected_file
    if new is None:
        selected_file = None
        return
    show_spinner("Updating selection...")
    selected_file = signals_files[new]
    # group-only widgets are enabled only for SignalsFilesGroup selections
    if isinstance(selected_file, SignalsFile):
        group_cb.disabled = True
    elif isinstance(selected_file, SignalsFilesGroup):
        group_cb.disabled = False
    # detach the callback while rebuilding the options to avoid spurious triggers
    data_selector.remove_on_change('value', select_data)
    data_selector.options = sorted(list(selected_file.signals.keys()))
    data_selector.on_change('value', select_data)
    selected_signal_names = [s.name for s in selected_file.signals.values() if s.selected]
    if not selected_signal_names:
        selected_signal_names = [""]
    change_selected_signals_in_data_selector(selected_signal_names)
    averaging_slider.value = selected_file.signals_averaging_window
    if len(averaging_slider_dummy_source.data['value']) > 0:
        averaging_slider_dummy_source.data['value'][0] = selected_file.signals_averaging_window
    # NOTE(review): these lists can contain None entries when a property is off -
    # looks intentional for the CheckboxGroup state handling, but confirm
    group_cb.active = [0 if selected_file.show_bollinger_bands else None]
    group_cb.active += [1 if selected_file.separate_files else None]
    hide_spinner()
# smooth all the signals of the selected file
def update_averaging(args, old, new):
    """Callback fired (through the dummy source) when the averaging slider is released."""
    show_spinner("Smoothing the signals...")
    # the slider reports via a dummy ColumnDataSource; read the real value from it
    window = averaging_slider_dummy_source.data['value'][0]
    selected_file.change_averaging_window(window)
    hide_spinner()
def change_x_axis(val):
    """Switch the shared x axis of all loaded files to the option at index `val`."""
    global x_axis
    show_spinner("Updating the X axis...")
    x_axis[0] = x_axis_options[val]
    plot.xaxis.axis_label = x_axis_labels[val]
    for signals_file in signals_files.values():
        signals_file.update_x_axis_index()
        # groups must be reloaded so the mean over all member files is recalculated
        if isinstance(signals_file, SignalsFilesGroup):
            signals_file.load()
    update_axis_range(x_axis[0], plot.x_range)
    hide_spinner()
# move the signal between the main and secondary Y axes
def toggle_second_axis():
    """Move the selected file's signals between the main and secondary Y axes."""
    show_spinner("Switching the Y axis...")
    # make sure the secondary axis is rendered
    plot.yaxis[-1].visible = True
    selected_file.toggle_y_axis()
    # reloading only serves to force the signals to be redrawn
    selected_file.reload_data()
    update_y_axis_ranges()
    update_legend()
    hide_spinner()
def toggle_group_property(new):
    """Checkbox callback toggling the group-only display properties."""
    show_spinner("Loading...")
    show_bands = 0 in new      # show / hide Bollinger bands
    ungroup = 1 in new         # a separate signal per file in the group
    selected_file.change_bollinger_bands_state(show_bands)
    selected_file.show_files_separately(ungroup)
    update_legend()
    hide_spinner()
# Color selection - most of these functions are taken from bokeh examples (plotting/color_sliders.py)
def select_color(attr, old, new):
    """Tap callback on the color strip: recolor the currently selected signals."""
    show_spinner("Changing signal color...")
    indices = new['1d']['indices']
    # BUGFIX: tapping outside the glyphs yields an empty selection, which
    # previously raised an IndexError inside the callback - skip it instead
    if indices:
        chosen_color = rgb_to_hex(crRGBs[indices[0]])
        for signal in selected_file.get_selected_signals():
            signal.set_color(chosen_color)
    hide_spinner()
def pause_auto_update():
    # temporarily stop the periodic disk reload (e.g. while loading new files)
    toggle_auto_update(False)
def resume_auto_update_according_to_toggle():
    # re-enable the periodic reload only if the user's Auto Update toggle is still on
    toggle_auto_update(auto_update_toggle_button.active)
def toggle_auto_update(new):
    """Register/unregister the 30s periodic reload according to the toggle state.

    NOTE(review): relies on the private ``doc._session_callbacks`` attribute to
    test whether the callback is registered - confirm against the installed
    bokeh version before upgrading.
    """
    global file_update_callback
    if new is False and file_update_callback in doc._session_callbacks:
        doc.remove_periodic_callback(file_update_callback)
    elif file_update_callback not in doc._session_callbacks:
        file_update_callback = doc.add_periodic_callback(reload_all_files, 30000)
# start the periodic (30s) reload-from-disk callback; toggled by the Auto Update button
file_update_callback = doc.add_periodic_callback(reload_all_files, 30000)
# ---------------- Build Website Layout -------------------
# file refresh time placeholder
refresh_info = Div(text="""""", width=210)
# create figures
plot = figure(plot_width=1200, plot_height=800,
              # tools='pan,box_zoom,wheel_zoom,crosshair,undo,redo,reset,save',
              toolbar_location=None, x_axis_label='Episodes',
              x_range=Range1d(0, 10000), y_range=Range1d(0, 100000), lod_factor=1000)
# a second y range so signals can be moved to a secondary axis on the right
plot.extra_y_ranges = {"secondary": Range1d(start=-100, end=200)}
plot.add_layout(LinearAxis(y_range_name="secondary"), 'right')
toolbar = Toolbar(tools=[PanTool(), BoxZoomTool(), WheelZoomTool(), CrosshairTool(), ResetTool(), SaveTool()])
# plot.toolbar = toolbar
plot.add_tools(*toolbar.tools)
# the secondary axis stays hidden until a signal is moved onto it
plot.yaxis[-1].visible = False
bokeh_legend = Legend(
    items=[("", [])],
    orientation="vertical",
    border_line_color="black",
    label_text_font_size={'value': '9pt'},
    click_policy='hide',
    visible=False
)
bokeh_legend.label_width = 100
plot.add_layout(bokeh_legend, "right")
# the actual ranges are refitted later by update_axis_range()
plot.y_range = Range1d(0, 100)
plot.extra_y_ranges['secondary'] = Range1d(0, 100)
# select file
file_selection_button = Button(label="Select File", button_type="success", width=120)
file_selection_button.on_click(load_file)
files_selector_spacer = Spacer(width=10)
group_selection_button = Button(label="Select Directory", button_type="primary", width=140)
group_selection_button.on_click(load_directory_group)
update_files_button = Button(label="Update Files", button_type="default", width=50)
update_files_button.on_click(reload_all_files)
auto_update_toggle_button = Toggle(label="Auto Update", button_type="default", width=50, active=True)
auto_update_toggle_button.on_click(toggle_auto_update)
unload_file_button = Button(label="Unload", button_type="danger", width=50)
unload_file_button.on_click(unload_file)
# files selection box
files_selector = Select(title="Files:", options=[""])
files_selector.on_change('value', change_data_selector)
# data selection box
data_selector = MultiSelect(title="Data:", options=[], size=12)
data_selector.on_change('value', select_data)
# x axis selection box
x_axis_selector_title = Div(text="""X Axis:""", height=10)
x_axis_selector = RadioButtonGroup(labels=x_axis_options, active=0)
x_axis_selector.on_click(change_x_axis)
# toggle second axis button
toggle_second_axis_button = Button(label="Toggle Second Axis", button_type="success")
toggle_second_axis_button.on_click(toggle_second_axis)
# averaging slider
# This data source is just used to communicate / trigger the real callback
averaging_slider_dummy_source = ColumnDataSource(data=dict(value=[]))
averaging_slider_dummy_source.on_change('data', update_averaging)
averaging_slider = Slider(title="Averaging window", start=1, end=101, step=10, callback_policy='mouseup')
averaging_slider.callback = CustomJS(args=dict(source=averaging_slider_dummy_source), code="""
source.data = { value: [cb_obj.value] }
""")
# group properties checkbox
group_cb = CheckboxGroup(labels=["Show statistics bands", "Ungroup signals"], active=[])
group_cb.on_click(toggle_group_property)
# color selector
color_selector_title = Div(text="""Select Color:""")
crsource = ColumnDataSource(data=dict(x=crx, y=cry, crcolor=crcolor, RGBs=crRGBs))
# the color picker is itself a thin tappable figure showing the color spectrum
color_selector = figure(x_range=(0, color_resolution), y_range=(0, 10),
                        plot_width=300, plot_height=40,
                        tools='tap')
color_selector.axis.visible = False
color_range = color_selector.rect(x='x', y='y', width=1, height=10,
                                  color='crcolor', source=crsource)
crsource.on_change('selected', select_color)
# keep the tapped rect looking the same as the rest of the strip
color_range.nonselection_glyph = color_range.glyph
color_selector.toolbar.logo = None
color_selector.toolbar_location = None
# main layout of the document: a control sidebar column next to the plot panel
layout = row(file_selection_button, files_selector_spacer, group_selection_button, width=300)
layout = column(layout, files_selector)
layout = column(layout, row(update_files_button, Spacer(width=50), auto_update_toggle_button,
                            Spacer(width=50), unload_file_button))
layout = column(layout, row(refresh_info))
layout = column(layout, data_selector)
layout = column(layout, color_selector_title)
layout = column(layout, color_selector)
layout = column(layout, x_axis_selector_title)
layout = column(layout, x_axis_selector)
layout = column(layout, group_cb)
layout = column(layout, toggle_second_axis_button)
layout = column(layout, averaging_slider)
toolbox = ToolbarBox(toolbar=toolbar, toolbar_location='above')
panel = column(toolbox, plot)
layout = row(layout, panel)
experiment_board_layout = layout
layouts["experiment_board"] = experiment_board_layout | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/dashboard_components/experiment_board.py | 0.40486 | 0.214527 | experiment_board.py | pypi |
from bokeh.layouts import row, column, widgetbox, Spacer
from bokeh.models import ColumnDataSource, Range1d, LinearAxis, Legend
from bokeh.models.widgets import RadioButtonGroup, MultiSelect, Button, Select, Slider, Div, CheckboxGroup, Toggle
from bokeh.plotting import figure
from rl_coach.dashboard_components.globals import layouts, crcolor, crx, cry, color_resolution, crRGBs
from rl_coach.dashboard_components.experiment_board import file_selection_button, files_selector_spacer, \
group_selection_button, unload_file_button, files_selector
# ---------------- Build Website Layout -------------------
# file refresh time placeholder
refresh_info = Div(text="""""", width=210)
# create figures
plot = figure(plot_width=1200, plot_height=800,
              tools='pan,box_zoom,wheel_zoom,crosshair,undo,redo,reset,save',
              toolbar_location='above', x_axis_label='Episodes',
              x_range=Range1d(0, 10000), y_range=Range1d(0, 100000))
# a second y range so signals can be moved to a secondary axis on the right
plot.extra_y_ranges = {"secondary": Range1d(start=-100, end=200)}
plot.add_layout(LinearAxis(y_range_name="secondary"), 'right')
plot.yaxis[-1].visible = False
# legend
div = Div(text="""""")
legend = widgetbox([div])
bokeh_legend = Legend(
    # items=[("12345678901234567890123456789012345678901234567890", [])], # 50 letters
    items=[("__________________________________________________", [])], # 50 letters
    location=(0, 0), orientation="vertical",
    border_line_color="black",
    label_text_font_size={'value': '9pt'},
    margin=30
)
plot.add_layout(bokeh_legend, "right")
# select file
# NOTE(review): the widgets below shadow the names imported from
# experiment_board at the top of this module, and their callbacks are still
# commented out - this board appears to be work in progress; confirm intent
file_selection_button = Button(label="Select Files", button_type="success", width=120)
# file_selection_button.on_click(load_files_group)
files_selector_spacer = Spacer(width=10)
group_selection_button = Button(label="Select Directory", button_type="primary", width=140)
# group_selection_button.on_click(load_directory_group)
unload_file_button = Button(label="Unload", button_type="danger", width=50)
# unload_file_button.on_click(unload_file)
# files selection box
files_selector = Select(title="Files:", options=[])
# files_selector.on_change('value', change_data_selector)
# data selection box
data_selector = MultiSelect(title="Data:", options=[], size=12)
# data_selector.on_change('value', select_data)
# toggle second axis button
toggle_second_axis_button = Button(label="Toggle Second Axis", button_type="success")
# toggle_second_axis_button.on_click(toggle_second_axis)
# averaging slider
averaging_slider = Slider(title="Averaging window", start=1, end=101, step=10)
# averaging_slider.on_change('value', update_averaging)
# color selector
color_selector_title = Div(text="""Select Color:""")
crsource = ColumnDataSource(data=dict(x=crx, y=cry, crcolor=crcolor, RGBs=crRGBs))
color_selector = figure(x_range=(0, color_resolution), y_range=(0, 10),
                        plot_width=300, plot_height=40,
                        tools='tap')
color_selector.axis.visible = False
color_range = color_selector.rect(x='x', y='y', width=1, height=10,
                                  color='crcolor', source=crsource)
# crsource.on_change('selected', select_color)
# keep the tapped rect looking the same as the rest of the strip
color_range.nonselection_glyph = color_range.glyph
color_selector.toolbar.logo = None
color_selector.toolbar_location = None
# episodic-board specific controls
episode_selector = MultiSelect(title="Episode:", options=['0', '1', '2', '3', '4'], size=1)
online_toggle = Toggle(label="Online", button_type="success")
# main layout of the document: a control sidebar column next to the plot
layout = row(file_selection_button, files_selector_spacer, group_selection_button, width=300)
layout = column(layout, files_selector)
layout = column(layout, row(refresh_info, unload_file_button))
layout = column(layout, data_selector)
layout = column(layout, color_selector_title)
layout = column(layout, color_selector)
layout = column(layout, toggle_second_axis_button)
layout = column(layout, averaging_slider)
layout = column(layout, episode_selector)
layout = column(layout, online_toggle)
layout = row(layout, plot)
episodic_board_layout = layout
layouts["episodic_board"] = episodic_board_layout | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/dashboard_components/episodic_board.py | 0.669205 | 0.369287 | episodic_board.py | pypi |
from collections import OrderedDict
import os
from genericpath import isdir, isfile
from os import listdir
from os.path import join
from enum import Enum
from bokeh.models import Div
from bokeh.plotting import curdoc
import tkinter as tk
from tkinter import filedialog
import colorsys
from rl_coach.core_types import TimeTypes
# shared mutable state for all dashboard components
patches = {}
signals_files = {}          # filename -> SignalsFile / SignalsFilesGroup
selected_file = None        # the file currently chosen in the files selector
x_axis = ['Episode #']      # single-element list so callbacks can mutate it in place
x_axis_options = [time_type.value.name for time_type in TimeTypes]
x_axis_labels = [time_type.value.label for time_type in TimeTypes]
current_color = 0           # rotating index into the shared line-color palette
# spinner
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(root_dir, 'dashboard_components/spinner.css'), 'r') as f:
    spinner_style = """<style>{}</style>""".format(f.read())
spinner_html = """<ul class="spinner"><li></li><li></li><li></li><li></li>
                 <li>
                     <br>
                     <span style="font-size: 24px; font-weight: bold; margin-left: -175px; width: 400px;
                           position: absolute; text-align: center;">
                         {}
                     </span>
                 </li></ul>"""
spinner = Div(text="""""")
displayed_doc = "landing_page"
layouts = {}
def generate_color_range(N, I):
    """Build N evenly-hue-spaced colors of intensity I.

    :param N: number of colors to generate
    :param I: HSV value (brightness) shared by all colors
    :return: a pair (hex_strings, rgb_tuples) where each rgb tuple holds
             0-255 integer components
    """
    rgb_ints = []
    for step in range(N):
        # walk the hue wheel at fixed saturation 0.5 and brightness I
        r, g, b = colorsys.hsv_to_rgb(step * 1.0 / N, 0.5, I)
        rgb_ints.append((int(r * 255), int(g * 255), int(b * 255)))
    hex_colors = ['#%02x%02x%02x' % rgb for rgb in rgb_ints]
    return hex_colors, rgb_ints
# convert RGB tuple to hexadecimal code
def rgb_to_hex(rgb):
    """Format an (r, g, b) tuple of 0-255 ints as a '#rrggbb' string."""
    r, g, b = rgb
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)
# convert hexadecimal to RGB tuple
def hex_to_dec(hex):
    """Parse a '#rrggbb' (or 'rrggbb') color string into an (r, g, b) int tuple.

    Note: the parameter name shadows the builtin ``hex``; kept for backward
    compatibility with existing keyword callers.
    """
    # only a *leading* hash is meaningful (the previous strip('#') also
    # stripped trailing hashes); the ''.join over slices was a no-op
    digits = hex.lstrip('#')
    red = digits[0:2]
    green = digits[2:4]
    blue = digits[4:6]
    return int(red, 16), int(green, 16), int(blue, 16)
# precomputed color-picker spectrum shared by both boards
color_resolution = 1000
brightness = 0.75  # change to have brighter/darker colors
crx = list(range(1, color_resolution+1))  # the resolution is 1000 colors
cry = [5 for i in range(len(crx))]  # constant y so the strip renders as one row
crcolor, crRGBs = generate_color_range(color_resolution, brightness)  # produce spectrum
def display_boards():
    """Swap the landing page root for the boards layout (effective only once)."""
    global displayed_doc
    if displayed_doc != "landing_page":
        return
    doc.remove_root(doc.roots[0])
    doc.add_root(layouts["boards"])
    displayed_doc = "boards"
def show_spinner(text="Loading..."):
    # render the busy spinner overlay with the given status text
    spinner.text = spinner_style + spinner_html.format(text)
def hide_spinner():
    # clearing the Div text removes the spinner overlay
    spinner.text = ""
# takes path to dir and recursively adds all its files to paths
def add_directory_csv_files(dir_path, paths=None):
    """Recursively collect the paths of all .csv files under dir_path.

    :param dir_path: the directory to scan
    :param paths: optional list to accumulate into (a new list is created
        when omitted or empty)
    :return: the list of csv file paths
    """
    if not paths:
        paths = []
    for entry in listdir(dir_path):
        entry_path = join(dir_path, entry)
        if isdir(entry_path):
            # descend into every sub directory, accumulating into the same list
            paths = add_directory_csv_files(entry_path, paths)
            continue
        if isfile(entry_path) and entry_path.endswith('.csv'):
            paths.append(entry_path)
    return paths
class DialogApp():
    """Thin wrapper around the blocking tkinter file/directory dialogs."""

    def getFileDialog(self):
        """Open a modal csv-file chooser and return the chosen path ('' on cancel)."""
        window = tk.Tk()
        # only csv files are meaningful for the dashboard
        chosen = filedialog.askopenfilename(parent=window,
                                            initialdir=os.getcwd(),
                                            title="Please select a file",
                                            filetypes=[('csv files', '.csv')])
        window.destroy()
        return chosen

    def getDirDialog(self):
        """Open a modal directory chooser and return the chosen path ('' on cancel)."""
        window = tk.Tk()
        chosen = filedialog.askdirectory(parent=window,
                                         initialdir=os.getcwd(),
                                         title="Please select a folder")
        window.destroy()
        return chosen
class RunType(Enum):
    """How an experiment run laid out its csv files on disk (single- vs multi-worker)."""
    SINGLE_FOLDER_SINGLE_FILE = 1
    SINGLE_FOLDER_MULTIPLE_FILES = 2
    MULTIPLE_FOLDERS_SINGLE_FILES = 3
    MULTIPLE_FOLDERS_MULTIPLE_FILES = 4
    UNKNOWN = 0
class FolderType(Enum):
    """The csv content of a single directory, used while classifying a run."""
    SINGLE_FILE = 1
    MULTIPLE_FILES = 2
    MULTIPLE_FOLDERS = 3
    EMPTY = 4
# module-level singletons shared by all dashboard components
# (BUGFIX: stripped dataset-extraction residue that was appended to the curdoc line)
dialog = DialogApp()
doc = curdoc()
import random
import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.palettes import Dark2
from rl_coach.dashboard_components.globals import show_spinner, hide_spinner, current_color
from rl_coach.utils import squeeze_list
class Signal:
    """A single dashboard signal: a line on the shared plot backed by one or
    more columns of the parent file's bokeh data source, with optional
    mean +/- stdev "Bollinger" bands when Mean/Stdev/Min/Max columns exist.
    """
    def __init__(self, name, parent, plot):
        """
        :param name: the signal name (a bare column, or the prefix of a
            'name/<statistic>' column group)
        :param parent: the SignalsFile / SignalsFilesGroup that owns the data
        :param plot: the bokeh figure the signal is rendered on
        """
        self.name = name
        self.full_name = "{}/{}".format(parent.filename, self.name)
        self.plot = plot
        self.selected = False
        self.color = random.choice(Dark2[8])
        self.line = None
        self.scatter = None
        self.bands = None
        self.bokeh_source = parent.bokeh_source
        self.min_val = 0
        self.max_val = 0
        self.axis = 'default'
        # collect the source columns belonging to this signal: either the bare
        # column itself or '<name>/<statistic>' sub columns
        self.sub_signals = []
        # BUGFIX: the loop variable used to shadow the 'name' parameter
        for column_name in self.bokeh_source.data.keys():
            if (len(column_name.split('/')) == 1 and column_name == self.name) \
                    or '/'.join(column_name.split('/')[:-1]) == self.name:
                self.sub_signals.append(column_name)
        if len(self.sub_signals) > 1:
            self.mean_signal = squeeze_list([name for name in self.sub_signals if 'Mean' in name.split('/')[-1]])
            self.stdev_signal = squeeze_list([name for name in self.sub_signals if 'Stdev' in name.split('/')[-1]])
            self.min_signal = squeeze_list([name for name in self.sub_signals if 'Min' in name.split('/')[-1]])
            self.max_signal = squeeze_list([name for name in self.sub_signals if 'Max' in name.split('/')[-1]])
        else:
            self.mean_signal = squeeze_list(self.name)
            self.stdev_signal = None
            self.min_signal = None
            self.max_signal = None
        # bands can only be drawn when all four statistics columns are present
        self.has_bollinger_bands = False
        if self.mean_signal and self.stdev_signal and self.min_signal and self.max_signal:
            self.has_bollinger_bands = True
        self.show_bollinger_bands = False
        self.bollinger_bands_source = None
        self.update_range()

    def set_color(self, color):
        """Set the display color of the line (and of the bands, if created)."""
        self.color = color
        if self.line:
            self.line.glyph.line_color = color
        if self.bands:
            self.bands.glyph.fill_color = color

    def plot_line(self):
        """Create the line renderer (and bands when available), cycling the shared palette."""
        global current_color
        self.set_color(Dark2[8][current_color])
        current_color = (current_color + 1) % len(Dark2[8])
        if self.has_bollinger_bands:
            self.set_bands_source()
            self.create_bands()
        self.line = self.plot.line('index', self.mean_signal, source=self.bokeh_source,
                                   line_color=self.color, line_width=2)
        self.line.visible = True

    def set_selected(self, val):
        """Select/deselect the signal, lazily creating the renderers on first selection."""
        if self.selected != val:
            self.selected = val
            if self.line:
                self.line.visible = self.selected
                if self.bands:
                    self.bands.visible = self.selected and self.show_bollinger_bands
            elif self.selected:
                # lazy plotting - plot only when selected for the first time
                self.plot_line()

    def set_dash(self, dash):
        """Change the line dash pattern (assumes the line renderer exists)."""
        self.line.glyph.line_dash = dash

    def create_bands(self):
        """Create the mean +/- stdev band patch renderer."""
        self.bands = self.plot.patch(x='band_x', y='band_y', source=self.bollinger_bands_source,
                                     color=self.color, fill_alpha=0.4, alpha=0.1, line_width=0)
        self.bands.visible = self.show_bollinger_bands

    def set_bands_source(self):
        """(Re)build the closed-polygon data source for the mean +/- stdev band."""
        x_ticks = self.bokeh_source.data['index']
        mean_values = self.bokeh_source.data[self.mean_signal]
        stdev_values = self.bokeh_source.data[self.stdev_signal]
        # the patch is drawn as the lower bound forward, then the upper bound backwards
        band_x = np.append(x_ticks, x_ticks[::-1])
        band_y = np.append(mean_values - stdev_values, mean_values[::-1] + stdev_values[::-1])
        source_data = {'band_x': band_x, 'band_y': band_y}
        if self.bollinger_bands_source:
            self.bollinger_bands_source.data = source_data
        else:
            self.bollinger_bands_source = ColumnDataSource(source_data)

    def change_bollinger_bands_state(self, new_state):
        """Show or hide the statistics bands (only while the signal is selected)."""
        self.show_bollinger_bands = new_state
        if self.bands and self.selected:
            self.bands.visible = new_state

    def update_range(self):
        """Refresh the cached min/max of the mean signal values."""
        self.min_val = np.min(self.bokeh_source.data[self.mean_signal])
        self.max_val = np.max(self.bokeh_source.data[self.mean_signal])

    def set_axis(self, axis):
        """Attach the signal's line to the given y range ('default' or 'secondary')."""
        self.axis = axis
        if not self.line:
            self.plot_line()
            self.line.visible = False
        self.line.y_range_name = axis

    def toggle_axis(self):
        """Flip the signal between the main and the secondary y axes."""
        if self.axis == 'default':
            self.set_axis('secondary')
        else:
            self.set_axis('default')
from typing import List
import numpy as np
from rl_coach.core_types import RunPhase, ActionType, EnvironmentSteps
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.exploration_policies.e_greedy import EGreedy, EGreedyParameters
from rl_coach.exploration_policies.exploration_policy import ExplorationParameters
from rl_coach.schedules import Schedule, LinearSchedule, PieceWiseSchedule
from rl_coach.spaces import ActionSpace
class UCBParameters(EGreedyParameters):
    """Parameters for the UCB exploration policy (multi-head bootstrapped q values)."""
    def __init__(self):
        super().__init__()
        # number of q heads whose spread is used as the uncertainty estimate
        self.architecture_num_q_heads = 10
        # NOTE(review): presumably the probability of sharing each transition
        # across the bootstrapped heads - confirm against the agent implementation
        self.bootstrapped_data_sharing_probability = 1.0
        self.epsilon_schedule = PieceWiseSchedule([
            (LinearSchedule(1, 0.1, 1000000), EnvironmentSteps(1000000)),
            (LinearSchedule(0.1, 0.01, 4000000), EnvironmentSteps(4000000))
        ])
        # weight of the stdev term added to the mean q value
        self.lamb = 0.1

    @property
    def path(self):
        # import path of the policy class this parameters object instantiates
        return 'rl_coach.exploration_policies.ucb:UCB'
class UCB(EGreedy):
    """
    UCB exploration policy is following the upper confidence bound heuristic to sample actions in discrete action spaces.
    It assumes that there are multiple network heads that are predicting action values, and that the standard deviation
    between the heads predictions represents the uncertainty of the agent in each of the actions.
    It then updates the action value estimates to by mean(actions)+lambda*stdev(actions), where lambda is
    given by the user. This exploration policy aims to take advantage of the uncertainty of the agent in its predictions,
    and select the action according to the tradeoff between how uncertain the agent is, and how large it predicts
    the outcome from those actions to be.
    """
    def __init__(self, action_space: ActionSpace, epsilon_schedule: Schedule, evaluation_epsilon: float,
                 architecture_num_q_heads: int, lamb: float,
                 continuous_exploration_policy_parameters: ExplorationParameters = AdditiveNoiseParameters()):
        """
        :param action_space: the action space used by the environment
        :param epsilon_schedule: a schedule for the epsilon values
        :param evaluation_epsilon: the epsilon value to use for evaluation phases
        :param architecture_num_q_heads: the number of q heads to select from
        :param lamb: lambda coefficient for taking the standard deviation into account
            (BUGFIX: annotated as float - it is used as a real-valued weight and
            UCBParameters defaults it to 0.1)
        :param continuous_exploration_policy_parameters: the parameters of the continuous exploration policy to use
            if the e-greedy is used for a continuous policy
        """
        super().__init__(action_space, epsilon_schedule, evaluation_epsilon, continuous_exploration_policy_parameters)
        self.num_heads = architecture_num_q_heads
        self.lamb = lamb
        # stdev across heads, updated only during training
        self.std = 0
        self.last_action_values = 0

    def select_head(self):
        # head selection is unnecessary - all heads are aggregated in get_action
        pass

    def get_action(self, action_values: List[ActionType]) -> ActionType:
        """Aggregate per-head action values into mean + lambda * stdev and act e-greedily on them."""
        # action values are none in case the exploration policy is going to select a random action
        if action_values is not None:
            if self.requires_action_values():
                mean = np.mean(action_values, axis=0)
                if self.phase == RunPhase.TRAIN:
                    self.std = np.std(action_values, axis=0)
                    self.last_action_values = mean + self.lamb * self.std
                else:
                    # evaluation uses the plain mean, without the exploration bonus
                    self.last_action_values = mean
        return super().get_action(self.last_action_values)

    def get_control_param(self):
        """Expose the mean uncertainty (stdev across heads) as the control parameter."""
        if self.phase == RunPhase.TRAIN:
            return np.mean(self.std)
        else:
            return 0
from typing import List
import numpy as np
from rl_coach.core_types import RunPhase, ActionType
from rl_coach.exploration_policies.exploration_policy import DiscreteActionExplorationPolicy, ExplorationParameters
from rl_coach.schedules import Schedule
from rl_coach.spaces import ActionSpace
class BoltzmannParameters(ExplorationParameters):
    """Parameters for the Boltzmann (softmax) exploration policy."""
    def __init__(self):
        super().__init__()
        # schedule for the softmax temperature; must be set by the preset before use
        self.temperature_schedule = None

    @property
    def path(self):
        # module:class path used by coach to dynamically instantiate the policy
        return 'rl_coach.exploration_policies.boltzmann:Boltzmann'
class Boltzmann(DiscreteActionExplorationPolicy):
    """
    The Boltzmann exploration policy is intended for discrete action spaces. It assumes that each of the possible
    actions has some value assigned to it (such as the Q value), and uses a softmax function to convert these values
    into a distribution over the actions. It then samples the action for playing out of the calculated distribution.
    An additional temperature schedule can be given by the user, and will control the steepness of the softmax function.
    """
    def __init__(self, action_space: ActionSpace, temperature_schedule: Schedule):
        """
        :param action_space: the action space used by the environment
        :param temperature_schedule: the schedule for the temperature parameter of the softmax
        """
        super().__init__(action_space)
        self.temperature_schedule = temperature_schedule

    def get_action(self, action_values: List[ActionType]) -> (ActionType, List[float]):
        # the temperature only anneals while training
        if self.phase == RunPhase.TRAIN:
            self.temperature_schedule.step()

        # numerically stable softmax: shifting the logits by their max does not change the
        # resulting distribution but prevents np.exp from overflowing for large action values
        # or small temperatures (the original unshifted form could produce inf/nan)
        scaled_values = action_values / self.temperature_schedule.current_value
        exp_probabilities = np.exp(scaled_values - np.max(scaled_values))
        probabilities = exp_probabilities / np.sum(exp_probabilities)
        # make sure probs sum to exactly 1 despite floating point rounding
        probabilities[-1] = 1 - np.sum(probabilities[:-1])

        # choose actions according to the probabilities
        action = np.random.choice(range(self.action_space.shape), p=probabilities)
        return action, probabilities

    def get_control_param(self):
        # current softmax temperature, used for logging
        return self.temperature_schedule.current_value
from typing import List
import numpy as np
from rl_coach.core_types import RunPhase, ActionType
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.exploration_policies.e_greedy import EGreedy, EGreedyParameters
from rl_coach.exploration_policies.exploration_policy import ExplorationParameters
from rl_coach.schedules import Schedule, LinearSchedule
from rl_coach.spaces import ActionSpace
class BootstrappedParameters(EGreedyParameters):
    """Parameters for the Bootstrapped (multi-head) exploration policy."""
    def __init__(self):
        super().__init__()
        # number of q heads to choose a leading head from
        self.architecture_num_q_heads = 10
        # per-head probability of storing each transition in its replay data
        self.bootstrapped_data_sharing_probability = 1.0
        # epsilon decays linearly 1 -> 0.01 over the first 1M steps
        self.epsilon_schedule = LinearSchedule(1, 0.01, 1000000)

    @property
    def path(self):
        # module:class path used by coach to dynamically instantiate the policy
        return 'rl_coach.exploration_policies.bootstrapped:Bootstrapped'
class Bootstrapped(EGreedy):
    """
    Bootstrapped exploration policy is currently only used for discrete action spaces along with the
    Bootstrapped DQN agent. It assumes that there is an ensemble of network heads, where each one predicts the
    values for all the possible actions. For each episode, a single head is selected to lead the agent, according
    to its value predictions. In evaluation, the action is selected using a majority vote over all the heads
    predictions.

    .. note::
        This exploration policy will only work for Discrete action spaces with Bootstrapped DQN style agents,
        since it requires the agent to have a network with multiple heads.
    """
    def __init__(self, action_space: ActionSpace, epsilon_schedule: Schedule, evaluation_epsilon: float,
                 architecture_num_q_heads: int,
                 continuous_exploration_policy_parameters: ExplorationParameters = AdditiveNoiseParameters(),):
        """
        :param action_space: the action space used by the environment
        :param epsilon_schedule: a schedule for the epsilon values
        :param evaluation_epsilon: the epsilon value to use for evaluation phases
        :param continuous_exploration_policy_parameters: the parameters of the continuous exploration policy to use
                                                         if the e-greedy is used for a continuous policy
        :param architecture_num_q_heads: the number of q heads to select from
        """
        super().__init__(action_space, epsilon_schedule, evaluation_epsilon, continuous_exploration_policy_parameters)
        self.num_heads = architecture_num_q_heads
        # index of the head leading the agent for the current episode
        self.selected_head = 0
        # the values last handed to the e-greedy selection (kept for inspection)
        self.last_action_values = 0

    def select_head(self) -> None:
        # draw a new leading head uniformly at random (intended to be called once per episode)
        self.selected_head = np.random.randint(self.num_heads)

    def get_action(self, action_values: List[ActionType]) -> ActionType:
        # action values are none in case the exploration policy is going to select a random action
        if action_values is not None:
            if self.phase == RunPhase.TRAIN:
                # training: follow only the currently selected head's predictions
                action_values = action_values[self.selected_head]
            else:
                # ensemble voting for evaluation: each head votes for its argmax action
                top_action_votings = np.argmax(action_values, axis=-1)
                counts = np.bincount(top_action_votings.squeeze())
                top_action = np.argmax(counts)
                # convert the top action to a one hot vector and pass it to e-greedy
                action_values = np.eye(len(self.action_space.actions))[[top_action]]
            self.last_action_values = action_values
        return super().get_action(action_values)

    def get_control_param(self):
        # report which head is currently leading, for logging
        return self.selected_head
from typing import List
from rl_coach.base_parameters import Parameters
from rl_coach.core_types import RunPhase, ActionType
from rl_coach.spaces import ActionSpace, DiscreteActionSpace, BoxActionSpace, GoalsSpace
class ExplorationParameters(Parameters):
    """Base parameters class shared by all exploration policies."""
    def __init__(self):
        # filled in later by the agent, once the environment's action space is known
        self.action_space = None

    @property
    def path(self):
        # module:class path used by coach to dynamically instantiate the matching policy
        return 'rl_coach.exploration_policies.exploration_policy:ExplorationPolicy'
class ExplorationPolicy(object):
    """
    An exploration policy takes the predicted actions or action values from the agent, and selects the action to
    actually apply to the environment using some predefined algorithm.
    """
    def __init__(self, action_space: ActionSpace):
        """
        :param action_space: the action space used by the environment
        """
        # policies may behave differently per run phase (heatup / train / test)
        self.phase = RunPhase.HEATUP
        self.action_space = action_space

    def reset(self) -> None:
        """
        Used for resetting the exploration policy parameters when needed
        :return: None
        """
        pass

    def get_action(self, action_values: List[ActionType]) -> ActionType:
        """
        Given a list of values corresponding to each action,
        choose one actions according to the exploration policy
        :param action_values: A list of action values
        :return: The chosen action,
                 The probability of the action (if available, otherwise 1 for absolute certainty in the action)
        """
        raise NotImplementedError()

    def change_phase(self, phase) -> None:
        """
        Change between running phases of the algorithm
        :param phase: Either Heatup or Train (a RunPhase value)
        :return: none
        """
        self.phase = phase

    def requires_action_values(self) -> bool:
        """
        Allows exploration policies to define if they require the action values for the current step.
        This can save up a lot of computation. For example in e-greedy, if the random value generated is smaller
        than epsilon, the action is completely random, and the action values don't need to be calculated
        :return: True if the action values are required. False otherwise
        """
        return True

    def get_control_param(self):
        # a scalar describing the current exploration amount (e.g. epsilon), used for logging
        return 0
class DiscreteActionExplorationPolicy(ExplorationPolicy):
    """
    A discrete action exploration policy. Abstract - concrete policies must override get_action.
    """
    def __init__(self, action_space: ActionSpace):
        """
        :param action_space: the action space used by the environment
        """
        assert isinstance(action_space, DiscreteActionSpace)
        super().__init__(action_space)

    def get_action(self, action_values: List[ActionType]) -> (ActionType, List):
        """
        Given a list of values corresponding to each action,
        choose one actions according to the exploration policy
        :param action_values: A list of action values
        :return: The chosen action,
                 The probabilities of actions to select from (if not available a one-hot vector)
        :raises ValueError: always - this abstract method must be overridden
        """
        # BUG FIX: the original compared self.__class__ against ExplorationPolicy, which can
        # never match inside this subclass, so the "abstract class used directly" message was
        # unreachable; compare against this class instead
        if self.__class__ == DiscreteActionExplorationPolicy:
            raise ValueError("The DiscreteActionExplorationPolicy class is an abstract class and should not be "
                             "used directly. Please set the exploration parameters to point to an inheriting "
                             "class like EGreedy or AdditiveNoise")
        else:
            raise ValueError("The get_action function should be overridden in the inheriting exploration class")
class ContinuousActionExplorationPolicy(ExplorationPolicy):
    """
    A continuous action exploration policy.
    """
    def __init__(self, action_space: ActionSpace):
        """
        :param action_space: the action space used by the environment
        """
        # accept a raw box space, a wrapped (filtered) box space, or a goals space
        assert isinstance(action_space, BoxActionSpace) or \
            (hasattr(action_space, 'filtered_action_space') and
             isinstance(action_space.filtered_action_space, BoxActionSpace)) or \
            isinstance(action_space, GoalsSpace)
        super().__init__(action_space)
from typing import List
import numpy as np
from rl_coach.core_types import RunPhase, ActionType
from rl_coach.exploration_policies.exploration_policy import ContinuousActionExplorationPolicy, ExplorationParameters
from rl_coach.spaces import ActionSpace, BoxActionSpace, GoalsSpace
# Based on on the description in:
# https://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OUProcessParameters(ExplorationParameters):
    """Parameters for the Ornstein-Uhlenbeck process exploration policy."""
    def __init__(self):
        super().__init__()
        # long-run mean the process reverts towards
        self.mu = 0
        # mean-reversion rate
        self.theta = 0.15
        # scale of the random (diffusion) term
        self.sigma = 0.2
        # integration time step
        self.dt = 0.01

    @property
    def path(self):
        # module:class path used by coach to dynamically instantiate the policy
        return 'rl_coach.exploration_policies.ou_process:OUProcess'
# Ornstein-Uhlenbeck process
# Ornstein-Uhlenbeck process
class OUProcess(ContinuousActionExplorationPolicy):
    """
    OUProcess exploration policy is intended for continuous action spaces, and selects the action according to
    an Ornstein-Uhlenbeck process. The Ornstein-Uhlenbeck process implements the action as a Gaussian process, where
    the samples are correlated between consequent time steps.
    """
    def __init__(self, action_space: ActionSpace, mu: float=0, theta: float=0.15, sigma: float=0.2, dt: float=0.01):
        """
        :param action_space: the action space used by the environment
        """
        super().__init__(action_space)
        shape = self.action_space.shape
        # broadcast the scalar process parameters over the action dimensions
        self.mu = float(mu) * np.ones(shape)
        self.theta = float(theta)
        self.sigma = float(sigma) * np.ones(shape)
        self.state = np.zeros(shape)
        self.dt = dt

    def reset(self):
        """Restart the process from zero (typically on episode start)."""
        self.state = np.zeros(self.action_space.shape)

    def noise(self):
        """Advance the process one Euler-Maruyama step and return the new state."""
        drift = self.theta * (self.mu - self.state) * self.dt
        diffusion = self.sigma * np.random.randn(len(self.state)) * np.sqrt(self.dt)
        self.state = self.state + drift + diffusion
        return self.state

    def get_action(self, action_values: List[ActionType]) -> ActionType:
        # noise is only injected while training; evaluation acts on the raw mean
        if self.phase == RunPhase.TRAIN:
            perturbation = self.noise()
        else:
            perturbation = np.zeros(self.action_space.shape)
        return action_values.squeeze() + perturbation

    def get_control_param(self):
        # report the current process state while training, zeros otherwise
        if self.phase == RunPhase.TRAIN:
            return self.state
        return np.zeros(self.action_space.shape)
from typing import List
import numpy as np
from rl_coach.core_types import RunPhase, ActionType
from rl_coach.exploration_policies.exploration_policy import ContinuousActionExplorationPolicy, ExplorationParameters
from rl_coach.schedules import Schedule, LinearSchedule
from rl_coach.spaces import ActionSpace, BoxActionSpace
# TODO: consider renaming to gaussian sampling
# TODO: consider renaming to gaussian sampling
class AdditiveNoiseParameters(ExplorationParameters):
    """Parameters for the AdditiveNoise (Gaussian) exploration policy."""
    def __init__(self):
        super().__init__()
        # constant 10% noise level throughout training (LinearSchedule with equal endpoints)
        self.noise_schedule = LinearSchedule(0.1, 0.1, 50000)
        # noise level used during evaluation phases
        self.evaluation_noise = 0.05
        # interpret the noise level as a fraction of the action-space range (vs. absolute)
        self.noise_as_percentage_from_action_space = True

    @property
    def path(self):
        # module:class path used by coach to dynamically instantiate the policy
        return 'rl_coach.exploration_policies.additive_noise:AdditiveNoise'
class AdditiveNoise(ContinuousActionExplorationPolicy):
    """
    AdditiveNoise is an exploration policy intended for continuous action spaces. It takes the action from the agent
    and adds a Gaussian distributed noise to it. The amount of noise added to the action follows the noise amount that
    can be given in two different ways:
    1. Specified by the user as a noise schedule which is taken in percentiles out of the action space size
    2. Specified by the agents action. In case the agents action is a list with 2 values, the 1st one is assumed to
       be the mean of the action, and 2nd is assumed to be its standard deviation.
    """
    def __init__(self, action_space: ActionSpace, noise_schedule: Schedule,
                 evaluation_noise: float, noise_as_percentage_from_action_space: bool = True):
        """
        :param action_space: the action space used by the environment
        :param noise_schedule: the schedule for the noise
        :param evaluation_noise: the noise variance that will be used during evaluation phases
        :param noise_as_percentage_from_action_space: a bool deciding whether the noise is absolute or as a percentage
                                                      from the action space
        """
        super().__init__(action_space)
        self.noise_schedule = noise_schedule
        self.evaluation_noise = evaluation_noise
        self.noise_as_percentage_from_action_space = noise_as_percentage_from_action_space

        # NOTE(review): this only raises when the space is not a box AND it wraps a non-box
        # filtered space; a non-box space without a 'filtered_action_space' attribute slips
        # through silently. The base class assert also admits GoalsSpace, so tightening this
        # could break goal-based agents - confirm intent before changing.
        if not isinstance(action_space, BoxActionSpace) and \
                (hasattr(action_space, 'filtered_action_space') and not
                 isinstance(action_space.filtered_action_space, BoxActionSpace)):
            raise ValueError("Additive noise exploration works only for continuous controls."
                             "The given action space is of type: {}".format(action_space.__class__.__name__))

        # percentage-based noise scales by the action range, so the bounds must be finite
        if not np.all(-np.inf < action_space.high) or not np.all(action_space.high < np.inf)\
                or not np.all(-np.inf < action_space.low) or not np.all(action_space.low < np.inf):
            raise ValueError("Additive noise exploration requires bounded actions")

    def get_action(self, action_values: List[ActionType]) -> ActionType:
        # TODO-potential-bug consider separating internally defined stdev and externally defined stdev into 2 policies

        # set the current noise: a fixed level in TEST, the scheduled level otherwise
        if self.phase == RunPhase.TEST:
            current_noise = self.evaluation_noise
        else:
            current_noise = self.noise_schedule.current_value

        # scale the noise to the action space range
        if self.noise_as_percentage_from_action_space:
            action_values_std = current_noise * (self.action_space.high - self.action_space.low)
        else:
            action_values_std = current_noise

        # extract the mean values
        if isinstance(action_values, list):
            # the action values are expected to be a list with the action mean and optionally the action stdev
            action_values_mean = action_values[0].squeeze()
        else:
            # the action values are expected to be a numpy array representing the action mean
            action_values_mean = action_values.squeeze()

        # step the noise schedule (only outside of evaluation)
        if self.phase is not RunPhase.TEST:
            self.noise_schedule.step()
            # the second element of the list is assumed to be the standard deviation, and it
            # overrides the schedule-based stdev computed above
            if isinstance(action_values, list) and len(action_values) > 1:
                action_values_std = action_values[1].squeeze()

        # add noise to the action means; evaluation acts on the raw mean
        if self.phase is not RunPhase.TEST:
            action = np.random.normal(action_values_mean, action_values_std)
        else:
            action = action_values_mean

        return np.atleast_1d(action)

    def get_control_param(self):
        # report the current scheduled noise level per action dimension, for logging
        return np.ones(self.action_space.shape)*self.noise_schedule.current_value
from typing import List, Dict
import numpy as np
from rl_coach.architectures.layers import NoisyNetDense
from rl_coach.base_parameters import AgentParameters, NetworkParameters
from rl_coach.spaces import ActionSpace, BoxActionSpace, DiscreteActionSpace
from rl_coach.core_types import ActionType
from rl_coach.exploration_policies.exploration_policy import ExplorationPolicy, ExplorationParameters
class ParameterNoiseParameters(ExplorationParameters):
    """Parameters for the ParameterNoise exploration policy (noisy network layers)."""
    def __init__(self, agent_params: AgentParameters):
        super().__init__()
        # parameter noise works by swapping network layer types, so the agent must
        # explicitly declare support for it (currently only DQN variants do)
        if not agent_params.algorithm.supports_parameter_noise:
            raise ValueError("Currently only DQN variants are supported for using an exploration type of "
                             "ParameterNoise.")
        self.network_params = agent_params.network_wrappers

    @property
    def path(self):
        # module:class path used by coach to dynamically instantiate the policy
        return 'rl_coach.exploration_policies.parameter_noise:ParameterNoise'
class ParameterNoise(ExplorationPolicy):
    """
    The ParameterNoise exploration policy is intended for both discrete and continuous action spaces.
    It applies the exploration policy by replacing all the dense network layers with noisy layers.
    The noisy layers have both weight means and weight standard deviations, and for each forward pass of the network
    the weights are sampled from a normal distribution that follows the learned weights mean and standard deviation
    values.

    Warning: currently supported only by DQN variants
    """
    def __init__(self, network_params: Dict[str, NetworkParameters], action_space: ActionSpace):
        """
        :param action_space: the action space used by the environment
        """
        super().__init__(action_space)
        self.network_params = network_params
        self._replace_network_dense_layers()

    def get_action(self, action_values: List[ActionType]):
        space_type = type(self.action_space)
        if space_type == DiscreteActionSpace:
            # exploration already lives in the noisy weights, so act greedily here
            chosen = np.argmax(action_values)
            probabilities = np.zeros(len(self.action_space.actions))
            probabilities[chosen] = 1
            return chosen, probabilities
        if space_type == BoxActionSpace:
            # the network emits a mean and a stdev; sample the continuous action from them
            mean = action_values[0].squeeze()
            std = action_values[1].squeeze()
            return np.random.normal(mean, std)
        raise ValueError("ActionSpace type {} is not supported for ParameterNoise.".format(type(self.action_space)))

    def get_control_param(self):
        # no external exploration knob - the noise is learned inside the network
        return 0

    def _replace_network_dense_layers(self):
        # swap the dense layer type of every network component (embedders, middleware,
        # heads) for NoisyNetDense.
        # NOTE: this mutates params of already-instantiated parameter objects; it is safe
        # only because the networks are built late, after agent.init_environment_dependent_modules().
        for wrapper_params in self.network_params.values():
            components = list(wrapper_params.input_embedders_parameters.values())
            components.append(wrapper_params.middleware_parameters)
            components.extend(wrapper_params.heads_parameters)
            for component_params in components:
                component_params.dense_layer = NoisyNetDense
from typing import List
import numpy as np
from scipy.stats import truncnorm
from rl_coach.core_types import RunPhase, ActionType
from rl_coach.exploration_policies.exploration_policy import ExplorationParameters, ContinuousActionExplorationPolicy
from rl_coach.schedules import Schedule, LinearSchedule
from rl_coach.spaces import ActionSpace, BoxActionSpace
class TruncatedNormalParameters(ExplorationParameters):
    """Parameters for the TruncatedNormal exploration policy."""
    def __init__(self):
        super().__init__()
        # constant 10% noise level throughout training (LinearSchedule with equal endpoints)
        self.noise_schedule = LinearSchedule(0.1, 0.1, 50000)
        # noise level used during evaluation phases
        self.evaluation_noise = 0.05
        # bounds the sampled action is truncated to
        self.clip_low = 0
        self.clip_high = 1
        # interpret the noise level as a fraction of the action-space range (vs. absolute)
        self.noise_as_percentage_from_action_space = True

    @property
    def path(self):
        # module:class path used by coach to dynamically instantiate the policy
        return 'rl_coach.exploration_policies.truncated_normal:TruncatedNormal'
class TruncatedNormal(ContinuousActionExplorationPolicy):
    """
    The TruncatedNormal exploration policy is intended for continuous action spaces. It samples the action from a
    normal distribution, where the mean action is given by the agent, and the standard deviation can be given in
    two different ways:
    1. Specified by the user as a noise schedule which is taken in percentiles out of the action space size
    2. Specified by the agents action. In case the agents action is a list with 2 values, the 1st one is assumed to
       be the mean of the action, and 2nd is assumed to be its standard deviation.
    When the sampled action is outside of the action bounds given by the user, it is sampled again and again, until it
    is within the bounds.
    """
    def __init__(self, action_space: ActionSpace, noise_schedule: Schedule,
                 evaluation_noise: float, clip_low: float, clip_high: float,
                 noise_as_percentage_from_action_space: bool = True):
        """
        :param action_space: the action space used by the environment
        :param noise_schedule: the schedule for the noise variance
        :param evaluation_noise: the noise variance that will be used during evaluation phases
        :param clip_low: the lower bound the sampled action is truncated to
        :param clip_high: the upper bound the sampled action is truncated to
        :param noise_as_percentage_from_action_space: whether to consider the noise as a percentage of the action space
                                                      or absolute value
        """
        super().__init__(action_space)
        self.noise_schedule = noise_schedule
        self.evaluation_noise = evaluation_noise
        self.noise_as_percentage_from_action_space = noise_as_percentage_from_action_space
        self.clip_low = clip_low
        self.clip_high = clip_high

        if not isinstance(action_space, BoxActionSpace):
            raise ValueError("Truncated normal exploration works only for continuous controls."
                             "The given action space is of type: {}".format(action_space.__class__.__name__))

        # percentage-based noise scales by the action range, so the bounds must be finite.
        # BUG FIX: the error message previously said "Additive noise" (copy-paste from
        # additive_noise.py), which misnamed the failing policy.
        if not np.all(-np.inf < action_space.high) or not np.all(action_space.high < np.inf)\
                or not np.all(-np.inf < action_space.low) or not np.all(action_space.low < np.inf):
            raise ValueError("Truncated normal exploration requires bounded actions")

    def get_action(self, action_values: List[ActionType]) -> ActionType:
        # set the current noise: a fixed level in TEST, the scheduled level otherwise
        if self.phase == RunPhase.TEST:
            current_noise = self.evaluation_noise
        else:
            current_noise = self.noise_schedule.current_value

        # scale the noise to the action space range
        if self.noise_as_percentage_from_action_space:
            action_values_std = current_noise * (self.action_space.high - self.action_space.low)
        else:
            action_values_std = current_noise

        # extract the mean values
        if isinstance(action_values, list):
            # the action values are expected to be a list with the action mean and optionally the action stdev
            action_values_mean = action_values[0].squeeze()
        else:
            # the action values are expected to be a numpy array representing the action mean
            action_values_mean = action_values.squeeze()

        # step the noise schedule (only outside of evaluation)
        if self.phase is not RunPhase.TEST:
            self.noise_schedule.step()
            # the second element of the list is assumed to be the standard deviation, and it
            # overrides the schedule-based stdev computed above
            if isinstance(action_values, list) and len(action_values) > 1:
                action_values_std = action_values[1].squeeze()

        # sample from a truncated normal distribution; truncnorm expects the clip bounds
        # normalized to units of the standard deviation around the mean
        normalized_low = (self.clip_low - action_values_mean) / action_values_std
        normalized_high = (self.clip_high - action_values_mean) / action_values_std
        distribution = truncnorm(normalized_low, normalized_high, loc=action_values_mean, scale=action_values_std)
        action = distribution.rvs(1)

        return action

    def get_control_param(self):
        # report the current scheduled noise level per action dimension, for logging
        return np.ones(self.action_space.shape)*self.noise_schedule.current_value
from enum import Enum
from typing import Union, List
import numpy as np
from rl_coach.filters.observation.observation_move_axis_filter import ObservationMoveAxisFilter
try:
from pysc2 import maps
from pysc2.env import sc2_env
from pysc2.env import available_actions_printer
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.env import environment
from absl import app
from absl import flags
except ImportError:
from rl_coach.logger import failed_imports
failed_imports.append("PySc2")
from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection
from rl_coach.base_parameters import VisualizationParameters
from rl_coach.spaces import BoxActionSpace, VectorObservationSpace, PlanarMapsObservationSpace, StateSpace, CompoundActionSpace, \
DiscreteActionSpace
from rl_coach.filters.filter import InputFilter, OutputFilter
from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter
from rl_coach.filters.action.linear_box_to_box_map import LinearBoxToBoxMap
from rl_coach.filters.observation.observation_to_uint8_filter import ObservationToUInt8Filter
# absl flags must be parsed before pysc2 can be used; supply a fake program name
FLAGS = flags.FLAGS
FLAGS(['coach.py'])

SCREEN_SIZE = 84  # will also impact the action space size

# Starcraft Constants - pysc2 function/feature identifiers used by the simple action scheme
_NOOP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_NOT_QUEUED = [0]
_SELECT_ALL = [0]


class StarcraftObservationType(Enum):
    # observe pysc2 feature (segmentation) maps or raw RGB frames
    Features = 0
    RGB = 1


# default input filter: move the channels axis last, rescale the screen to 84x84 and the
# minimap to 64x64, and cast both to uint8
StarcraftInputFilter = InputFilter(is_a_reference_filter=True)
StarcraftInputFilter.add_observation_filter('screen', 'move_axis', ObservationMoveAxisFilter(0, -1))
StarcraftInputFilter.add_observation_filter('screen', 'rescaling',
                                            ObservationRescaleToSizeFilter(
                                                PlanarMapsObservationSpace(np.array([84, 84, 1]),
                                                                           low=0, high=255, channels_axis=-1)))
StarcraftInputFilter.add_observation_filter('screen', 'to_uint8', ObservationToUInt8Filter(0, 255))

StarcraftInputFilter.add_observation_filter('minimap', 'move_axis', ObservationMoveAxisFilter(0, -1))
StarcraftInputFilter.add_observation_filter('minimap', 'rescaling',
                                            ObservationRescaleToSizeFilter(
                                                PlanarMapsObservationSpace(np.array([64, 64, 1]),
                                                                           low=0, high=255, channels_axis=-1)))
StarcraftInputFilter.add_observation_filter('minimap', 'to_uint8', ObservationToUInt8Filter(0, 255))

# default output filter: linearly maps agent actions from the symmetric range
# [-SCREEN_SIZE/2, SCREEN_SIZE/2 - 1] onto the environment's action bounds
StarcraftNormalizingOutputFilter = OutputFilter(is_a_reference_filter=True)
StarcraftNormalizingOutputFilter.add_action_filter(
    'normalization', LinearBoxToBoxMap(input_space_low=-SCREEN_SIZE / 2, input_space_high=SCREEN_SIZE / 2 - 1))
class StarCraft2EnvironmentParameters(EnvironmentParameters):
    """Parameters for the StarCraft II environment wrapper."""
    def __init__(self, level=None):
        super().__init__(level=level)
        self.screen_size = 84
        self.minimap_size = 64
        # indices of the feature maps to keep from pysc2's minimap/screen observations
        self.feature_minimap_maps_to_use = range(7)
        self.feature_screen_maps_to_use = range(17)
        # feature (segmentation) maps vs. raw RGB observations
        self.observation_type = StarcraftObservationType.Features
        self.disable_fog = False
        # automatically select the entire army on episode reset
        self.auto_select_all_army = True
        self.default_input_filter = StarcraftInputFilter
        self.default_output_filter = StarcraftNormalizingOutputFilter
        # expose pysc2's full compound action space instead of the simple 2D move action
        self.use_full_action_space = False

    @property
    def path(self):
        # module:class path used by coach to dynamically instantiate the environment
        return 'rl_coach.environments.starcraft2_environment:StarCraft2Environment'
# Environment
class StarCraft2Environment(Environment):
def __init__(self, level: LevelSelection, frame_skip: int, visualization_parameters: VisualizationParameters,
target_success_rate: float=1.0, seed: Union[None, int]=None, human_control: bool=False,
custom_reward_threshold: Union[int, float]=None,
screen_size: int=84, minimap_size: int=64,
feature_minimap_maps_to_use: List=range(7), feature_screen_maps_to_use: List=range(17),
observation_type: StarcraftObservationType=StarcraftObservationType.Features,
disable_fog: bool=False, auto_select_all_army: bool=True,
use_full_action_space: bool=False, **kwargs):
super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters, target_success_rate)
self.screen_size = screen_size
self.minimap_size = minimap_size
self.feature_minimap_maps_to_use = feature_minimap_maps_to_use
self.feature_screen_maps_to_use = feature_screen_maps_to_use
self.observation_type = observation_type
self.features_screen_size = None
self.feature_minimap_size = None
self.rgb_screen_size = None
self.rgb_minimap_size = None
if self.observation_type == StarcraftObservationType.Features:
self.features_screen_size = screen_size
self.feature_minimap_size = minimap_size
elif self.observation_type == StarcraftObservationType.RGB:
self.rgb_screen_size = screen_size
self.rgb_minimap_size = minimap_size
self.disable_fog = disable_fog
self.auto_select_all_army = auto_select_all_army
self.use_full_action_space = use_full_action_space
# step_mul is the equivalent to frame skipping. Not sure if it repeats actions in between or not though.
self.env = sc2_env.SC2Env(map_name=self.env_id, step_mul=frame_skip,
visualize=self.is_rendered,
agent_interface_format=sc2_env.AgentInterfaceFormat(
feature_dimensions=sc2_env.Dimensions(
screen=self.features_screen_size,
minimap=self.feature_minimap_size
)
# rgb_dimensions=sc2_env.Dimensions(
# screen=self.rgb_screen_size,
# minimap=self.rgb_screen_size
# )
),
# feature_screen_size=self.features_screen_size,
# feature_minimap_size=self.feature_minimap_size,
# rgb_screen_size=self.rgb_screen_size,
# rgb_minimap_size=self.rgb_screen_size,
disable_fog=disable_fog,
random_seed=self.seed
)
# print all the available actions
# self.env = available_actions_printer.AvailableActionsPrinter(self.env)
self.reset_internal_state(True)
"""
feature_screen: [height_map, visibility_map, creep, power, player_id, player_relative, unit_type, selected,
unit_hit_points, unit_hit_points_ratio, unit_energy, unit_energy_ratio, unit_shields,
unit_shields_ratio, unit_density, unit_density_aa, effects]
feature_minimap: [height_map, visibility_map, creep, camera, player_id, player_relative, selecte
d]
player: [player_id, minerals, vespene, food_cap, food_army, food_workers, idle_worker_dount,
army_count, warp_gate_count, larva_count]
"""
self.screen_shape = np.array(self.env.observation_spec()[0]['feature_screen'])
self.screen_shape[0] = len(self.feature_screen_maps_to_use)
self.minimap_shape = np.array(self.env.observation_spec()[0]['feature_minimap'])
self.minimap_shape[0] = len(self.feature_minimap_maps_to_use)
self.state_space = StateSpace({
"screen": PlanarMapsObservationSpace(shape=self.screen_shape, low=0, high=255, channels_axis=0),
"minimap": PlanarMapsObservationSpace(shape=self.minimap_shape, low=0, high=255, channels_axis=0),
"measurements": VectorObservationSpace(self.env.observation_spec()[0]["player"][0])
})
if self.use_full_action_space:
action_identifiers = list(self.env.action_spec()[0].functions)
num_action_identifiers = len(action_identifiers)
action_arguments = [(arg.name, arg.sizes) for arg in self.env.action_spec()[0].types]
sub_action_spaces = [DiscreteActionSpace(num_action_identifiers)]
for argument in action_arguments:
for dimension in argument[1]:
sub_action_spaces.append(DiscreteActionSpace(dimension))
self.action_space = CompoundActionSpace(sub_action_spaces)
else:
self.action_space = BoxActionSpace(2, 0, self.screen_size - 1, ["X-Axis, Y-Axis"],
default_action=np.array([self.screen_size/2, self.screen_size/2]))
self.target_success_rate = target_success_rate
def _update_state(self):
    """Refresh the cached state fields from the most recent PySC2 timestep."""
    # the environment returns a list of timesteps; only a single player is run here
    current = self.last_result[0]
    obs = current.observation
    # keep only the segmentation maps that the user asked for
    self.screen = np.take(obs.feature_screen, self.feature_screen_maps_to_use, axis=0)
    self.minimap = obs.feature_minimap
    self.measurements = obs.player
    self.reward = current.reward
    self.done = current.step_type == environment.StepType.LAST
    self.state = {
        'screen': self.screen,
        'minimap': self.minimap,
        'measurements': self.measurements
    }
def _take_action(self, action):
    """Translate a Coach action into a PySC2 FunctionCall and step the environment.

    :param action: when use_full_action_space is enabled, the action identifier
        followed by its arguments; otherwise an (x, y) screen coordinate.
    """
    if self.use_full_action_space:
        action_identifier = action[0]
        action_arguments = action[1:]
        action = actions.FunctionCall(action_identifier, action_arguments)
    else:
        # interpret the action as a screen coordinate and snap it into the valid range
        coord = np.array(action[0:2])
        coord = coord.round()
        # NOTE(review): the module-level SCREEN_SIZE constant is used here while the
        # rest of the class uses self.screen_size - confirm they always match
        coord = np.clip(coord, 0, SCREEN_SIZE - 1)
        self.last_action_idx = coord
        # the original hard-coded `noop = False` flag made its _NOOP branch
        # unreachable, so the dead branch was removed
        action = actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord])
    self.last_result = self.env.step(actions=[action])
def _restart_environment_episode(self, force_environment_reset=False):
    """Start a fresh episode, optionally selecting the whole army up front."""
    self.last_result = self.env.reset()
    # selecting all units makes the simple move-to-coordinate action space meaningful
    if self.auto_select_all_army:
        select_army = actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
        self.env.step(actions=[select_army])
def get_rendered_image(self):
    """Return the current screen observation as an 8-bit RGB image."""
    # replicate the single-channel screen into three identical RGB channels
    rgb = np.tile(self.screen[..., np.newaxis], (1, 1, 3))
    rgb = np.squeeze(rgb)
    # stretch the values so that the brightest pixel maps to 255
    scaled = rgb / np.max(rgb) * 255
    return scaled.astype('uint8')
def dump_video_of_last_episode(self):
    """Save a StarCraft II replay of the last episode in addition to the regular video dump."""
    # lazy import to avoid a circular dependency with the logger module
    from rl_coach.logger import experiment_path
    # redirect the replay directory of the underlying PySC2 run config into the
    # current experiment directory before saving the replay
    self.env._run_config.replay_dir = experiment_path
    self.env.save_replay('replays')
    super().dump_video_of_last_episode()
def get_target_success_rate(self):
    # success-rate threshold used to decide when to stop the experiment
    return self.target_success_rate
import random
from enum import Enum
from typing import Union
import numpy as np
try:
from dm_control import suite
from dm_control.suite.wrappers import pixels
except ImportError:
from rl_coach.logger import failed_imports
failed_imports.append("DeepMind Control Suite")
from rl_coach.base_parameters import VisualizationParameters
from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection
from rl_coach.filters.filter import NoInputFilter, NoOutputFilter
from rl_coach.spaces import BoxActionSpace, ImageObservationSpace, VectorObservationSpace, StateSpace
class ObservationType(Enum):
    # which observation modality the environment exposes to the agent
    Measurements = 1  # low-dimensional vectors (joint torques and similar)
    Image = 2  # rendered camera frames only
    Image_and_Measurements = 3  # both, under the 'measurements' and 'pixels' keys
# Parameters
class ControlSuiteEnvironmentParameters(EnvironmentParameters):
    """Parameter bundle used by presets to construct a ControlSuiteEnvironment."""
    def __init__(self, level=None):
        super().__init__(level=level)
        # default to the low-dimensional measurements observation and the
        # environment-specific no-op input/output filters
        self.observation_type = ObservationType.Measurements
        self.default_input_filter = ControlSuiteInputFilter
        self.default_output_filter = ControlSuiteOutputFilter
    @property
    def path(self):
        # dynamic-import path of the environment class these parameters describe
        return 'rl_coach.environments.control_suite_environment:ControlSuiteEnvironment'
"""
ControlSuite Environment Components
"""
# the control suite environments need no input/output filtering by default
ControlSuiteInputFilter = NoInputFilter()
ControlSuiteOutputFilter = NoOutputFilter()
# map of 'domain:task' level names for every benchmarking environment in the suite
control_suite_envs = {':'.join(env): ':'.join(env) for env in suite.BENCHMARKING}
# Environment
class ControlSuiteEnvironment(Environment):
    """Coach wrapper around the DeepMind Control Suite environments."""
    def __init__(self, level: LevelSelection, frame_skip: int, visualization_parameters: VisualizationParameters,
                 target_success_rate: float=1.0, seed: Union[None, int]=None, human_control: bool=False,
                 observation_type: ObservationType=ObservationType.Measurements,
                 custom_reward_threshold: Union[int, float]=None, **kwargs):
        """
        :param level: (str)
            A string representing the control suite level to run. This can also be a LevelSelection object.
            For example, cartpole:swingup.
        :param frame_skip: (int)
            The number of frames to skip between any two actions given by the agent. The action will be repeated
            for all the skipped frames.
        :param visualization_parameters: (VisualizationParameters)
            The parameters used for visualizing the environment, such as the render flag, storing videos etc.
        :param target_success_rate: (float)
            Stop experiment if given target success rate was achieved.
        :param seed: (int)
            A seed to use for the random number generator when running the environment.
        :param human_control: (bool)
            A flag that allows controlling the environment using the keyboard keys.
        :param observation_type: (ObservationType)
            An enum which defines which observation to use. The current options are to use:
            * Measurements only - a vector of joint torques and similar measurements
            * Image only - an image of the environment as seen by a camera attached to the simulator
            * Measurements & Image - both type of observations will be returned in the state using the keys
            'measurements' and 'pixels' respectively.
        :param custom_reward_threshold: (float)
            Allows defining a custom reward that will be used to decide when the agent succeeded in passing the environment.
        """
        super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters, target_success_rate)
        self.observation_type = observation_type
        # load and initialize environment; the level id is 'domain:task'
        domain_name, task_name = self.env_id.split(":")
        self.env = suite.load(domain_name=domain_name, task_name=task_name, task_kwargs={'random': seed})
        # wrap with a pixel renderer when images are requested
        if observation_type != ObservationType.Measurements:
            self.env = pixels.Wrapper(self.env, pixels_only=observation_type == ObservationType.Image)
        # seed
        if self.seed is not None:
            np.random.seed(self.seed)
            random.seed(self.seed)
        self.state_space = StateSpace({})
        # image observations
        if observation_type != ObservationType.Measurements:
            self.state_space['pixels'] = ImageObservationSpace(shape=self.env.observation_spec()['pixels'].shape,
                                                               high=255)
        # measurements observations: flatten all the scalar (rank 0) and vector
        # (rank 1) entries of the observation spec into a single named vector
        if observation_type != ObservationType.Image:
            measurements_space_size = 0
            measurements_names = []
            for observation_space_name, observation_space in self.env.observation_spec().items():
                if len(observation_space.shape) == 0:
                    measurements_space_size += 1
                    measurements_names.append(observation_space_name)
                elif len(observation_space.shape) == 1:
                    measurements_space_size += observation_space.shape[0]
                    measurements_names.extend(["{}_{}".format(observation_space_name, i) for i in
                                               range(observation_space.shape[0])])
            self.state_space['measurements'] = VectorObservationSpace(shape=measurements_space_size,
                                                                      measurements_names=measurements_names)
        # actions: a continuous box bounded by the environment's action spec
        self.action_space = BoxActionSpace(
            shape=self.env.action_spec().shape[0],
            low=self.env.action_spec().minimum,
            high=self.env.action_spec().maximum
        )
        # initialize the state by getting a new state from the environment
        self.reset_internal_state(True)
        # render
        if self.is_rendered:
            image = self.get_rendered_image()
            scale = 1
            if self.human_control:
                scale = 2
            if not self.native_rendering:
                self.renderer.create_screen(image.shape[1]*scale, image.shape[0]*scale)
        self.target_success_rate = target_success_rate
    def _update_state(self):
        # rebuild the state dict from the last dm_control TimeStep
        self.state = {}
        if self.observation_type != ObservationType.Measurements:
            self.pixels = self.last_result.observation['pixels']
            self.state['pixels'] = self.pixels
        if self.observation_type != ObservationType.Image:
            # concatenate all scalar/vector observation entries into one vector.
            # NOTE(review): in Image_and_Measurements mode the 'pixels' entry is
            # also among observation.values() and would reach the else branch
            # below - verify this mode against dm_control's wrapper output
            self.measurements = np.array([])
            for sub_observation in self.last_result.observation.values():
                if isinstance(sub_observation, np.ndarray) and len(sub_observation.shape) == 1:
                    self.measurements = np.concatenate((self.measurements, sub_observation))
                else:
                    self.measurements = np.concatenate((self.measurements, np.array([sub_observation])))
            self.state['measurements'] = self.measurements
        # the first TimeStep after reset carries reward None
        self.reward = self.last_result.reward if self.last_result.reward is not None else 0
        self.done = self.last_result.last()
    def _take_action(self, action):
        # clip continuous actions into the declared action bounds before stepping
        if type(self.action_space) == BoxActionSpace:
            action = self.action_space.clip_action_to_space(action)
        self.last_result = self.env.step(action)
    def _restart_environment_episode(self, force_environment_reset=False):
        self.last_result = self.env.reset()
    def _render(self):
        # rendering is handled by Coach's renderer using get_rendered_image()
        pass
    def get_rendered_image(self):
        # render the scene from the first fixed camera
        return self.env.physics.render(camera_id=0)
    def get_target_success_rate(self) -> float:
        return self.target_success_rate
from typing import Union, Dict
from rl_coach.core_types import ActionType, EnvResponse, RunPhase
from rl_coach.spaces import ActionSpace
class EnvironmentInterface(object):
    """The most abstract interface for anything an agent can act upon - either a
    real environment or a LevelManager wrapping lower levels of a hierarchy."""
    def __init__(self):
        # the current run phase (heatup / train / test); starts undefined
        self._phase = RunPhase.UNDEFINED
    @property
    def phase(self) -> RunPhase:
        """
        Get the phase of the environment
        :return: the current phase
        """
        return self._phase
    @phase.setter
    def phase(self, val: RunPhase):
        """
        Change the phase of the environment
        :param val: the new phase
        :return: None
        """
        self._phase = val
    @property
    def action_space(self) -> Union[Dict[str, ActionSpace], ActionSpace]:
        """
        Get the action space of the environment (or of each of the agents wrapped in this environment.
        i.e. in the LevelManager case")
        :return: the action space
        """
        raise NotImplementedError("")
    def get_random_action(self) -> ActionType:
        """
        Get a random action from the environment action space
        :return: An action that follows the definition of the action space.
        """
        raise NotImplementedError("")
    def step(self, action: ActionType) -> Union[None, EnvResponse]:
        """
        Make a single step in the environment using the given action
        :param action: an action to use for stepping the environment. Should follow the definition of the action space.
        :return: the environment response as returned in get_last_env_response or None for LevelManager
        """
        raise NotImplementedError("")
    def reset_internal_state(self, force_environment_reset: bool=False) -> Union[None, EnvResponse]:
        """
        Reset the environment episode
        :param force_environment_reset: in some cases, resetting the environment can be suppressed by the environment
        itself. This flag allows force the reset.
        :return: the environment response as returned in get_last_env_response or None for LevelManager
        """
        raise NotImplementedError("")
import os
import gym
import numpy as np
from gym import spaces
from gym.envs.registration import EnvSpec
from mujoco_py import load_model_from_path, MjSim, MjViewer, MjRenderContextOffscreen
class PendulumWithGoals(gym.Env):
    """A goal-conditioned pendulum swing-up environment backed by a MuJoCo simulation.

    Observations follow the goal-based gym convention: a dict with 'observation',
    'desired_goal' and 'achieved_goal' keys, suitable for HER/HAC-style agents.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30
    }
    def __init__(self, goal_reaching_thresholds=np.array([0.075, 0.075, 0.75]),
                 goal_not_reached_penalty=-1, goal_reached_reward=0, terminate_on_goal_reaching=True,
                 time_limit=1000, frameskip=1, random_goals_instead_of_standing_goal=False,
                 polar_coordinates: bool=False):
        """
        :param goal_reaching_thresholds: per-dimension absolute tolerances for deciding
            that the achieved goal matches the desired goal
        :param goal_not_reached_penalty: reward given on each step while the goal is not reached
        :param goal_reached_reward: reward given once the goal is reached
        :param terminate_on_goal_reaching: if True, end the episode when the goal is reached
        :param time_limit: maximum number of environment steps per episode
        :param frameskip: number of MuJoCo simulation sub-steps per environment step
        :param random_goals_instead_of_standing_goal: sample a random goal every episode
            instead of the upright standing goal
        :param polar_coordinates: if True, observations/goals are (angle, angular velocity);
            otherwise (x, y, angular velocity)
        """
        super().__init__()
        dir = os.path.dirname(__file__)
        model = load_model_from_path(dir + "/pendulum_with_goals.xml")
        self.sim = MjSim(model)
        # viewers are created lazily on the first render() call
        self.viewer = None
        self.rgb_viewer = None
        self.frameskip = frameskip
        self.goal = None
        self.goal_reaching_thresholds = goal_reaching_thresholds
        self.goal_not_reached_penalty = goal_not_reached_penalty
        self.goal_reached_reward = goal_reached_reward
        self.terminate_on_goal_reaching = terminate_on_goal_reaching
        self.time_limit = time_limit
        self.current_episode_steps_counter = 0
        self.random_goals_instead_of_standing_goal = random_goals_instead_of_standing_goal
        self.polar_coordinates = polar_coordinates
        # spaces definition: actions are bounded by the actuator control range
        self.action_space = spaces.Box(low=-self.sim.model.actuator_ctrlrange[:, 1],
                                       high=self.sim.model.actuator_ctrlrange[:, 1],
                                       dtype=np.float32)
        if self.polar_coordinates:
            # (angle, angular velocity)
            self.observation_space = spaces.Dict({
                "observation": spaces.Box(low=np.array([-np.pi, -15]),
                                          high=np.array([np.pi, 15]),
                                          dtype=np.float32),
                "desired_goal": spaces.Box(low=np.array([-np.pi, -15]),
                                           high=np.array([np.pi, 15]),
                                           dtype=np.float32),
                "achieved_goal": spaces.Box(low=np.array([-np.pi, -15]),
                                            high=np.array([np.pi, 15]),
                                            dtype=np.float32)
            })
        else:
            # (x, y, angular velocity)
            self.observation_space = spaces.Dict({
                "observation": spaces.Box(low=np.array([-1, -1, -15]),
                                          high=np.array([1, 1, 15]),
                                          dtype=np.float32),
                "desired_goal": spaces.Box(low=np.array([-1, -1, -15]),
                                           high=np.array([1, 1, 15]),
                                           dtype=np.float32),
                "achieved_goal": spaces.Box(low=np.array([-1, -1, -15]),
                                            high=np.array([1, 1, 15]),
                                            dtype=np.float32)
            })
        self.spec = EnvSpec('PendulumWithGoals-v0')
        # worst case: the goal is never reached for the whole episode
        self.spec.reward_threshold = self.goal_not_reached_penalty * self.time_limit
        self.reset()
    def _goal_reached(self):
        # the goal counts as reached when every dimension is within its threshold
        observation = self._get_obs()
        if np.any(np.abs(observation['achieved_goal'] - observation['desired_goal']) > self.goal_reaching_thresholds):
            return False
        else:
            return True
    def _terminate(self):
        # end the episode on goal reaching (if configured) or on the time limit
        if (self._goal_reached() and self.terminate_on_goal_reaching) or \
                self.current_episode_steps_counter >= self.time_limit:
            return True
        else:
            return False
    def _reward(self):
        # sparse reward: penalty until the goal is reached
        if self._goal_reached():
            return self.goal_reached_reward
        else:
            return self.goal_not_reached_penalty
    def step(self, action):
        # apply the torque and advance the simulation frameskip sub-steps
        self.sim.data.ctrl[:] = action
        for _ in range(self.frameskip):
            self.sim.step()
        self.current_episode_steps_counter += 1
        state = self._get_obs()
        # visualize the angular velocities as vertical bars next to the pendulum
        # (sites/mocap indices 2 and 3 are the state and goal velocity markers)
        state_velocity = np.copy(state['observation'][-1] / 20)
        goal_velocity = self.goal[-1] / 20
        self.sim.model.site_size[2] = np.array([0.01, 0.01, state_velocity])
        self.sim.data.mocap_pos[2] = np.array([0.85, 0, 0.75 + state_velocity])
        self.sim.model.site_size[3] = np.array([0.01, 0.01, goal_velocity])
        self.sim.data.mocap_pos[3] = np.array([1.15, 0, 0.75 + goal_velocity])
        return state, self._reward(), self._terminate(), {}
    def _get_obs(self):
        """Build the goal-based observation dict.

        Coordinate system (qpos is the angle relative to a standing pole):

        y
        ^
        |____
        |   /
        |  /
        |~/
        |/
        --------> x
        """
        # observation
        angle = self.sim.data.qpos
        angular_velocity = self.sim.data.qvel
        if self.polar_coordinates:
            observation = np.concatenate([angle - np.pi, angular_velocity])
        else:
            x = np.sin(angle)
            y = np.cos(angle)  # qpos is the angle relative to a standing pole
            observation = np.concatenate([x, y, angular_velocity])
        return {
            "observation": observation,
            "desired_goal": self.goal,
            "achieved_goal": observation
        }
    def reset(self):
        self.current_episode_steps_counter = 0
        # set initial state: a random angle away from the top, nearly at rest
        angle = np.random.uniform(np.pi / 4, 7 * np.pi / 4)
        angular_velocity = np.random.uniform(-0.05, 0.05)
        self.sim.data.qpos[0] = angle
        self.sim.data.qvel[0] = angular_velocity
        self.sim.step()
        # goal
        if self.random_goals_instead_of_standing_goal:
            angle_target = np.random.uniform(-np.pi / 8, np.pi / 8)
            angular_velocity_target = np.random.uniform(-0.2, 0.2)
        else:
            # upright and motionless
            angle_target = 0
            angular_velocity_target = 0
        # convert target values to goal
        x_target = np.sin(angle_target)
        y_target = np.cos(angle_target)
        if self.polar_coordinates:
            self.goal = np.array([angle_target - np.pi, angular_velocity_target])
        else:
            self.goal = np.array([x_target, y_target, angular_velocity_target])
        # visualize the goal (mocap body 0 marks the target position)
        self.sim.data.mocap_pos[0] = [x_target, 0, y_target]
        return self._get_obs()
    def render(self, mode='human', close=False):
        if mode == 'human':
            # interactive on-screen viewer
            if self.viewer is None:
                self.viewer = MjViewer(self.sim)
            self.viewer.render()
        elif mode == 'rgb_array':
            # offscreen rendering into a numpy array
            if self.rgb_viewer is None:
                self.rgb_viewer = MjRenderContextOffscreen(self.sim, 0)
            self.rgb_viewer.render(500, 500)
            # window size used for old mujoco-py:
            data = self.rgb_viewer.read_pixels(500, 500, depth=False)
            # original image is upside-down, so flip it
            return data[::-1, :, :]
import random
import gym
import numpy as np
from gym import spaces
class BitFlip(gym.Env):
    """The bit-flip environment from the Hindsight Experience Replay paper.

    The state is a binary vector and each action flips a single bit. An episode
    succeeds once the state matches a randomly drawn goal vector.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30
    }

    def __init__(self, bit_length=16, max_steps=None, mean_zero=False):
        """
        :param bit_length: number of bits in the state (must be >= 1)
        :param max_steps: episode step limit. None defaults to bit_length;
            0 disables the limit entirely.
        :param mean_zero: if True, observations are rescaled from {0, 1} to {-1, 1}
        """
        super(BitFlip, self).__init__()
        if bit_length < 1:
            raise ValueError('bit_length must be >= 1, found {}'.format(bit_length))
        self.bit_length = bit_length
        self.mean_zero = mean_zero
        if max_steps is None:
            # default to bit_length
            self.max_steps = bit_length
        elif max_steps == 0:
            # an explicit 0 means "no step limit" (stored as None)
            self.max_steps = None
        else:
            self.max_steps = max_steps
        # spaces documentation: https://gym.openai.com/docs/
        self.action_space = spaces.Discrete(bit_length)
        self.observation_space = spaces.Dict({
            'state': spaces.Box(low=0, high=1, shape=(bit_length, )),
            'desired_goal': spaces.Box(low=0, high=1, shape=(bit_length, )),
            'achieved_goal': spaces.Box(low=0, high=1, shape=(bit_length, ))
        })
        self.reset()

    def _terminate(self):
        # BUG FIX: self.max_steps is None when no limit was requested, and
        # comparing an int against None raises a TypeError in Python 3
        limit_reached = self.max_steps is not None and self.steps >= self.max_steps
        return (self.state == self.goal).all() or limit_reached

    def _reward(self):
        # sparse reward: -1 on every step until the goal is reached
        return -1 if (self.state != self.goal).any() else 0

    def step(self, action):
        # action is an int in the range [0, self.bit_length)
        self.state[action] = int(not self.state[action])
        self.steps += 1
        return (self._get_obs(), self._reward(), self._terminate(), {})

    def reset(self):
        self.steps = 0
        self.state = np.array([random.choice([1, 0]) for _ in range(self.bit_length)])
        # make sure goal is not the initial state; aliasing the state here
        # guarantees the loop below runs at least once
        self.goal = self.state
        while (self.goal == self.state).all():
            self.goal = np.array([random.choice([1, 0]) for _ in range(self.bit_length)])
        return self._get_obs()

    def _mean_zero(self, x):
        # rescale {0, 1} -> {-1, 1} when requested
        if self.mean_zero:
            return (x - 0.5) / 0.5
        else:
            return x

    def _get_obs(self):
        return {
            'state': self._mean_zero(self.state),
            'desired_goal': self._mean_zero(self.goal),
            'achieved_goal': self._mean_zero(self.state)
        }

    def render(self, mode='human', close=False):
        # draw each bit as a 20x20 patch: green where it matches the goal, red otherwise
        observation = np.zeros((20, 20 * self.bit_length, 3))
        for bit_idx, (state_bit, goal_bit) in enumerate(zip(self.state, self.goal)):
            # green if the bit matches
            observation[:, bit_idx * 20:(bit_idx + 1) * 20, 1] = (state_bit == goal_bit) * 255
            # red if the bit doesn't match
            observation[:, bit_idx * 20:(bit_idx + 1) * 20, 0] = (state_bit != goal_bit) * 255
        return observation
from enum import Enum
import gym
import numpy as np
from gym import spaces
class ExplorationChain(gym.Env):
    """A hard-exploration chain toy problem.

    The agent walks left/right on a chain of states. The leftmost state gives a
    small reward and the rightmost state a large one, so reaching the far end
    requires sustained exploration.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30
    }

    class ObservationType(Enum):
        OneHot = 0  # a single 1 at the current state index
        Therm = 1   # thermometer encoding: 1s up to and including the current state

    def __init__(self, chain_length=16, start_state=1, max_steps=None, observation_type=ObservationType.Therm,
                 left_state_reward=1/1000, right_state_reward=1, simple_render=True):
        """
        :param chain_length: number of states in the chain (must be > 3)
        :param start_state: index of the initial state
        :param max_steps: episode step limit; None means no limit
        :param observation_type: encoding of the current state (one-hot or thermometer)
        :param left_state_reward: reward for being in the leftmost state
        :param right_state_reward: reward for being in the rightmost state
        :param simple_render: if True, render a plain strip image instead of a graph
        """
        super().__init__()
        if chain_length <= 3:
            raise ValueError('Chain length must be > 3, found {}'.format(chain_length))
        if not 0 <= start_state < chain_length:
            raise ValueError('The start state should be within the chain bounds, found {}'.format(start_state))
        self.chain_length = chain_length
        self.start_state = start_state
        self.max_steps = max_steps
        self.observation_type = observation_type
        self.left_state_reward = left_state_reward
        self.right_state_reward = right_state_reward
        self.simple_render = simple_render
        # spaces documentation: https://gym.openai.com/docs/
        self.action_space = spaces.Discrete(2)  # 0 -> Go left, 1 -> Go right
        self.observation_space = spaces.Box(0, 1, shape=(chain_length,))  # spaces.MultiBinary(chain_length)
        self.reset()

    def _terminate(self):
        # BUG FIX: max_steps defaults to None (no limit), and comparing an int
        # against None raises a TypeError in Python 3
        return self.max_steps is not None and self.steps >= self.max_steps

    def _reward(self):
        # reward is only given at the two ends of the chain
        if self.state == 0:
            return self.left_state_reward
        elif self.state == self.chain_length - 1:
            return self.right_state_reward
        else:
            return 0

    def step(self, action):
        # action is 0 (left) or 1 (right); moves are clamped at the chain ends
        if action == 0:
            if 0 < self.state:
                self.state -= 1
        elif action == 1:
            if self.state < self.chain_length - 1:
                self.state += 1
        else:
            raise ValueError("An invalid action was given. The available actions are - 0 or 1, found {}".format(action))
        self.steps += 1
        return self._get_obs(), self._reward(), self._terminate(), {}

    def reset(self):
        self.steps = 0
        self.state = self.start_state
        return self._get_obs()

    def _get_obs(self):
        # encode the current state index as the configured observation vector
        self.observation = np.zeros((self.chain_length,))
        if self.observation_type == self.ObservationType.OneHot:
            self.observation[self.state] = 1
        elif self.observation_type == self.ObservationType.Therm:
            self.observation[:(self.state+1)] = 1
        return self.observation

    def render(self, mode='human', close=False):
        if self.simple_render:
            # a strip of 20x20 patches with the current state highlighted
            observation = np.zeros((20, 20*self.chain_length))
            observation[:, self.state*20:(self.state+1)*20] = 255
            return observation
        else:
            # lazy loading of networkx and matplotlib to allow using the environment without installing them if
            # necessary
            import networkx as nx
            from networkx.drawing.nx_agraph import graphviz_layout
            import matplotlib.pyplot as plt
            if not hasattr(self, 'G'):
                # build the chain graph once: edges left/right plus self loops at the ends
                self.states = list(range(self.chain_length))
                self.G = nx.DiGraph(directed=True)
                for i, origin_state in enumerate(self.states):
                    if i < self.chain_length - 1:
                        self.G.add_edge(origin_state,
                                        origin_state + 1,
                                        weight=0.5)
                    if i > 0:
                        self.G.add_edge(origin_state,
                                        origin_state - 1,
                                        weight=0.5, )
                    if i == 0 or i < self.chain_length - 1:
                        self.G.add_edge(origin_state,
                                        origin_state,
                                        weight=0.5, )
            fig = plt.gcf()
            # NOTE(review): the checked size (10, 2) and the size being set (5, 1)
            # do not match, so the figure is resized on every call - confirm intent
            if np.all(fig.get_size_inches() != [10, 2]):
                fig.set_size_inches(5, 1)
            color = ['y']*(len(self.G))
            color[self.state] = 'r'
            options = {
                'node_color': color,
                'node_size': 50,
                'width': 1,
                'arrowstyle': '-|>',
                'arrowsize': 5,
                'font_size': 6
            }
            pos = graphviz_layout(self.G, prog='dot', args='-Grankdir=LR')
            nx.draw_networkx(self.G, pos, arrows=True, **options)
            fig.canvas.draw()
            # np.fromstring on binary data is deprecated; frombuffer is the
            # documented equivalent for reading the canvas buffer
            data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
            data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
            return data
from typing import Tuple, List
from rl_coach.base_parameters import AgentParameters, VisualizationParameters, TaskParameters, \
PresetValidationParameters
from rl_coach.environments.environment import EnvironmentParameters, Environment
from rl_coach.filters.filter import NoInputFilter, NoOutputFilter
from rl_coach.graph_managers.graph_manager import GraphManager, ScheduleParameters
from rl_coach.level_manager import LevelManager
from rl_coach.utils import short_dynamic_import
class BasicRLGraphManager(GraphManager):
    """
    A basic RL graph manager creates the common scheme of RL where there is a single agent which interacts with a
    single environment.
    """
    def __init__(self, agent_params: AgentParameters, env_params: EnvironmentParameters,
                 schedule_params: ScheduleParameters,
                 vis_params: VisualizationParameters=VisualizationParameters(),
                 preset_validation_params: PresetValidationParameters = PresetValidationParameters(),
                 name='simple_rl_graph'):
        super().__init__(name, schedule_params, vis_params)
        self.agent_params = agent_params
        self.env_params = env_params
        self.preset_validation_params = preset_validation_params
        self.agent_params.visualization = vis_params
        # fill in default input/output filters when the preset did not set them.
        # when there is no environment (e.g. batch-rl and imitation learning),
        # there is no default filter to borrow, so fall back to a no-op filter;
        # the preset is then expected to define any filters explicitly.
        if self.agent_params.input_filter is None:
            if env_params is None:
                self.agent_params.input_filter = NoInputFilter()
            else:
                self.agent_params.input_filter = env_params.default_input_filter()
        if self.agent_params.output_filter is None:
            if env_params is None:
                self.agent_params.output_filter = NoOutputFilter()
            else:
                self.agent_params.output_filter = env_params.default_output_filter()

    def _create_graph(self, task_parameters: TaskParameters) -> Tuple[List[LevelManager], List[Environment]]:
        """Instantiate the environment and the agent and wrap them in one level."""
        # environment loading
        self.env_params.seed = task_parameters.seed
        self.env_params.experiment_path = task_parameters.experiment_path
        environment = short_dynamic_import(self.env_params.path)(
            **self.env_params.__dict__, visualization_parameters=self.visualization_parameters)
        # agent loading
        self.agent_params.task_parameters = task_parameters  # TODO: this should probably be passed in a different way
        self.agent_params.name = "agent"
        single_agent = short_dynamic_import(self.agent_params.path)(self.agent_params)
        # set level manager
        main_level = LevelManager(agents=single_agent, environment=environment, name="main_level")
        return [main_level], [environment]

    def get_agent(self):
        """Return the single agent managed by this graph."""
        return self.level_managers[0].agents['agent']

    def log_signal(self, signal_name, value):
        """Record a scalar signal value on the agent's logger."""
        self.get_agent().agent_logger.create_signal_value(signal_name, value)

    def get_signal_value(self, signal_name):
        """Read back a previously logged signal value from the agent's logger."""
        return self.get_agent().agent_logger.get_signal_value(signal_name)
from typing import List, Union, Tuple
from rl_coach.base_parameters import AgentParameters, VisualizationParameters, TaskParameters, \
PresetValidationParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.environments.environment import EnvironmentParameters, Environment
from rl_coach.graph_managers.graph_manager import GraphManager, ScheduleParameters
from rl_coach.level_manager import LevelManager
from rl_coach.utils import short_dynamic_import
class HACGraphManager(GraphManager):
    """
    A simple HAC graph manager creates a deep hierarchy with a single agent per hierarchy level, and a single
    environment (on the bottom layer) which is interacted with.
    """
    def __init__(self, agents_params: List[AgentParameters], env_params: EnvironmentParameters,
                 schedule_params: ScheduleParameters, vis_params: VisualizationParameters,
                 consecutive_steps_to_run_non_top_levels: Union[EnvironmentSteps, List[EnvironmentSteps]],
                 preset_validation_params: PresetValidationParameters = PresetValidationParameters()):
        """
        :param agents_params: the parameters of all the agents in the hierarchy starting from the top level of the
            hierarchy to the bottom level
        :param env_params: the parameters of the environment
        :param schedule_params: the parameters for scheduling the graph
        :param vis_params: the visualization parameters
        :param consecutive_steps_to_run_non_top_levels: the number of time steps that each level is ran.
            for example, when the top level gives the bottom level a goal, the bottom level can act for
            consecutive_steps_to_run_each_level steps and try to reach that goal. This is expected to be either
            an EnvironmentSteps which will be used for all levels, or an EnvironmentSteps for each level as a list.
        """
        super().__init__('hac_graph', schedule_params, vis_params)
        self.agents_params = agents_params
        self.env_params = env_params
        self.preset_validation_params = preset_validation_params
        self.should_test_current_sub_goal = None  # will be filled by the top level agent, and is used by all levels
        if isinstance(consecutive_steps_to_run_non_top_levels, list):
            if len(consecutive_steps_to_run_non_top_levels) != len(self.agents_params):
                raise ValueError("If the consecutive_steps_to_run_each_level is given as a list, it should match "
                                 "the number of levels in the hierarchy. Alternatively, it is possible to use a single "
                                 "value for all the levels, by passing an EnvironmentSteps")
            # BUG FIX: the validated list used to be discarded (the attribute was only
            # assigned in the EnvironmentSteps branch), which made _create_graph fail
            # with an AttributeError whenever a list was passed
            self.consecutive_steps_to_run_non_top_levels = consecutive_steps_to_run_non_top_levels
        elif isinstance(consecutive_steps_to_run_non_top_levels, EnvironmentSteps):
            self.consecutive_steps_to_run_non_top_levels = consecutive_steps_to_run_non_top_levels
        else:
            # fail fast instead of leaving the attribute unset and crashing later
            raise ValueError("consecutive_steps_to_run_non_top_levels should be either an EnvironmentSteps or a "
                             "list of EnvironmentSteps")
        for agent_params in agents_params:
            agent_params.visualization = self.visualization_parameters
            if agent_params.input_filter is None:
                agent_params.input_filter = self.env_params.default_input_filter()
            if agent_params.output_filter is None:
                agent_params.output_filter = self.env_params.default_output_filter()
        if len(self.agents_params) < 2:
            raise ValueError("The HAC graph manager must receive the agent parameters for at least two levels of the "
                             "hierarchy. Otherwise, use the basic RL graph manager.")

    def _steps_limit_for_level(self, level_idx):
        """Return the consecutive-steps limit to use for a non-top hierarchy level."""
        if isinstance(self.consecutive_steps_to_run_non_top_levels, list):
            # a list provides one EnvironmentSteps per hierarchy level
            return self.consecutive_steps_to_run_non_top_levels[level_idx]
        return self.consecutive_steps_to_run_non_top_levels

    def _create_graph(self, task_parameters: TaskParameters) -> Tuple[List[LevelManager], List[Environment]]:
        env = short_dynamic_import(self.env_params.path)(**self.env_params.__dict__,
                                                         visualization_parameters=self.visualization_parameters)
        for agent_params in self.agents_params:
            agent_params.task_parameters = task_parameters
        # we need to build the hierarchy in reverse order (from the bottom up) in order for the spaces of each level
        # to be known
        level_managers = []
        current_env = env
        for level_idx, agent_params in reversed(list(enumerate(self.agents_params))):
            agent_params.name = "agent_{}".format(level_idx)
            agent_params.is_a_highest_level_agent = level_idx == 0
            agent_params.is_a_lowest_level_agent = level_idx == len(self.agents_params) - 1
            agent = short_dynamic_import(agent_params.path)(agent_params)
            level_manager = LevelManager(
                agents=agent,
                environment=current_env,
                real_environment=env,
                # the top level acts once per decision; lower levels run for the
                # configured number of consecutive steps toward the given goal
                steps_limit=EnvironmentSteps(1) if level_idx == 0
                else self._steps_limit_for_level(level_idx),
                should_reset_agent_state_after_time_limit_passes=level_idx > 0,
                name="level_{}".format(level_idx)
            )
            current_env = level_manager
            level_managers.insert(0, level_manager)
        return level_managers, [env]
from typing import List, Type, Union
from rl_coach.base_parameters import MiddlewareScheme, NetworkComponentParameters
class MiddlewareParameters(NetworkComponentParameters):
    """Base parameters class for middleware components (the part of the network
    that sits between the input embedders and the output heads)."""
    def __init__(self, parameterized_class_name: str,
                 activation_function: str='relu', scheme: Union[List, MiddlewareScheme]=MiddlewareScheme.Medium,
                 batchnorm: bool=False, dropout_rate: float=0.0, name='middleware', dense_layer=None, is_training=False):
        """
        :param parameterized_class_name: name of the concrete middleware class these parameters describe
        :param activation_function: activation function applied after each layer
        :param scheme: either a MiddlewareScheme preset or an explicit list of layers
        :param batchnorm: whether to add batch normalization after each layer
        :param dropout_rate: dropout probability (0.0 disables dropout)
        :param name: the middleware name within the network
        :param dense_layer: the dense layer class to use for building the middleware
        :param is_training: toggle for layers that behave differently in train/inference
        """
        super().__init__(dense_layer=dense_layer)
        self.activation_function = activation_function
        self.scheme = scheme
        self.batchnorm = batchnorm
        self.dropout_rate = dropout_rate
        self.name = name
        self.is_training = is_training
        self.parameterized_class_name = parameterized_class_name
    @property
    def path(self):
        # dynamic-import path of the concrete middleware implementation
        return 'rl_coach.architectures.tensorflow_components.middlewares:' + self.parameterized_class_name
class FCMiddlewareParameters(MiddlewareParameters):
    """Parameters for a fully-connected middleware."""
    def __init__(self, activation_function='relu',
                 scheme: Union[List, MiddlewareScheme] = MiddlewareScheme.Medium,
                 batchnorm: bool = False, dropout_rate: float = 0.0,
                 name="middleware_fc_embedder", dense_layer=None, is_training=False, num_streams=1):
        """
        :param num_streams: number of parallel middleware streams to build
            (e.g. separate value/advantage streams); other parameters are
            documented on MiddlewareParameters
        """
        super().__init__(parameterized_class_name="FCMiddleware", activation_function=activation_function,
                         scheme=scheme, batchnorm=batchnorm, dropout_rate=dropout_rate, name=name, dense_layer=dense_layer,
                         is_training=is_training)
        self.num_streams = num_streams
class LSTMMiddlewareParameters(MiddlewareParameters):
    """Parameters for an LSTM-based middleware."""
    def __init__(self, activation_function='relu', number_of_lstm_cells=256,
                 scheme: Union[List, MiddlewareScheme] = MiddlewareScheme.Medium,
                 batchnorm: bool = False, dropout_rate: float = 0.0,
                 name="middleware_lstm_embedder", dense_layer=None, is_training=False):
        """
        :param number_of_lstm_cells: number of units in the LSTM cell; other
            parameters are documented on MiddlewareParameters
        """
        super().__init__(parameterized_class_name="LSTMMiddleware", activation_function=activation_function,
                         scheme=scheme, batchnorm=batchnorm, dropout_rate=dropout_rate, name=name, dense_layer=dense_layer,
                         is_training=is_training)
        self.number_of_lstm_cells = number_of_lstm_cells
from typing import Any, Dict, List, Tuple
import numpy as np
from rl_coach.base_parameters import AgentParameters
from rl_coach.saver import SaverCollection
from rl_coach.spaces import SpacesDefinition
class Architecture(object):
@staticmethod
def construct(variable_scope: str, devices: List[str], *args, **kwargs) -> 'Architecture':
    """
    Construct a network class using the provided variable scope and on requested devices.
    Must be implemented by the concrete framework backend (e.g. TensorFlow, MXNet).
    :param variable_scope: string specifying variable scope under which to create network variables
    :param devices: list of devices (can be list of Device objects, or string for TF distributed)
    :param args: all other arguments for class initializer
    :param kwargs: all other keyword arguments for class initializer
    :return: an object which is a child of Architecture
    """
    raise NotImplementedError
def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, name: str= ""):
"""
Creates a neural network 'architecture', that can be trained and used for inference.
:param agent_parameters: the agent parameters
:param spaces: the spaces (observation, action, etc.) definition of the agent
:param name: the name of the network
"""
self.spaces = spaces
self.name = name
self.network_wrapper_name = self.name.split('/')[0] # e.g. 'main/online' --> 'main'
self.full_name = "{}/{}".format(agent_parameters.full_name_id, name)
self.network_parameters = agent_parameters.network_wrappers[self.network_wrapper_name]
self.batch_size = self.network_parameters.batch_size
self.learning_rate = self.network_parameters.learning_rate
self.optimizer = None
self.ap = agent_parameters
def predict(self,
inputs: Dict[str, np.ndarray],
outputs: List[Any] = None,
squeeze_output: bool = True,
initial_feed_dict: Dict[Any, np.ndarray] = None) -> Tuple[np.ndarray, ...]:
"""
Given input observations, use the model to make predictions (e.g. action or value).
:param inputs: current state (i.e. observations, measurements, goals, etc.)
(e.g. `{'observation': numpy.ndarray}` of shape (batch_size, observation_space_size))
:param outputs: list of outputs to return. Return all outputs if unspecified. Type of the list elements
depends on the framework backend.
:param squeeze_output: call squeeze_list on output before returning if True
:param initial_feed_dict: a dictionary of extra inputs for forward pass.
:return: predictions of action or value of shape (batch_size, action_space_size) for action predictions)
"""
raise NotImplementedError
@staticmethod
def parallel_predict(sess: Any,
network_input_tuples: List[Tuple['Architecture', Dict[str, np.ndarray]]]) -> \
Tuple[np.ndarray, ...]:
"""
:param sess: active session to use for prediction
:param network_input_tuples: tuple of network and corresponding input
:return: list or tuple of outputs from all networks
"""
raise NotImplementedError
def train_on_batch(self,
inputs: Dict[str, np.ndarray],
targets: List[np.ndarray],
scaler: float=1.,
additional_fetches: list=None,
importance_weights: np.ndarray=None) -> Tuple[float, List[float], float, list]:
"""
Given a batch of inputs (e.g. states) and targets (e.g. discounted rewards), takes a training step: i.e. runs a
forward pass and backward pass of the network, accumulates the gradients and applies an optimization step to
update the weights.
Calls `accumulate_gradients` followed by `apply_and_reset_gradients`.
Note: Currently an unused method.
:param inputs: typically the environment states (but can also contain other data necessary for loss).
(e.g. `{'observation': numpy.ndarray}` with `observation` of shape (batch_size, observation_space_size) or
(batch_size, observation_space_size, stack_size) or
`{'observation': numpy.ndarray, 'output_0_0': numpy.ndarray}` with `output_0_0` of shape (batch_size,))
:param targets: target values of shape (batch_size, ). For example discounted rewards for value network
for calculating the value-network loss would be a target. Length of list and order of arrays in
the list matches that of network losses which are defined by network parameters
:param scaler: value to scale gradients by before optimizing network weights
:param additional_fetches: list of additional values to fetch and return. The type of each list
element is framework dependent.
:param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss.
:return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors
total_loss (float): sum of all head losses
losses (list of float): list of all losses. The order is list of target losses followed by list
of regularization losses. The specifics of losses is dependant on the network parameters
(number of heads, etc.)
norm_unclippsed_grads (float): global norm of all gradients before any gradient clipping is applied
fetched_tensors: all values for additional_fetches
"""
raise NotImplementedError
def get_weights(self) -> List[np.ndarray]:
"""
Gets model weights as a list of ndarrays. It is used for synchronizing weight between two identical networks.
:return: list weights as ndarray
"""
raise NotImplementedError
def set_weights(self, weights: List[np.ndarray], rate: float=1.0) -> None:
"""
Sets model weights for provided layer parameters.
:param weights: list of model weights in the same order as received in get_weights
:param rate: controls the mixture of given weight values versus old weight values.
i.e. new_weight = rate * given_weight + (1 - rate) * old_weight
:return: None
"""
raise NotImplementedError
def reset_accumulated_gradients(self) -> None:
"""
Sets gradient of all parameters to 0.
Once gradients are reset, they must be accessible by `accumulated_gradients` property of this class,
which must return a list of numpy ndarrays. Child class must ensure that `accumulated_gradients` is set.
"""
raise NotImplementedError
def accumulate_gradients(self,
inputs: Dict[str, np.ndarray],
targets: List[np.ndarray],
additional_fetches: list=None,
importance_weights: np.ndarray=None,
no_accumulation: bool=False) -> Tuple[float, List[float], float, list]:
"""
Given a batch of inputs (i.e. states) and targets (e.g. discounted rewards), computes and accumulates the
gradients for model parameters. Will run forward and backward pass to compute gradients, clip the gradient
values if required and then accumulate gradients from all learners. It does not update the model weights,
that's performed in `apply_and_reset_gradients` method.
Once gradients are accumulated, they are accessed by `accumulated_gradients` property of this class.å
:param inputs: typically the environment states (but can also contain other data for loss)
(e.g. `{'observation': numpy.ndarray}` with `observation` of shape (batch_size, observation_space_size) or
(batch_size, observation_space_size, stack_size) or
`{'observation': numpy.ndarray, 'output_0_0': numpy.ndarray}` with `output_0_0` of shape (batch_size,))
:param targets: targets for calculating loss. For example discounted rewards for value network
for calculating the value-network loss would be a target. Length of list and order of arrays in
the list matches that of network losses which are defined by network parameters
:param additional_fetches: list of additional values to fetch and return. The type of each list
element is framework dependent.
:param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss.
:param no_accumulation: if True, set gradient values to the new gradients, otherwise sum with previously
calculated gradients
:return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors
total_loss (float): sum of all head losses
losses (list of float): list of all losses. The order is list of target losses followed by list of
regularization losses. The specifics of losses is dependant on the network parameters
(number of heads, etc.)
norm_unclippsed_grads (float): global norm of all gradients before any gradient clipping is applied
fetched_tensors: all values for additional_fetches
"""
raise NotImplementedError
def apply_and_reset_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None:
"""
Applies the given gradients to the network weights and resets the gradient accumulations.
Has the same impact as calling `apply_gradients`, then `reset_accumulated_gradients`.
:param gradients: gradients for the parameter weights, taken from `accumulated_gradients` property
of an identical network (either self or another identical network)
:param scaler: A scaling factor that allows rescaling the gradients before applying them
"""
raise NotImplementedError
def apply_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None:
"""
Applies the given gradients to the network weights.
Will be performed sync or async depending on `network_parameters.async_training`
:param gradients: gradients for the parameter weights, taken from `accumulated_gradients` property
of an identical network (either self or another identical network)
:param scaler: A scaling factor that allows rescaling the gradients before applying them
"""
raise NotImplementedError
def get_variable_value(self, variable: Any) -> np.ndarray:
"""
Gets value of a specified variable. Type of variable is dependant on the framework.
Example of a variable is head.kl_coefficient, which could be a symbol for evaluation
or could be a string representing the value.
:param variable: variable of interest
:return: value of the specified variable
"""
raise NotImplementedError
def set_variable_value(self, assign_op: Any, value: np.ndarray, placeholder: Any):
"""
Updates the value of a specified variable. Type of assign_op is dependant on the framework
and is a unique identifier for assigning value to a variable. For example an agent may use
head.assign_kl_coefficient. There is a one to one mapping between assign_op and placeholder
(in the example above, placeholder would be head.kl_coefficient_ph).
:param assign_op: a parameter representing the operation for assigning value to a specific variable
:param value: value of the specified variable used for update
:param placeholder: a placeholder for binding the value to assign_op.
"""
raise NotImplementedError
def collect_savers(self, parent_path_suffix: str) -> SaverCollection:
"""
Collection of all savers for the network (typically only one saver for network and one for ONNX export)
:param parent_path_suffix: path suffix of the parent of the network
(e.g. could be name of level manager plus name of agent)
:return: saver collection for the network
"""
raise NotImplementedError | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/architectures/architecture.py | 0.96606 | 0.664282 | architecture.py | pypi |
from typing import Type
from rl_coach.base_parameters import NetworkComponentParameters
class HeadParameters(NetworkComponentParameters):
    """Base parameter set shared by all network output-head definitions."""

    def __init__(self, parameterized_class_name: str, activation_function: str = 'relu', name: str= 'head',
                 num_output_head_copies: int=1, rescale_gradient_from_head_by_factor: float=1.0,
                 loss_weight: float=1.0, dense_layer=None, is_training=False):
        super().__init__(dense_layer=dense_layer)
        # name of the concrete head class to instantiate (resolved via `path`)
        self.parameterized_class_name = parameterized_class_name
        self.activation_function = activation_function
        self.name = name
        self.num_output_head_copies = num_output_head_copies
        self.rescale_gradient_from_head_by_factor = rescale_gradient_from_head_by_factor
        self.loss_weight = loss_weight
        self.is_training = is_training

    @property
    def path(self):
        """Import path of the concrete head implementation class."""
        return 'rl_coach.architectures.tensorflow_components.heads:{}'.format(self.parameterized_class_name)
class PPOHeadParameters(HeadParameters):
    """Parameters for a PPO policy head."""

    def __init__(self, activation_function: str ='tanh', name: str='ppo_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="PPOHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class VHeadParameters(HeadParameters):
    """Parameters for a state-value (V) head."""

    def __init__(self, activation_function: str ='relu', name: str='v_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns',
                 output_bias_initializer=None):
        super().__init__(
            parameterized_class_name="VHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        # weight initializer scheme for the output layer
        self.initializer = initializer
        self.output_bias_initializer = output_bias_initializer
class DDPGVHeadParameters(HeadParameters):
    """Parameters for a DDPG state-value head."""

    def __init__(self, activation_function: str ='relu', name: str='ddpg_v_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns',
                 output_bias_initializer=None):
        super().__init__(
            parameterized_class_name="DDPGVHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        # weight initializer scheme for the output layer
        self.initializer = initializer
        self.output_bias_initializer = output_bias_initializer
class CategoricalQHeadParameters(HeadParameters):
    """Parameters for a categorical (distributional C51) Q head."""

    def __init__(self, activation_function: str ='relu', name: str='categorical_q_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None,
                 output_bias_initializer=None):
        super().__init__(
            parameterized_class_name="CategoricalQHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        self.output_bias_initializer = output_bias_initializer
class RegressionHeadParameters(HeadParameters):
    """Parameters for a generic regression head.

    Note: the default ``name`` is kept as 'q_head_params' for backward
    compatibility with existing presets.
    """

    def __init__(self, activation_function: str ='relu', name: str='q_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None, scheme=None,
                 output_bias_initializer=None):
        super().__init__(parameterized_class_name="RegressionHead", activation_function=activation_function, name=name,
                         dense_layer=dense_layer, num_output_head_copies=num_output_head_copies,
                         rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
                         loss_weight=loss_weight)
        # Bug fix: `scheme` was accepted but silently dropped; store it like every
        # other parameter so the head implementation can read it.
        self.scheme = scheme
        self.output_bias_initializer = output_bias_initializer
class DDPGActorHeadParameters(HeadParameters):
    """Parameters for a DDPG actor (deterministic policy) head."""

    def __init__(self, activation_function: str ='tanh', name: str='policy_head_params', batchnorm: bool=True,
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="DDPGActor",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        # whether to apply batch normalization inside the head
        self.batchnorm = batchnorm
class WolpertingerActorHeadParameters(HeadParameters):
    """Parameters for a Wolpertinger actor head."""

    def __init__(self, activation_function: str ='tanh', name: str='policy_head_params', batchnorm: bool=True,
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="WolpertingerActorHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        # whether to apply batch normalization inside the head
        self.batchnorm = batchnorm
class DNDQHeadParameters(HeadParameters):
    """Parameters for a DND (differentiable neural dictionary) Q head."""

    def __init__(self, activation_function: str ='relu', name: str='dnd_q_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="DNDQHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class DuelingQHeadParameters(HeadParameters):
    """Parameters for a dueling-architecture Q head."""

    def __init__(self, activation_function: str ='relu', name: str='dueling_q_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="DuelingQHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class MeasurementsPredictionHeadParameters(HeadParameters):
    """Parameters for a measurements-prediction head (e.g. DFP agents)."""

    def __init__(self, activation_function: str ='relu', name: str='measurements_prediction_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="MeasurementsPredictionHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class NAFHeadParameters(HeadParameters):
    """Parameters for a NAF (normalized advantage functions) head."""

    def __init__(self, activation_function: str ='tanh', name: str='naf_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="NAFHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class PolicyHeadParameters(HeadParameters):
    """Parameters for a generic stochastic policy head."""

    def __init__(self, activation_function: str ='tanh', name: str='policy_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="PolicyHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class PPOVHeadParameters(HeadParameters):
    """Parameters for the PPO value head."""

    def __init__(self, activation_function: str ='relu', name: str='ppo_v_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None):
        super().__init__(
            parameterized_class_name="PPOVHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        self.output_bias_initializer = output_bias_initializer
class QHeadParameters(HeadParameters):
    """Parameters for a standard action-value (Q) head."""

    def __init__(self, activation_function: str ='relu', name: str='q_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None):
        super().__init__(
            parameterized_class_name="QHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        self.output_bias_initializer = output_bias_initializer
class ClassificationHeadParameters(HeadParameters):
    """Parameters for a classification head."""

    def __init__(self, activation_function: str ='relu', name: str='classification_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="ClassificationHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class QuantileRegressionQHeadParameters(HeadParameters):
    """Parameters for a quantile-regression (QR-DQN) Q head."""

    def __init__(self, activation_function: str ='relu', name: str='quantile_regression_q_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None):
        super().__init__(
            parameterized_class_name="QuantileRegressionQHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        self.output_bias_initializer = output_bias_initializer
class RainbowQHeadParameters(HeadParameters):
    """Parameters for a Rainbow DQN Q head."""

    def __init__(self, activation_function: str ='relu', name: str='rainbow_q_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="RainbowQHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class ACERPolicyHeadParameters(HeadParameters):
    """Parameters for an ACER policy head."""

    def __init__(self, activation_function: str ='relu', name: str='acer_policy_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None):
        super().__init__(
            parameterized_class_name="ACERPolicyHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
class SACPolicyHeadParameters(HeadParameters):
    """Parameters for a SAC (soft actor-critic) policy head."""

    def __init__(self, activation_function: str ='relu', name: str='sac_policy_head_params', dense_layer=None):
        super().__init__(
            parameterized_class_name='SACPolicyHead',
            activation_function=activation_function,
            name=name,
            dense_layer=dense_layer,
        )
class SACQHeadParameters(HeadParameters):
    """Parameters for a SAC Q head."""

    def __init__(self, activation_function: str ='relu', name: str='sac_q_head_params', dense_layer=None,
                 layers_sizes: tuple = (256, 256), output_bias_initializer=None):
        super().__init__(
            parameterized_class_name='SACQHead',
            activation_function=activation_function,
            name=name,
            dense_layer=dense_layer,
        )
        # sizes of the hidden layers built inside the Q head itself
        self.network_layers_sizes = layers_sizes
        self.output_bias_initializer = output_bias_initializer
class TD3VHeadParameters(HeadParameters):
    """Parameters for a TD3 value head."""

    def __init__(self, activation_function: str ='relu', name: str='td3_v_head_params',
                 num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0,
                 loss_weight: float = 1.0, dense_layer=None, initializer='xavier',
                 output_bias_initializer=None):
        super().__init__(
            parameterized_class_name="TD3VHead",
            activation_function=activation_function,
            name=name,
            num_output_head_copies=num_output_head_copies,
            rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor,
            loss_weight=loss_weight,
            dense_layer=dense_layer,
        )
        # weight initializer scheme for the output layer
        self.initializer = initializer
        self.output_bias_initializer = output_bias_initializer
from typing import List, Tuple
from rl_coach.base_parameters import Frameworks, AgentParameters
from rl_coach.logger import failed_imports
from rl_coach.saver import SaverCollection
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import force_list
class NetworkWrapper(object):
    """
    The network wrapper contains multiple copies of the same network, each one with a different set of weights which is
    updating in a different time scale. The network wrapper will always contain an online network.
    It will contain an additional slow updating target network if it was requested by the user,
    and it will contain a global network shared between different workers, if Coach is run in a single-node
    multi-process distributed mode. The network wrapper contains functionality for managing these networks and syncing
    between them.
    """

    def __init__(self, agent_parameters: AgentParameters, has_target: bool, has_global: bool, name: str,
                 spaces: SpacesDefinition, replicated_device=None, worker_device=None):
        self.ap = agent_parameters
        self.network_parameters = self.ap.network_wrappers[name]
        self.has_target = has_target
        self.has_global = has_global
        self.name = name
        self.sess = None

        # resolve the framework-specific network constructor; importing lazily keeps
        # the unused framework an optional dependency
        if self.network_parameters.framework == Frameworks.tensorflow:
            try:
                import tensorflow as tf
            except ImportError:
                raise Exception('Install tensorflow before using it as framework')
            from rl_coach.architectures.tensorflow_components.general_network import GeneralTensorFlowNetwork
            general_network = GeneralTensorFlowNetwork.construct
        elif self.network_parameters.framework == Frameworks.mxnet:
            try:
                import mxnet as mx
            except ImportError:
                raise Exception('Install mxnet before using it as framework')
            from rl_coach.architectures.mxnet_components.general_network import GeneralMxnetNetwork
            general_network = GeneralMxnetNetwork.construct
        else:
            raise Exception("{} Framework is not supported"
                            .format(Frameworks().to_string(self.network_parameters.framework)))

        variable_scope = "{}/{}".format(self.ap.full_name_id, name)

        # Global network - the main network shared between threads
        self.global_network = None
        if self.has_global:
            # we assign the parameters of this network on the parameters server
            self.global_network = general_network(variable_scope=variable_scope,
                                                  devices=force_list(replicated_device),
                                                  agent_parameters=agent_parameters,
                                                  name='{}/global'.format(name),
                                                  global_network=None,
                                                  network_is_local=False,
                                                  spaces=spaces,
                                                  network_is_trainable=True)

        # Online network - local copy of the main network used for playing
        self.online_network = None
        self.online_network = general_network(variable_scope=variable_scope,
                                              devices=force_list(worker_device),
                                              agent_parameters=agent_parameters,
                                              name='{}/online'.format(name),
                                              global_network=self.global_network,
                                              network_is_local=True,
                                              spaces=spaces,
                                              network_is_trainable=True)

        # Target network - a local, slow updating network used for stabilizing the learning
        self.target_network = None
        if self.has_target:
            self.target_network = general_network(variable_scope=variable_scope,
                                                  devices=force_list(worker_device),
                                                  agent_parameters=agent_parameters,
                                                  name='{}/target'.format(name),
                                                  global_network=self.global_network,
                                                  network_is_local=True,
                                                  spaces=spaces,
                                                  network_is_trainable=False)

    def sync(self):
        """
        Initializes the weights of the networks to match each other
        :return:
        """
        self.update_online_network()
        self.update_target_network()

    def update_target_network(self, rate=1.0):
        """
        Copy weights: online network >>> target network
        :param rate: the rate of copying the weights - 1 for copying exactly
        """
        if self.target_network:
            self.target_network.set_weights(self.online_network.get_weights(), rate)

    def update_online_network(self, rate=1.0):
        """
        Copy weights: global network >>> online network
        :param rate: the rate of copying the weights - 1 for copying exactly
        """
        if self.global_network:
            self.online_network.set_weights(self.global_network.get_weights(), rate)

    def apply_gradients_to_global_network(self, gradients=None, additional_inputs=None):
        """
        Apply gradients from the online network on the global network
        :param gradients: optional gradients that will be used instead of the accumulated gradients
        :param additional_inputs: optional additional inputs required for when applying the gradients (e.g. batchnorm's
                                  update ops also requires the inputs)
        :return:
        """
        if gradients is None:
            gradients = self.online_network.accumulated_gradients
        if self.network_parameters.shared_optimizer:
            self.global_network.apply_gradients(gradients, additional_inputs=additional_inputs)
        else:
            self.online_network.apply_gradients(gradients, additional_inputs=additional_inputs)

    def apply_gradients_to_online_network(self, gradients=None, additional_inputs=None):
        """
        Apply gradients from the online network on itself
        :param gradients: optional gradients that will be used instead of the accumulated gradients
        :param additional_inputs: optional additional inputs required for when applying the gradients (e.g. batchnorm's
                                  update ops also requires the inputs)
        :return:
        """
        if gradients is None:
            gradients = self.online_network.accumulated_gradients
        self.online_network.apply_gradients(gradients, additional_inputs=additional_inputs)

    def train_and_sync_networks(self, inputs, targets, additional_fetches=None, importance_weights=None,
                                use_inputs_for_apply_gradients=False):
        """
        A generic training function that enables multi-threading training using a global network if necessary.
        :param inputs: The inputs for the network.
        :param targets: The targets corresponding to the given inputs
        :param additional_fetches: Any additional tensor the user wants to fetch
        :param importance_weights: A coefficient for each sample in the batch, which will be used to rescale the loss
                                   error of this sample. If it is not given, the samples losses won't be scaled
        :param use_inputs_for_apply_gradients: Add the inputs also for when applying gradients
                                               (e.g. for incorporating batchnorm update ops)
        :return: The loss of the training iteration
        """
        # Bug fix: the default used to be a mutable `[]` shared across all calls;
        # normalize None to a fresh list to preserve the original downstream behavior.
        if additional_fetches is None:
            additional_fetches = []
        result = self.online_network.accumulate_gradients(inputs, targets, additional_fetches=additional_fetches,
                                                          importance_weights=importance_weights, no_accumulation=True)
        if use_inputs_for_apply_gradients:
            self.apply_gradients_and_sync_networks(reset_gradients=False, additional_inputs=inputs)
        else:
            self.apply_gradients_and_sync_networks(reset_gradients=False)
        return result

    def apply_gradients_and_sync_networks(self, reset_gradients=True, additional_inputs=None):
        """
        Applies the gradients accumulated in the online network to the global network or to itself and syncs the
        networks if necessary
        :param reset_gradients: If set to True, the accumulated gradients won't be reset to 0 after applying them to
                                the network. this is useful when the accumulated gradients are overwritten instead
                                if accumulated by the accumulate_gradients function. this allows reducing time
                                complexity for this function by around 10%
        :param additional_inputs: optional additional inputs required for when applying the gradients (e.g. batchnorm's
                                  update ops also requires the inputs)
        """
        if self.global_network:
            self.apply_gradients_to_global_network(additional_inputs=additional_inputs)
            if reset_gradients:
                self.online_network.reset_accumulated_gradients()
            self.update_online_network()
        else:
            if reset_gradients:
                self.online_network.apply_and_reset_gradients(self.online_network.accumulated_gradients,
                                                              additional_inputs=additional_inputs)
            else:
                self.online_network.apply_gradients(self.online_network.accumulated_gradients,
                                                    additional_inputs=additional_inputs)

    def parallel_prediction(self, network_input_tuples: List[Tuple]):
        """
        Run several network prediction in parallel. Currently this only supports running each of the network once.
        :param network_input_tuples: a list of tuples where the first element is the network (online_network,
                                     target_network or global_network) and the second element is the inputs
        :return: the outputs of all the networks in the same order as the inputs were given
        """
        return type(self.online_network).parallel_predict(self.sess, network_input_tuples)

    def set_is_training(self, state: bool):
        """
        Set the phase of the network between training and testing
        :param state: The current state (True = Training, False = Testing)
        :return: None
        """
        self.online_network.set_is_training(state)
        if self.has_target:
            self.target_network.set_is_training(state)

    def set_session(self, sess):
        # propagate the session to every copy of the network
        self.sess = sess
        self.online_network.set_session(sess)
        if self.global_network:
            self.global_network.set_session(sess)
        if self.target_network:
            self.target_network.set_session(sess)

    def __str__(self):
        sub_networks = []
        if self.global_network:
            sub_networks.append("global network")
        if self.online_network:
            sub_networks.append("online network")
        if self.target_network:
            sub_networks.append("target network")

        result = []
        result.append("Network: {}, Copies: {} ({})".format(self.name, len(sub_networks), ' | '.join(sub_networks)))
        result.append("-"*len(result[-1]))
        result.append(str(self.online_network))
        result.append("")
        return '\n'.join(result)

    def collect_savers(self, parent_path_suffix: str) -> SaverCollection:
        """
        Collect all of network's savers for global or online network
        Note: global, online, and target network are all copies of the same network which parameters that are
        updated at different rates. So we only need to save one of the networks; the one that holds the most
        recent parameters. target network is created for some agents and used for stabilizing training by
        updating parameters from online network at a slower rate. As a result, target network never contains
        the most recent set of parameters. In single-worker training, no global network is created and online
        network contains the most recent parameters. In vertical distributed training with more than one worker,
        global network is updated by all workers and contains the most recent parameters.
        Therefore preference is given to global network if it exists, otherwise online network is used
        for saving.
        :param parent_path_suffix: path suffix of the parent of the network wrapper
                                   (e.g. could be name of level manager plus name of agent)
        :return: collection of all checkpoint objects
        """
        if self.global_network:
            savers = self.global_network.collect_savers(parent_path_suffix)
        else:
            savers = self.online_network.collect_savers(parent_path_suffix)
        return savers
from types import FunctionType
from mxnet.gluon import nn
from rl_coach.architectures import layers
from rl_coach.architectures.mxnet_components import utils
# define global dictionary for storing layer type to layer implementation mapping
# (keys: rl_coach.architectures.layers types; values: converter functions registered via reg_to_mx)
mx_layer_dict = dict()
def reg_to_mx(layer_type) -> FunctionType:
    """ function decorator that registers a layer implementation for the given layer type
    :return: decorated function
    """
    def register(converter):
        # each layer type may only be registered once
        assert layer_type not in mx_layer_dict
        mx_layer_dict[layer_type] = converter
        return converter
    return register
def convert_layer(layer):
    """
    If layer is callable, return layer, otherwise convert to MX type
    :param layer: layer to be converted
    :return: converted layer if not callable, otherwise layer itself
    """
    if not callable(layer):
        # look up the registered converter for this layer type and apply it
        return mx_layer_dict[type(layer)](layer)
    return layer
class Conv2d(layers.Conv2d):
    """MXNet implementation of a 2D convolution layer descriptor."""

    def __init__(self, num_filters: int, kernel_size: int, strides: int):
        super().__init__(num_filters=num_filters, kernel_size=kernel_size, strides=strides)

    def __call__(self) -> nn.Conv2D:
        """
        Instantiate the gluon conv2d block described by this layer
        :return: conv2d block
        """
        return nn.Conv2D(channels=self.num_filters,
                         kernel_size=self.kernel_size,
                         strides=self.strides)

    @staticmethod
    @reg_to_mx(layers.Conv2d)
    def to_mx(base: layers.Conv2d):
        """Create an MXNet Conv2d from a framework-agnostic Conv2d definition."""
        return Conv2d(num_filters=base.num_filters,
                      kernel_size=base.kernel_size,
                      strides=base.strides)
class BatchnormActivationDropout(layers.BatchnormActivationDropout):
    """MXNet implementation of an optional batchnorm/activation/dropout stack."""

    def __init__(self, batchnorm: bool=False, activation_function=None, dropout_rate: float=0):
        super().__init__(batchnorm=batchnorm,
                         activation_function=activation_function,
                         dropout_rate=dropout_rate)

    def __call__(self):
        """
        Build a hybrid sequential block containing only the enabled sub-layers,
        always in the fixed order batchnorm -> activation -> dropout
        :return: gluon HybridSequential block
        """
        block = nn.HybridSequential()
        if self.batchnorm:
            block.add(nn.BatchNorm())
        if self.activation_function:
            activation_name = utils.get_mxnet_activation_name(self.activation_function)
            block.add(nn.Activation(activation=activation_name))
        if self.dropout_rate:
            block.add(nn.Dropout(self.dropout_rate))
        return block

    @staticmethod
    @reg_to_mx(layers.BatchnormActivationDropout)
    def to_mx(base: layers.BatchnormActivationDropout):
        """Create an MXNet BatchnormActivationDropout from its framework-agnostic definition."""
        return BatchnormActivationDropout(batchnorm=base.batchnorm,
                                          activation_function=base.activation_function,
                                          dropout_rate=base.dropout_rate)
class Dense(layers.Dense):
    """MXNet implementation of a fully-connected layer descriptor."""

    def __init__(self, units: int):
        super().__init__(units=units)

    def __call__(self):
        """
        Instantiate the gluon dense layer described by this layer
        :return: dense layer
        """
        # Set flatten to False for consistent behavior with tf.layers.dense
        return nn.Dense(self.units, flatten=False)

    @staticmethod
    @reg_to_mx(layers.Dense)
    def to_mx(base: layers.Dense):
        """Create an MXNet Dense from a framework-agnostic Dense definition."""
        return Dense(units=base.units)
import copy
from typing import Any, Dict, Generator, List, Tuple, Union
import numpy as np
import mxnet as mx
from mxnet import autograd, gluon, nd
from mxnet.ndarray import NDArray
from rl_coach.architectures.architecture import Architecture
from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS, LOSS_OUT_TYPE_REGULARIZATION
from rl_coach.architectures.mxnet_components import utils
from rl_coach.architectures.mxnet_components.savers import ParameterDictSaver, OnnxSaver
from rl_coach.base_parameters import AgentParameters
from rl_coach.logger import screen
from rl_coach.saver import SaverCollection
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import force_list, squeeze_list
class MxnetArchitecture(Architecture):
    """
    Base class for mxnet/gluon network architectures.

    Owns the gluon model and its per-head losses, and implements prediction,
    gradient accumulation/application, weight get/set and checkpointing on top of
    them. Child classes must implement construct_model() to build self.model.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, devices: List[mx.Context],
                 name: str = "", global_network=None, network_is_local: bool=True, network_is_trainable: bool=False):
        """
        :param agent_parameters: the agent parameters
        :param spaces: the spaces definition of the agent
        :param devices: list of mxnet contexts to run the network on
        :param name: the name of the network
        :param global_network: the global network replica that is shared between all the workers
        :param network_is_local: is the network global (shared between workers) or local (dedicated to the worker)
        :param network_is_trainable: is the network trainable (we can apply gradients on it)
        """
        super().__init__(agent_parameters, spaces, name)
        self.middleware = None
        self.network_is_local = network_is_local
        self.global_network = global_network
        # NOTE(review): this checks the tensorflow_support flag and raises a TensorFlow-specific message
        # inside the MXNet backend -- looks copied from the TF architecture; confirm the intended flag.
        if not self.network_parameters.tensorflow_support:
            raise ValueError('TensorFlow is not supported for this agent')
        self.losses = []  # type: List[HeadLoss]
        self.shared_accumulated_gradients = []
        self.curr_rnn_c_in = None
        self.curr_rnn_h_in = None
        self.gradients_wrt_inputs = []
        self.train_writer = None
        self.accumulated_gradients = None
        self.network_is_trainable = network_is_trainable
        self.is_training = False
        self.model = None  # type: GeneralModel
        self._devices = self._sanitize_device_list(devices)
        self.is_chief = self.ap.task_parameters.task_index == 0
        self.network_is_global = not self.network_is_local and global_network is None
        self.distributed_training = self.network_is_global or self.network_is_local and global_network is not None
        self.optimizer_type = self.network_parameters.optimizer_type
        if self.ap.task_parameters.seed is not None:
            mx.random.seed(self.ap.task_parameters.seed)
        # Call to child class to create the model
        self.construct_model()
        self.trainer = None  # type: gluon.Trainer

    def __str__(self):
        return self.model.summary(*self._dummy_model_inputs())

    @staticmethod
    def _sanitize_device_list(devices: List[mx.Context]) -> List[mx.Context]:
        """
        Returns intersection of devices with available devices. If no intersection, returns mx.cpu()
        :param devices: list of requested devices
        :return: list of devices that are actually available
        """
        actual_device = [mx.cpu()] + [mx.gpu(i) for i in mx.test_utils.list_gpus()]
        intersection = [dev for dev in devices if dev in actual_device]
        if len(intersection) == 0:
            intersection = [mx.cpu()]
            screen.log('Requested devices {} not available. Default to CPU context.'.format(devices))
        elif len(intersection) < len(devices):
            screen.log('{} not available, using {}.'.format(
                [dev for dev in devices if dev not in intersection], intersection))
        return intersection

    def _model_grads(self, index: int=0) ->\
            Union[Generator[NDArray, NDArray, Any], Generator[List[NDArray], List[NDArray], Any]]:
        """
        Returns a generator over the model gradient arrays, in the same order as collect_params().
        Parameters with grad_req == 'null' are skipped.
        :param index: device index. Set to -1 to get a tuple of list of NDArrays for all devices
        :return: a generator for model gradient values
        """
        if index < 0:
            return (p.list_grad() for p in self.model.collect_params().values() if p.grad_req != 'null')
        else:
            return (p.list_grad()[index] for p in self.model.collect_params().values() if p.grad_req != 'null')

    def _model_input_shapes(self) -> List[List[int]]:
        """
        Create a list of input array shapes (each with a leading batch dimension of 1)
        :return: type of input shapes
        """
        allowed_inputs = copy.copy(self.spaces.state.sub_spaces)
        allowed_inputs["action"] = copy.copy(self.spaces.action)
        allowed_inputs["goal"] = copy.copy(self.spaces.goal)
        embedders = self.model.nets[0].input_embedders
        return list([1] + allowed_inputs[emb.embedder_name].shape.tolist() for emb in embedders)

    def _dummy_model_inputs(self) -> Tuple[NDArray, ...]:
        """
        Creates a tuple of input arrays with correct shapes that can be used for shape inference
        of the model weights and for printing the summary
        :return: tuple of inputs for model forward pass
        """
        input_shapes = self._model_input_shapes()
        inputs = tuple(nd.zeros(tuple(shape), ctx=self._devices[0]) for shape in input_shapes)
        return inputs

    def construct_model(self) -> None:
        """
        Construct network model. Implemented by child class.
        """
        raise NotImplementedError

    def set_session(self, sess) -> None:
        """
        Initializes the model parameters and creates the model trainer.
        NOTE: session for the mxnet backend must be None.
        :param sess: must be None
        """
        assert sess is None
        # FIXME Add initializer
        self.model.collect_params().initialize(ctx=self._devices)
        # Hybridize model and losses
        self.model.hybridize()
        for l in self.losses:
            l.hybridize()
        # Pass dummy data with correct shape to trigger shape inference and full parameter initialization
        self.model(*self._dummy_model_inputs())
        if self.network_is_trainable:
            self.trainer = gluon.Trainer(
                self.model.collect_params(), optimizer=self.optimizer, update_on_kvstore=False)

    def reset_accumulated_gradients(self) -> None:
        """
        Reset model gradients as well as accumulated gradients to zero. If accumulated gradients
        have not been created yet, it constructs them as copies of the model gradient arrays.
        """
        # Set model gradients to zero
        for p in self.model.collect_params().values():
            p.zero_grad()
        # Set accumulated gradients to zero if already initialized, otherwise create a copy
        if self.accumulated_gradients:
            for a in self.accumulated_gradients:
                a *= 0
        else:
            self.accumulated_gradients = [g.copy() for g in self._model_grads()]

    def accumulate_gradients(self,
                             inputs: Dict[str, np.ndarray],
                             targets: List[np.ndarray],
                             additional_fetches: List[Tuple[int, str]] = None,
                             importance_weights: np.ndarray = None,
                             no_accumulation: bool = False) -> Tuple[float, List[float], float, list]:
        """
        Runs a forward & backward pass, clips gradients if needed and accumulates them into the accumulation
        :param inputs: environment states (observation, etc.) as well extra inputs required by loss. Shape of ndarray
            is (batch_size, observation_space_size) or (batch_size, observation_space_size, stack_size)
        :param targets: targets required by loss (e.g. sum of discounted rewards)
        :param additional_fetches: additional fetches to calculate and return. Each fetch is specified as (int, str)
            tuple of head-type-index and fetch-name. The tuple is obtained from each head.
        :param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss.
        :param no_accumulation: if True, set gradient values to the new gradients, otherwise sum with previously
            calculated gradients
        :return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors
            total_loss (float): sum of all head losses
            losses (list of float): list of all losses. The order is list of target losses followed by list of
                regularization losses. The specifics of losses is dependant on the network parameters
                (number of heads, etc.)
            norm_unclipped_grads (float): global norm of all gradients before any gradient clipping is applied
            fetched_tensors: all values for additional_fetches
        """
        if self.accumulated_gradients is None:
            self.reset_accumulated_gradients()
        # Guard against the documented default: iterating None below would raise TypeError
        if additional_fetches is None:
            additional_fetches = []
        embedders = [emb.embedder_name for emb in self.model.nets[0].input_embedders]
        nd_inputs = tuple(nd.array(inputs[emb], ctx=self._devices[0]) for emb in embedders)
        assert self.middleware.__class__.__name__ != 'LSTMMiddleware', "LSTM middleware not supported"
        targets = force_list(targets)
        with autograd.record():
            out_per_head = utils.split_outputs_per_head(self.model(*nd_inputs), self.model.output_heads)
            tgt_per_loss = utils.split_targets_per_loss(targets, self.losses)
            losses = list()
            regularizations = list()
            additional_fetches = [(k, None) for k in additional_fetches]
            for h, h_loss, h_out, l_tgt in zip(self.model.output_heads, self.losses, out_per_head, tgt_per_loss):
                l_in = utils.get_loss_agent_inputs(inputs, head_type_idx=h.head_type_idx, loss=h_loss)
                # Align arguments with loss.loss_forward and convert to NDArray
                l_args = utils.to_mx_ndarray(utils.align_loss_args(h_out, l_in, l_tgt, h_loss), h_out[0].context)
                # Calculate loss and all auxiliary outputs
                loss_outputs = utils.loss_output_dict(utils.to_list(h_loss(*l_args)), h_loss.output_schema)
                if LOSS_OUT_TYPE_LOSS in loss_outputs:
                    losses.extend(loss_outputs[LOSS_OUT_TYPE_LOSS])
                if LOSS_OUT_TYPE_REGULARIZATION in loss_outputs:
                    regularizations.extend(loss_outputs[LOSS_OUT_TYPE_REGULARIZATION])
                # Set additional fetches
                for i, fetch in enumerate(additional_fetches):
                    head_type_idx, fetch_name = fetch[0]  # fetch key is a tuple of (head_type_index, fetch_name)
                    if head_type_idx == h.head_type_idx:
                        assert fetch[1] is None  # sanity check that fetch is None
                        additional_fetches[i] = (fetch[0], loss_outputs[fetch_name])
            # Total loss is losses and regularization (NOTE: order is important)
            total_loss_list = losses + regularizations
            total_loss = nd.add_n(*total_loss_list)
        # Calculate gradients
        total_loss.backward()
        assert self.optimizer_type != 'LBFGS', 'LBFGS not supported'
        # allreduce gradients from all contexts
        self.trainer.allreduce_grads()
        model_grads_cpy = [g.copy() for g in self._model_grads()]
        # Calculate global norm of gradients
        # FIXME global norm is returned even when not used for clipping! Is this necessary?
        # FIXME global norm might be calculated twice if clipping method is global norm
        norm_unclipped_grads = utils.global_norm(model_grads_cpy)
        # Clip gradients
        if self.network_parameters.clip_gradients:
            utils.clip_grad(
                model_grads_cpy,
                clip_method=self.network_parameters.gradients_clipping_method,
                clip_val=self.network_parameters.clip_gradients,
                inplace=True)
        # Update self.accumulated_gradients depending on no_accumulation flag
        if no_accumulation:
            for acc_grad, model_grad in zip(self.accumulated_gradients, model_grads_cpy):
                acc_grad[:] = model_grad
        else:
            for acc_grad, model_grad in zip(self.accumulated_gradients, model_grads_cpy):
                acc_grad += model_grad
        # result of additional fetches
        fetched_tensors = [fetch[1] for fetch in additional_fetches]
        # convert everything to numpy or scalar before returning
        result = utils.asnumpy_or_asscalar((total_loss, total_loss_list, norm_unclipped_grads, fetched_tensors))
        return result

    def apply_and_reset_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None:
        """
        Applies the given gradients to the network weights and resets accumulated gradients to zero
        :param gradients: The gradients to use for the update
        :param scaler: A scaling factor that allows rescaling the gradients before applying them
        """
        self.apply_gradients(gradients, scaler)
        self.reset_accumulated_gradients()

    def apply_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None:
        """
        Applies the given gradients to the network weights
        :param gradients: The gradients to use for the update
        :param scaler: A scaling factor that allows rescaling the gradients before applying them.
            The gradients will be MULTIPLIED by this factor
        """
        assert self.optimizer_type != 'LBFGS'
        batch_size = 1
        if self.distributed_training and not self.network_parameters.async_training:
            # rescale the gradients so that they average out with the gradients from the other workers
            if self.network_parameters.scale_down_gradients_by_number_of_workers_for_sync_training:
                batch_size = self.ap.task_parameters.num_training_tasks
        # set parameter gradients to gradients passed in
        for param_grad, gradient in zip(self._model_grads(-1), gradients):
            for pg in param_grad:
                pg[:] = gradient
        # update gradients
        self.trainer.update(batch_size=batch_size)

    def _predict(self, inputs: Dict[str, np.ndarray]) -> Tuple[NDArray, ...]:
        """
        Run a forward pass of the network using the given input
        :param inputs: The input dictionary for the network. Key is name of the embedder.
        :return: The network output
        WARNING: must only call once per state since each call is assumed by LSTM to be a new time step.
        """
        embedders = [emb.embedder_name for emb in self.model.nets[0].input_embedders]
        nd_inputs = tuple(nd.array(inputs[emb], ctx=self._devices[0]) for emb in embedders)
        assert self.middleware.__class__.__name__ != 'LSTMMiddleware'
        output = self.model(*nd_inputs)
        return output

    def predict(self,
                inputs: Dict[str, np.ndarray],
                outputs: List[str]=None,
                squeeze_output: bool=True,
                initial_feed_dict: Dict[str, np.ndarray]=None) -> Tuple[np.ndarray, ...]:
        """
        Run a forward pass of the network using the given input
        :param inputs: The input dictionary for the network. Key is name of the embedder.
        :param outputs: list of outputs to return. Return all outputs if unspecified (currently not supported)
        :param squeeze_output: call squeeze_list on output if True
        :param initial_feed_dict: a dictionary of extra inputs for forward pass (currently not supported)
        :return: The network output
        WARNING: must only call once per state since each call is assumed by LSTM to be a new time step.
        """
        assert initial_feed_dict is None, "initial_feed_dict must be None"
        assert outputs is None, "outputs must be None"
        output = self._predict(inputs)
        output = list(o.asnumpy() for o in output)
        if squeeze_output:
            output = squeeze_list(output)
        return output

    @staticmethod
    def parallel_predict(sess: Any,
                         network_input_tuples: List[Tuple['MxnetArchitecture', Dict[str, np.ndarray]]]) -> \
            Tuple[np.ndarray, ...]:
        """
        :param sess: active session to use for prediction (must be None for MXNet)
        :param network_input_tuples: tuple of network and corresponding input
        :return: tuple of outputs from all networks
        """
        assert sess is None
        output = list()
        for net, inputs in network_input_tuples:
            output += net._predict(inputs)
        return tuple(o.asnumpy() for o in output)

    def train_on_batch(self,
                       inputs: Dict[str, np.ndarray],
                       targets: List[np.ndarray],
                       scaler: float = 1.,
                       additional_fetches: list = None,
                       importance_weights: np.ndarray = None) -> Tuple[float, List[float], float, list]:
        """
        Given a batch of inputs (e.g. states) and targets (e.g. discounted rewards), takes a training step: i.e. runs a
        forward pass and backward pass of the network, accumulates the gradients and applies an optimization step to
        update the weights.
        :param inputs: environment states (observation, etc.) as well extra inputs required by loss. Shape of ndarray
            is (batch_size, observation_space_size) or (batch_size, observation_space_size, stack_size)
        :param targets: targets required by loss (e.g. sum of discounted rewards)
        :param scaler: value to scale gradients by before optimizing network weights
        :param additional_fetches: additional fetches to calculate and return. Each fetch is specified as (int, str)
            tuple of head-type-index and fetch-name. The tuple is obtained from each head.
        :param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss.
        :return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors
            total_loss (float): sum of all head losses
            losses (list of float): list of all losses. The order is list of target losses followed by list
                of regularization losses. The specifics of losses is dependant on the network parameters
                (number of heads, etc.)
            norm_unclipped_grads (float): global norm of all gradients before any gradient clipping is applied
            fetched_tensors: all values for additional_fetches
        """
        loss = self.accumulate_gradients(inputs, targets, additional_fetches=additional_fetches,
                                         importance_weights=importance_weights)
        self.apply_and_reset_gradients(self.accumulated_gradients, scaler)
        return loss

    def get_weights(self) -> gluon.ParameterDict:
        """
        :return: a ParameterDict containing all network weights
        """
        return self.model.collect_params()

    def set_weights(self, weights: gluon.ParameterDict, new_rate: float=1.0) -> None:
        """
        Sets the network weights from the given ParameterDict
        :param weights: a ParameterDict to copy the weight values from
        :param new_rate: ratio for adding new and old weight values: val=rate*weights + (1-rate)*old_weights
        """
        old_weights = self.model.collect_params()
        for name, p in weights.items():
            name = name[len(weights.prefix):]  # Strip prefix
            old_p = old_weights[old_weights.prefix + name]  # Add prefix
            old_p.set_data(new_rate * p._reduce() + (1 - new_rate) * old_p._reduce())

    def get_variable_value(self, variable: Union[gluon.Parameter, NDArray]) -> np.ndarray:
        """
        Get the value of a variable
        :param variable: the variable
        :return: the value of the variable
        """
        if isinstance(variable, gluon.Parameter):
            variable = variable._reduce().asnumpy()
        if isinstance(variable, NDArray):
            return variable.asnumpy()
        return variable

    def set_variable_value(self, assign_op: callable, value: Any, placeholder=None) -> None:
        """
        Updates value of a variable.
        :param assign_op: a callable assign function for setting the variable
        :param value: a value to set the variable to
        :param placeholder: unused (placeholder in symbolic framework backends)
        """
        assert callable(assign_op)
        assign_op(value)

    def set_is_training(self, state: bool) -> None:
        """
        Set the phase of the network between training and testing
        :param state: The current state (True = Training, False = Testing)
        :return: None
        """
        self.is_training = state

    def reset_internal_memory(self) -> None:
        """
        Reset any internal memory used by the network. For example, an LSTM internal state
        :return: None
        """
        assert self.middleware.__class__.__name__ != 'LSTMMiddleware', 'LSTM middleware not supported'

    def collect_savers(self, parent_path_suffix: str) -> SaverCollection:
        """
        Collection of all checkpoints for the network (typically only one checkpoint)
        :param parent_path_suffix: path suffix of the parent of the network
            (e.g. could be name of level manager plus name of agent)
        :return: checkpoint collection for the network
        """
        name = self.name.replace('/', '.')
        savers = SaverCollection(ParameterDictSaver(
            name="{}.{}".format(parent_path_suffix, name),
            param_dict=self.model.collect_params()))
        if self.ap.task_parameters.export_onnx_graph:
            savers.add(OnnxSaver(
                name="{}.{}.onnx".format(parent_path_suffix, name),
                model=self.model,
                input_shapes=self._model_input_shapes()))
        return savers
from typing import Any, List, Tuple
from mxnet import gluon, sym
from mxnet.contrib import onnx as onnx_mxnet
import numpy as np
from rl_coach.architectures.mxnet_components.utils import ScopedOnnxEnable
from rl_coach.saver import Saver
class ParameterDictSaver(Saver):
    """
    Saver implementation for an mxnet gluon parameter dictionary
    """
    def __init__(self, name: str, param_dict: gluon.ParameterDict):
        self._name = name
        self._param_dict = param_dict

    @property
    def path(self):
        """
        Relative path for save/load. If two checkpoint objects return the same path, they must be merge-able.
        """
        return self._name

    def save(self, sess: None, save_path: str) -> List[str]:
        """
        Save to save_path
        :param sess: active session for session-based frameworks (e.g. TF)
        :param save_path: full path to save checkpoint (typically directory plus self.path plus checkpoint count).
        :return: list of all saved paths
        """
        assert sess is None
        self._param_dict.save(save_path)
        saved_paths = [save_path]
        return saved_paths

    def restore(self, sess: Any, restore_path: str):
        """
        Restore from restore_path
        :param sess: active session for session-based frameworks (e.g. TF)
        :param restore_path: full path to load checkpoint from.
        """
        assert sess is None
        self._param_dict.load(restore_path)

    def merge(self, other: 'Saver'):
        """
        Merge other saver into this saver
        :param other: saver to be merged into self
        """
        if isinstance(other, ParameterDictSaver):
            self._param_dict.update(other._param_dict)
        else:
            raise TypeError('merging only supported with ParameterDictSaver (type:{})'.format(type(other)))
class OnnxSaver(Saver):
    """
    Child class that implements saver for exporting gluon HybridBlock to ONNX
    """
    def __init__(self, name: str, model: gluon.HybridBlock, input_shapes: List[List[int]]):
        self._name = name
        self._sym = self._get_onnx_sym(model, len(input_shapes))
        self._param_dict = model.collect_params()
        self._input_shapes = input_shapes

    @staticmethod
    def _get_onnx_sym(model: gluon.HybridBlock, num_inputs: int) -> sym.Symbol:
        """
        Returns a symbolic graph for the model
        :param model: gluon HybridBlock that constructs the symbolic graph
        :param num_inputs: number of inputs to the graph
        :return: symbol for the network
        """
        var_args = [sym.Variable('Data{}'.format(i)) for i in range(num_inputs)]
        with ScopedOnnxEnable(model):
            return sym.Group(gluon.block._flatten(model(*var_args), "output")[0])

    @property
    def path(self):
        """
        Relative path for save/load. If two checkpoint objects return the same path, they must be merge-able.
        """
        return self._name

    def save(self, sess: None, save_path: str) -> List[str]:
        """
        Save to save_path
        :param sess: active session for session-based frameworks (e.g. TF). Must be None.
        :param save_path: full path to save checkpoint (typically directory plus self.path plus checkpoint count).
        :return: list of all saved paths
        """
        assert sess is None
        # Reduce multi-context parameters to single NDArrays before export
        params = {name: param._reduce() for name, param in self._param_dict.items()}
        export_path = onnx_mxnet.export_model(self._sym, params, self._input_shapes, np.float32, save_path)
        return [export_path]

    def restore(self, sess: Any, restore_path: str):
        """
        Restore from restore_path
        :param sess: active session for session-based frameworks (e.g. TF)
        :param restore_path: full path to load checkpoint from.
        """
        assert sess is None
        # Nothing to restore for ONNX

    def merge(self, other: 'Saver'):
        """
        Merge other saver into this saver
        :param other: saver to be merged into self
        """
        # No merging is supported for ONNX. self.path must be unique
        raise RuntimeError('merging not supported for ONNX exporter')
import copy
from itertools import chain
from typing import List, Tuple, Union
from types import ModuleType
import numpy as np
import mxnet as mx
from mxnet import nd, sym
from mxnet.gluon import HybridBlock
from mxnet.ndarray import NDArray
from mxnet.symbol import Symbol
from rl_coach.base_parameters import NetworkParameters
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import HeadParameters, PPOHeadParameters
from rl_coach.architectures.head_parameters import PPOVHeadParameters, VHeadParameters, QHeadParameters
from rl_coach.architectures.middleware_parameters import MiddlewareParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters, LSTMMiddlewareParameters
from rl_coach.architectures.mxnet_components.architecture import MxnetArchitecture
from rl_coach.architectures.mxnet_components.embedders import ImageEmbedder, TensorEmbedder, VectorEmbedder
from rl_coach.architectures.mxnet_components.heads import Head, HeadLoss, PPOHead, PPOVHead, VHead, QHead
from rl_coach.architectures.mxnet_components.middlewares import FCMiddleware, LSTMMiddleware
from rl_coach.architectures.mxnet_components import utils
from rl_coach.base_parameters import AgentParameters, Device, DeviceType, EmbeddingMergerType
from rl_coach.spaces import SpacesDefinition, PlanarMapsObservationSpace, TensorObservationSpace
class GeneralMxnetNetwork(MxnetArchitecture):
"""
A generalized version of all possible networks implemented using mxnet.
"""
@staticmethod
def construct(variable_scope: str, devices: List[str], *args, **kwargs) -> 'GeneralTensorFlowNetwork':
"""
Construct a network class using the provided variable scope and on requested devices
:param variable_scope: string specifying variable scope under which to create network variables
:param devices: list of devices (can be list of Device objects, or string for TF distributed)
:param args: all other arguments for class initializer
:param kwargs: all other keyword arguments for class initializer
:return: a GeneralTensorFlowNetwork object
"""
return GeneralMxnetNetwork(*args, devices=[GeneralMxnetNetwork._mx_device(d) for d in devices], **kwargs)
@staticmethod
def _mx_device(device: Union[str, Device]) -> mx.Context:
"""
Convert device to tensorflow-specific device representation
:param device: either a specific string (used in distributed mode) which is returned without
any change or a Device type
:return: tensorflow-specific string for device
"""
if isinstance(device, Device):
if device.device_type == DeviceType.CPU:
return mx.cpu()
elif device.device_type == DeviceType.GPU:
return mx.gpu(device.index)
else:
raise ValueError("Invalid device_type: {}".format(device.device_type))
else:
raise ValueError("Invalid device instance type: {}".format(type(device)))
def __init__(self,
agent_parameters: AgentParameters,
spaces: SpacesDefinition,
devices: List[mx.Context],
name: str,
global_network=None,
network_is_local: bool=True,
network_is_trainable: bool=False):
"""
:param agent_parameters: the agent parameters
:param spaces: the spaces definition of the agent
:param devices: list of devices to run the network on
:param name: the name of the network
:param global_network: the global network replica that is shared between all the workers
:param network_is_local: is the network global (shared between workers) or local (dedicated to the worker)
:param network_is_trainable: is the network trainable (we can apply gradients on it)
"""
self.network_wrapper_name = name.split('/')[0]
self.network_parameters = agent_parameters.network_wrappers[self.network_wrapper_name]
if self.network_parameters.use_separate_networks_per_head:
self.num_heads_per_network = 1
self.num_networks = len(self.network_parameters.heads_parameters)
else:
self.num_heads_per_network = len(self.network_parameters.heads_parameters)
self.num_networks = 1
super().__init__(agent_parameters, spaces, devices, name, global_network,
network_is_local, network_is_trainable)
def construct_model(self):
# validate the configuration
if len(self.network_parameters.input_embedders_parameters) == 0:
raise ValueError("At least one input type should be defined")
if len(self.network_parameters.heads_parameters) == 0:
raise ValueError("At least one output type should be defined")
if self.network_parameters.middleware_parameters is None:
raise ValueError("Exactly one middleware type should be defined")
self.model = GeneralModel(
num_networks=self.num_networks,
num_heads_per_network=self.num_heads_per_network,
network_is_local=self.network_is_local,
network_name=self.network_wrapper_name,
agent_parameters=self.ap,
network_parameters=self.network_parameters,
spaces=self.spaces)
self.losses = self.model.losses()
# Learning rate
lr_scheduler = None
if self.network_parameters.learning_rate_decay_rate != 0:
lr_scheduler = mx.lr_scheduler.FactorScheduler(
step=self.network_parameters.learning_rate_decay_steps,
factor=self.network_parameters.learning_rate_decay_rate)
# Optimizer
# FIXME Does this code for distributed training make sense?
if self.distributed_training and self.network_is_local and self.network_parameters.shared_optimizer:
# distributed training + is a local network + optimizer shared -> take the global optimizer
self.optimizer = self.global_network.optimizer
elif (self.distributed_training and self.network_is_local and not self.network_parameters.shared_optimizer)\
or self.network_parameters.shared_optimizer or not self.distributed_training:
if self.network_parameters.optimizer_type == 'Adam':
self.optimizer = mx.optimizer.Adam(
learning_rate=self.network_parameters.learning_rate,
beta1=self.network_parameters.adam_optimizer_beta1,
beta2=self.network_parameters.adam_optimizer_beta2,
epsilon=self.network_parameters.optimizer_epsilon,
lr_scheduler=lr_scheduler)
elif self.network_parameters.optimizer_type == 'RMSProp':
self.optimizer = mx.optimizer.RMSProp(
learning_rate=self.network_parameters.learning_rate,
gamma1=self.network_parameters.rms_prop_optimizer_decay,
epsilon=self.network_parameters.optimizer_epsilon,
lr_scheduler=lr_scheduler)
elif self.network_parameters.optimizer_type == 'LBFGS':
raise NotImplementedError('LBFGS optimizer not implemented')
else:
raise Exception("{} is not a valid optimizer type".format(self.network_parameters.optimizer_type))
    @property
    def output_heads(self):
        """
        :return: list of all output head blocks of the underlying GeneralModel
            (flattened across all of its sub-networks)
        """
        return self.model.output_heads
def _get_activation(activation_function_string: str):
    """
    Map an activation function name from the Coach convention to its MXNet equivalent.
    :param activation_function_string: the Coach name of the activation function
    :return: the MXNet activation name string (None for 'none')
    """
    mxnet_name = utils.get_mxnet_activation_name(activation_function_string)
    return mxnet_name
def _sanitize_activation(params: Union[InputEmbedderParameters, MiddlewareParameters, HeadParameters]) ->\
        Union[InputEmbedderParameters, MiddlewareParameters, HeadParameters]:
    """
    Return a shallow copy of params whose activation_function has been translated
    to the MXNet-specific name.
    :param params: any parameters object exposing an activation_function attribute
    :return: shallow copy of params with the activation function correctly set
    """
    sanitized = copy.copy(params)
    sanitized.activation_function = _get_activation(params.activation_function)
    return sanitized
def _get_input_embedder(spaces: SpacesDefinition,
                        input_name: str,
                        embedder_params: InputEmbedderParameters) -> ModuleType:
    """
    Given an input embedder parameters class, creates the input embedder and returns it
    :param spaces: state and action space definitions
    :param input_name: the name of the input to the embedder (used for retrieving the shape). The input should
        be a value within the state or the action.
    :param embedder_params: the parameters of the class of the embedder
    :return: the embedder instance
    """
    allowed_inputs = copy.copy(spaces.state.sub_spaces)
    allowed_inputs["action"] = copy.copy(spaces.action)
    allowed_inputs["goal"] = copy.copy(spaces.goal)

    if input_name not in allowed_inputs.keys():
        raise ValueError("The key for the input embedder ({}) must match one of the following keys: {}"
                         .format(input_name, allowed_inputs.keys()))

    # Infer the embedder kind from the observation space of the requested input.
    # NOTE: renamed from 'type' to avoid shadowing the builtin.
    embedder_type = "vector"
    if isinstance(allowed_inputs[input_name], TensorObservationSpace):
        embedder_type = "tensor"
    elif isinstance(allowed_inputs[input_name], PlanarMapsObservationSpace):
        embedder_type = "image"

    def sanitize_params(params: InputEmbedderParameters):
        # Translate the activation function name and tag the embedder with its input name.
        params_copy = _sanitize_activation(params)
        params_copy.name = input_name
        return params_copy

    embedder_params = sanitize_params(embedder_params)
    if embedder_type == 'vector':
        module = VectorEmbedder(embedder_params)
    elif embedder_type == 'image':
        module = ImageEmbedder(embedder_params)
    elif embedder_type == 'tensor':
        module = TensorEmbedder(embedder_params)
    else:
        raise KeyError('Unsupported embedder type: {}'.format(embedder_type))
    return module
def _get_middleware(middleware_params: MiddlewareParameters) -> ModuleType:
    """
    Construct the middleware block matching the given parameters type.
    :param middleware_params: the parameters of the middleware class
    :return: the middleware instance
    """
    sanitized_params = _sanitize_activation(middleware_params)
    if isinstance(sanitized_params, FCMiddlewareParameters):
        return FCMiddleware(sanitized_params)
    if isinstance(sanitized_params, LSTMMiddlewareParameters):
        return LSTMMiddleware(sanitized_params)
    raise KeyError('Unsupported middleware type: {}'.format(type(sanitized_params)))
def _get_output_head(
        head_params: HeadParameters,
        head_idx: int,
        head_type_index: int,
        agent_params: AgentParameters,
        spaces: SpacesDefinition,
        network_name: str,
        is_local: bool) -> Head:
    """
    Given a head type, creates the head and returns it
    :param head_params: the parameters of the head to create
    :param head_idx: the head index (currently unused; kept for interface compatibility)
    :param head_type_index: the head type index (same index if head_param.num_output_head_copies>0)
    :param agent_params: agent parameters
    :param spaces: state and action space definitions
    :param network_name: name of the network
    :param is_local: whether the network this head belongs to is a local network
    :return: head block
    """
    head_params = _sanitize_activation(head_params)
    # Ordered parameter-type -> head-class dispatch; isinstance checks run in this
    # order, preserving the semantics of the original if/elif chain while removing
    # four copies of the identical constructor call.
    head_classes = [
        (PPOHeadParameters, PPOHead),
        (VHeadParameters, VHead),
        (PPOVHeadParameters, PPOVHead),
        (QHeadParameters, QHead),
    ]
    for params_type, head_class in head_classes:
        if isinstance(head_params, params_type):
            # All supported heads share the same constructor signature.
            return head_class(
                agent_parameters=agent_params,
                spaces=spaces,
                network_name=network_name,
                head_type_idx=head_type_index,
                loss_weight=head_params.loss_weight,
                is_local=is_local,
                activation_function=head_params.activation_function,
                dense_layer=head_params.dense_layer)
    raise KeyError('Unsupported head type: {}'.format(type(head_params)))
class ScaledGradHead(HybridBlock, utils.OnnxHandlerBlock):
    """
    Wrapper block for applying gradient scaling to input before feeding the head network
    """
    def __init__(self,
                 head_index: int,
                 head_type_index: int,
                 network_name: str,
                 spaces: SpacesDefinition,
                 network_is_local: bool,
                 agent_params: AgentParameters,
                 head_params: HeadParameters) -> None:
        """
        :param head_index: the head index
        :param head_type_index: the head type index (same index if head_param.num_output_head_copies>0)
        :param network_name: name of the network
        :param spaces: state and action space definitions
        :param network_is_local: whether network is local
        :param agent_params: agent parameters
        :param head_params: head parameters
        """
        super(ScaledGradHead, self).__init__()
        # Explicitly initialize the OnnxHandlerBlock mixin (sets the self._onnx flag).
        utils.OnnxHandlerBlock.__init__(self)
        # NOTE: head_params is sanitized again inside _get_output_head; repeating it here is harmless.
        head_params = _sanitize_activation(head_params)
        with self.name_scope():
            self.head = _get_output_head(
                head_params=head_params,
                head_idx=head_index,
                head_type_index=head_type_index,
                agent_params=agent_params,
                spaces=spaces,
                network_name=network_name,
                is_local=network_is_local)
            # Constant (non-trainable) scalar parameter controlling what fraction of this
            # head's gradient flows back into the rest of the network.
            self.gradient_rescaler = self.params.get_constant(
                name='gradient_rescaler',
                value=np.array([float(head_params.rescale_gradient_from_head_by_factor)]))

    def hybrid_forward(self,
                       F: ModuleType,
                       x: Union[NDArray, Symbol],
                       gradient_rescaler: Union[NDArray, Symbol]) -> Tuple[Union[NDArray, Symbol], ...]:
        """ Overrides gluon.HybridBlock.hybrid_forward
        :param nd or sym F: ndarray or symbol module
        :param x: head input
        :param gradient_rescaler: gradient rescaler for partial blocking of gradient
        :return: head output
        """
        if self._onnx:
            # ONNX doesn't support BlockGrad() operator, but it's not typically needed for
            # ONNX because mostly forward calls are performed using ONNX exported network.
            grad_scaled_x = x
        else:
            # Forward value is unchanged ((1-c)*x + c*x == x); only the backward gradient is
            # scaled by c, because BlockGrad stops the gradient through the (1-c) term.
            grad_scaled_x = (F.broadcast_mul((1 - gradient_rescaler), F.BlockGrad(x)) +
                             F.broadcast_mul(gradient_rescaler, x))
        out = self.head(grad_scaled_x)
        return out
class SingleModel(HybridBlock):
    """
    Block that connects a single embedder, with middleware and one to multiple heads
    """
    def __init__(self,
                 network_is_local: bool,
                 network_name: str,
                 agent_parameters: AgentParameters,
                 in_emb_param_dict: {str: InputEmbedderParameters},
                 embedding_merger_type: EmbeddingMergerType,
                 middleware_param: MiddlewareParameters,
                 head_param_list: [HeadParameters],
                 head_type_idx_start: int,
                 spaces: SpacesDefinition,
                 *args, **kwargs):
        """
        :param network_is_local: True if network is local
        :param network_name: name of the network
        :param agent_parameters: agent parameters
        :param in_emb_param_dict: dictionary of embedder name to embedding parameters
        :param embedding_merger_type: type of merging output of embedders: concatenate or sum
        :param middleware_param: middleware parameters
        :param head_param_list: list of head parameters, one per head type
        :param head_type_idx_start: start index for head type index counting
        :param spaces: state and action space definition
        """
        super(SingleModel, self).__init__(*args, **kwargs)
        self._embedding_merger_type = embedding_merger_type
        self._input_embedders = list()  # type: List[HybridBlock]
        self._output_heads = list()  # type: List[ScaledGradHead]
        with self.name_scope():
            # Embedders are created in sorted key order so that their order matches the
            # order of the inputs later passed to hybrid_forward.
            for input_name in sorted(in_emb_param_dict):
                input_type = in_emb_param_dict[input_name]
                input_embedder = _get_input_embedder(spaces, input_name, input_type)
                self.register_child(input_embedder)
                self._input_embedders.append(input_embedder)
            self.middleware = _get_middleware(middleware_param)
            for i, head_param in enumerate(head_param_list):
                for head_copy_idx in range(head_param.num_output_head_copies):
                    # create output head and add it to the output heads list
                    output_head = ScaledGradHead(
                        head_index=(head_type_idx_start + i) * head_param.num_output_head_copies + head_copy_idx,
                        head_type_index=head_type_idx_start + i,
                        network_name=network_name,
                        spaces=spaces,
                        network_is_local=network_is_local,
                        agent_params=agent_parameters,
                        head_params=head_param)
                    self.register_child(output_head)
                    self._output_heads.append(output_head)

    def hybrid_forward(self, F, *inputs: Union[NDArray, Symbol]) -> Tuple[Union[NDArray, Symbol], ...]:
        """ Overrides gluon.HybridBlock.hybrid_forward
        :param nd or sym F: ndarray or symbol block
        :param inputs: model inputs, one for each embedder (same sorted order as the embedders)
        :return: head outputs in a tuple
        """
        # Input Embeddings
        state_embedding = list()
        for input, embedder in zip(inputs, self._input_embedders):
            state_embedding.append(embedder(input))

        # Merger: a single embedding is passed through as-is
        if len(state_embedding) == 1:
            state_embedding = state_embedding[0]
        else:
            if self._embedding_merger_type == EmbeddingMergerType.Concat:
                state_embedding = F.concat(*state_embedding, dim=1, name='merger')  # NC or NCHW layout
            elif self._embedding_merger_type == EmbeddingMergerType.Sum:
                state_embedding = F.add_n(*state_embedding, name='merger')
            # NOTE(review): for any other merger type, state_embedding stays a list here and the
            # middleware call below would fail -- confirm only Concat/Sum are expected.

        # Middleware
        state_embedding = self.middleware(state_embedding)

        # Head: flatten all head outputs into one tuple
        outputs = tuple()
        for head in self._output_heads:
            out = head(state_embedding)
            if not isinstance(out, tuple):
                out = (out,)
            outputs += out

        return outputs

    @property
    def input_embedders(self) -> List[HybridBlock]:
        """
        :return: list of input embedders
        """
        return self._input_embedders

    @property
    def output_heads(self) -> List[Head]:
        """
        :return: list of output heads (unwrapped from their ScaledGradHead wrappers)
        """
        return [h.head for h in self._output_heads]
class GeneralModel(HybridBlock):
    """
    Block that creates multiple single models
    """
    def __init__(self,
                 num_networks: int,
                 num_heads_per_network: int,
                 network_is_local: bool,
                 network_name: str,
                 agent_parameters: AgentParameters,
                 network_parameters: NetworkParameters,
                 spaces: SpacesDefinition,
                 *args, **kwargs):
        """
        :param num_networks: number of networks to create
        :param num_heads_per_network: number of heads per network to create
        :param network_is_local: True if network is local
        :param network_name: name of the network
        :param agent_parameters: agent parameters
        :param network_parameters: network parameters
        :param spaces: state and action space definitions
        """
        super(GeneralModel, self).__init__(*args, **kwargs)

        with self.name_scope():
            self.nets = list()
            for network_idx in range(num_networks):
                # Each sub-network gets its own contiguous slice of the head parameters.
                head_type_idx_start = network_idx * num_heads_per_network
                head_type_idx_end = head_type_idx_start + num_heads_per_network
                net = SingleModel(
                    head_type_idx_start=head_type_idx_start,
                    network_name=network_name,
                    network_is_local=network_is_local,
                    agent_parameters=agent_parameters,
                    in_emb_param_dict=network_parameters.input_embedders_parameters,
                    embedding_merger_type=network_parameters.embedding_merger_type,
                    middleware_param=network_parameters.middleware_parameters,
                    head_param_list=network_parameters.heads_parameters[head_type_idx_start:head_type_idx_end],
                    spaces=spaces)
                self.register_child(net)
                self.nets.append(net)

    def hybrid_forward(self, F, *inputs):
        """ Overrides gluon.HybridBlock.hybrid_forward
        :param nd or sym F: ndarray or symbol block
        :param inputs: model inputs, one for each embedder. Passed to all networks.
        :return: head outputs of all networks, flattened into a single tuple
        """
        outputs = tuple()
        for net in self.nets:
            out = net(*inputs)
            outputs += out
        return outputs

    @property
    def output_heads(self) -> List[Head]:
        """ Return all heads in a single list
        Note: There is a one-to-one mapping between output_heads and losses
        :return: list of heads
        """
        return list(chain.from_iterable(net.output_heads for net in self.nets))

    def losses(self) -> List[HeadLoss]:
        """ Construct loss blocks for network training
        Note: There is a one-to-one mapping between output_heads and losses
        :return: list of loss blocks
        """
        # Dataset-extraction artifact that was fused onto this line has been removed.
        return [h.loss() for net in self.nets for h in net.output_heads]
import inspect
from typing import Any, Dict, Generator, Iterable, List, Tuple, Union
from types import ModuleType
import mxnet as mx
from mxnet import gluon, nd
from mxnet.ndarray import NDArray
import numpy as np
from rl_coach.core_types import GradientClippingMethod
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
def to_mx_ndarray(data: Union[list, tuple, np.ndarray, NDArray, int, float], ctx: mx.Context=None) ->\
        Union[List[NDArray], Tuple[NDArray], NDArray]:
    """
    Convert data to mx.nd.NDArray. Data can be a list or tuple of np.ndarray, int, or float or
    it can be np.ndarray, int, or float
    :param data: input data to be converted
    :param ctx: context of the data (CPU, GPU0, GPU1, etc.); None means the mxnet default
    :return: converted output data
    :raises TypeError: if data (or a nested element) is of an unsupported type
    """
    if isinstance(data, list):
        data = [to_mx_ndarray(d, ctx=ctx) for d in data]
    elif isinstance(data, tuple):
        data = tuple(to_mx_ndarray(d, ctx=ctx) for d in data)
    elif isinstance(data, np.ndarray):
        data = nd.array(data, ctx=ctx)
    elif isinstance(data, NDArray):
        # BUG FIX: the original asserted `data.context == ctx` unconditionally, which fails
        # for any already-converted NDArray whenever ctx is left as the default None.
        # Only verify the context when one was explicitly requested.
        assert ctx is None or data.context == ctx
    elif isinstance(data, (int, float)):
        data = nd.array([data], ctx=ctx)
    else:
        raise TypeError('Unsupported data type: {}'.format(type(data)))
    return data
def asnumpy_or_asscalar(data: Union[NDArray, list, tuple]) -> Union[np.ndarray, np.number, list, tuple]:
    """
    Convert NDArray (or list or tuple of NDArray) to numpy. If shape is (1,), then convert to scalar instead.
    NOTE: This behavior is consistent with tensorflow
    :param data: NDArray or list or tuple of NDArray
    :return: data converted to numpy ndarray or to numpy scalar
    :raises TypeError: if data (or a nested element) is of an unsupported type
    """
    if isinstance(data, list):
        return [asnumpy_or_asscalar(element) for element in data]
    if isinstance(data, tuple):
        return tuple(asnumpy_or_asscalar(element) for element in data)
    if isinstance(data, NDArray):
        # Single-element arrays become scalars, matching tensorflow semantics.
        return data.asscalar() if data.shape == (1,) else data.asnumpy()
    raise TypeError('Unsupported data type: {}'.format(type(data)))
def global_norm(arrays: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]]) -> NDArray:
    """
    Calculate global norm on list or tuple of NDArrays using this formula:
    `global_norm = sqrt(sum([l2norm(p)**2 for p in parameters]))`
    :param arrays: list or tuple of parameters to calculate global norm on
    :return: single-value NDArray
    """
    def _squared_l2(arr):
        # Dense arrays: flatten and dot with itself, equal to the squared L2 norm.
        # Sparse storage types fall back to norm()**2.
        if arr.stype != 'default':
            return arr.norm().square()
        flat = arr.reshape((-1,))
        return nd.dot(flat, flat)

    return nd.sqrt(nd.add_n(*[_squared_l2(arr) for arr in arrays]))
def split_outputs_per_head(outputs: Tuple[NDArray], heads: list) -> List[List[NDArray]]:
    """
    Split a flat collection of network outputs into one list of outputs per head.
    :param outputs: flat collection of all outputs
    :param heads: list of all heads (each exposing a num_outputs attribute)
    :return: list with one list of outputs for each head
    """
    remaining = list(outputs)
    per_head = []
    for head in heads:
        per_head.append(remaining[:head.num_outputs])
        remaining = remaining[head.num_outputs:]
    # Every output must have been claimed by some head.
    assert len(remaining) == 0
    return per_head
def split_targets_per_loss(targets: list, losses: list) -> List[list]:
    """
    Split a flat list of targets into one list of targets per loss.
    :param targets: list of all targets (typically numpy ndarray)
    :param losses: list of all losses (each exposing input_schema.targets)
    :return: list of targets for each loss
    """
    per_loss = list()
    remaining = targets
    for loss in losses:
        num_targets = len(loss.input_schema.targets)
        assert len(remaining) >= num_targets, "Data length doesn't match schema"
        per_loss.append(remaining[:num_targets])
        remaining = remaining[num_targets:]
    # All targets must have been consumed.
    assert len(remaining) == 0
    return per_loss
def get_loss_agent_inputs(inputs: Dict[str, np.ndarray], head_type_idx: int, loss: Any) -> List[np.ndarray]:
    """
    Collects all inputs with prefix 'output_<head_idx>_' and matches them against agent_inputs in loss input schema.
    :param inputs: dictionary of all agent inputs
    :param head_type_idx: head-type index of the corresponding head
    :param loss: corresponding loss
    :return: list of agent inputs for this loss, matching the length of the loss input schema
    """
    prefix = 'output_{}_'.format(head_type_idx)
    # Keys are sorted so the inputs line up positionally with the schema's agent_inputs.
    loss_inputs = [inputs[key] for key in sorted(inputs.keys()) if key.startswith(prefix)]
    # Enforce that number of inputs for head_type are the same as agent_inputs specified by loss input_schema
    assert len(loss_inputs) == len(loss.input_schema.agent_inputs), "agent_input length doesn't match schema"
    return loss_inputs
def align_loss_args(
        head_outputs: List[NDArray],
        agent_inputs: List[np.ndarray],
        targets: List[np.ndarray],
        loss: Any) -> List[np.ndarray]:
    """
    Build the argument list for loss.loss_forward() by matching its parameter names
    against the names declared in the loss input_schema.
    :param head_outputs: list of all head_outputs for this loss
    :param agent_inputs: list of all agent_inputs for this loss
    :param targets: list of all targets for this loss
    :param loss: corresponding loss
    :return: list of arguments in correct order to be passed to loss
    """
    schema = loss.input_schema
    assert len(schema.head_outputs) == len(head_outputs)
    assert len(schema.agent_inputs) == len(agent_inputs)
    assert len(schema.targets) == len(targets)

    sources = ((schema.head_outputs, head_outputs),
               (schema.agent_inputs, agent_inputs),
               (schema.targets, targets))
    arg_list = list()
    missing_seen = False
    # First two parameters of loss_forward are self and F, so skip them.
    for arg_name in inspect.getfullargspec(loss.loss_forward).args[2:]:
        matched = False
        for names, values in sources:
            if arg_name in names:
                arg_list.append(values[names.index(arg_name)])
                matched = True
                break
        # Once one argument is missing, no later argument may be present.
        assert not (matched and missing_seen), "missing arguments detected!"
        missing_seen = not matched
    return arg_list
def to_tuple(data: Union[tuple, list, Any]):
    """
    Coerce data into a tuple: lists are converted, tuples are returned untouched, and any
    other value is wrapped into a single-element tuple.
    :return: tuple-ified data
    """
    if isinstance(data, tuple):
        return data
    if isinstance(data, list):
        return tuple(data)
    return (data,)
def to_list(data: Union[tuple, list, Any]):
    """
    Coerce data into a list: tuples are converted, lists are returned untouched, and any
    other value is wrapped into a single-element list.
    :return: list-ified data
    """
    if isinstance(data, list):
        return data
    if isinstance(data, tuple):
        return list(data)
    return [data]
def loss_output_dict(output: List[NDArray], schema: List[str]) -> Dict[str, List[NDArray]]:
    """
    Creates a dictionary for loss output based on the output schema. If two output values have the same
    type string in the schema they are concatenated in the same dictionary item.
    :param output: list of output values
    :param schema: list of type-strings for output values
    :return: dictionary of keyword to list of NDArrays
    """
    assert len(output) == len(schema)
    output_dict = dict()
    for name, val in zip(schema, output):
        # setdefault replaces the manual membership check of the original implementation
        output_dict.setdefault(name, []).append(val)
    return output_dict
def clip_grad(
        grads: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]],
        clip_method: GradientClippingMethod,
        clip_val: float,
        inplace=True) -> List[NDArray]:
    """
    Clip gradient values, either in place or into freshly allocated arrays
    :param grads: gradients to be clipped
    :param clip_method: clipping method
    :param clip_val: clipping value. Interpreted differently depending on clipping method.
    :param inplace: modify grads if True, otherwise create new NDArrays
    :return: clipped gradients
    :raises KeyError: if clip_method is not a supported GradientClippingMethod
    """
    # Materialize first: grads may be a generator, and it is iterated more than once below.
    grads = list(grads)
    # BUG FIX: the original allocated uninitialized arrays (nd.empty) for the inplace=False
    # case; when ClipByGlobalNorm decided not to scale (scale >= 1), those outputs were
    # returned without ever being written. Copying the gradients up front guarantees the
    # outputs are always valid; the clipping branches below overwrite them via out=o.
    output = grads if inplace else [g.copy() for g in grads]
    if clip_method == GradientClippingMethod.ClipByGlobalNorm:
        norm_unclipped_grads = global_norm(grads)
        scale = clip_val / (norm_unclipped_grads.asscalar() + 1e-8)  # todo: use branching operators?
        if scale < 1.0:
            for g, o in zip(grads, output):
                nd.broadcast_mul(g, nd.array([scale]), out=o)
    elif clip_method == GradientClippingMethod.ClipByValue:
        for g, o in zip(grads, output):
            g.clip(-clip_val, clip_val, out=o)
    elif clip_method == GradientClippingMethod.ClipByNorm:
        for g, o in zip(grads, output):
            nd.broadcast_mul(g, nd.minimum(1.0, clip_val / (g.norm() + 1e-8)), out=o)
    else:
        raise KeyError('Unsupported gradient clipping method')
    return output
def hybrid_clip(F: ModuleType, x: nd_sym_type, clip_lower: nd_sym_type, clip_upper: nd_sym_type) -> nd_sym_type:
    """
    Clip input x element-wise between clip_lower and clip_upper.
    Exists because F.clip doesn't support clipping bounds that are mx.nd.NDArray or mx.sym.Symbol.
    :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
    :param x: input data
    :param clip_lower: lower bound used for clipping, should be of shape (1,)
    :param clip_upper: upper bound used for clipping, should be of shape (1,)
    :return: clipped data
    """
    lower_bound = broadcast_like(F, clip_lower, x)
    upper_bound = broadcast_like(F, clip_upper, x)
    return F.minimum(F.maximum(x, lower_bound), upper_bound)
def broadcast_like(F: ModuleType, x: nd_sym_type, y: nd_sym_type) -> nd_sym_type:
    """
    Broadcast x to the shape of y, implemented with broadcast_mul because ONNX doesn't
    support the broadcast_like operator.
    :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
    :param x: input to be broadcast
    :param y: tensor to broadcast x like
    :return: broadcast x
    """
    # (y * 0) + 1 builds a ones tensor with y's shape without using ones_like.
    ones_like_y = (y * 0) + 1
    return F.broadcast_mul(x, ones_like_y)
def get_mxnet_activation_name(activation_name: str):
    """
    Convert a Coach activation name to the MXNet-specific activation name.
    :param activation_name: name of the activation in Coach
    :return: name of the activation in MXNet (None for 'none')
    """
    coach_to_mxnet = {
        'relu': 'relu',
        'tanh': 'tanh',
        'sigmoid': 'sigmoid',
        # FIXME Add other activations (e.g. elu, leaky_relu)
        # NOTE(review): 'softrelu' is MXNet's softplus, not SELU -- confirm this mapping is intended.
        'selu': 'softrelu',
        'none': None
    }
    assert activation_name in coach_to_mxnet, \
        "Activation function must be one of the following {}. instead it was: {}".format(
            coach_to_mxnet.keys(), activation_name)
    return coach_to_mxnet[activation_name]
class OnnxHandlerBlock(object):
    """
    Mixin base class for gluon blocks whose forward pass must behave differently
    during ONNX export. Tracks a single boolean flag that the owning block's
    hybrid_forward checks.
    """
    def __init__(self):
        self._onnx = False

    def _set_onnx(self, enabled):
        # Single mutation point for the export-mode flag.
        self._onnx = enabled

    def enable_onnx(self):
        self._set_onnx(True)

    def disable_onnx(self):
        self._set_onnx(False)
class ScopedOnnxEnable(object):
    """
    Context manager that flips every OnnxHandlerBlock child of a network into
    ONNX-export mode on entry, and back to normal mode on exit.
    """
    def __init__(self, net: gluon.HybridBlock):
        self._onnx_handlers = self._get_onnx_handlers(net)

    def __enter__(self):
        for handler in self._onnx_handlers:
            handler.enable_onnx()

    def __exit__(self, exc_type, exc_val, exc_tb):
        for handler in self._onnx_handlers:
            handler.disable_onnx()

    @staticmethod
    def _get_onnx_handlers(block: gluon.HybridBlock) -> List[OnnxHandlerBlock]:
        """
        Recursively collect block itself plus all descendants that are OnnxHandlerBlock instances.
        :return: list of OnnxHandlerBlock child blocks
        """
        # NOTE(review): relies on the private gluon attribute block._children -- verify on mxnet upgrades.
        handlers = [block] if isinstance(block, OnnxHandlerBlock) else []
        for child_block in block._children.values():
            handlers += ScopedOnnxEnable._get_onnx_handlers(child_block)
        return handlers
from typing import Union
from types import ModuleType
import mxnet as mx
from mxnet.gluon import rnn
from rl_coach.architectures.mxnet_components.layers import Dense
from rl_coach.architectures.mxnet_components.middlewares.middleware import Middleware
from rl_coach.architectures.middleware_parameters import LSTMMiddlewareParameters
from rl_coach.base_parameters import MiddlewareScheme
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class LSTMMiddleware(Middleware):
    def __init__(self, params: LSTMMiddlewareParameters):
        """
        LSTMMiddleware or Long Short Term Memory Middleware can be used in the middle part of the network. It takes the
        embeddings from the input embedders, after they were aggregated in some method (for example, concatenation)
        and passes it through a neural network which can be customizable but shared between the heads of the network.
        :param params: parameters object containing batchnorm, activation_function, dropout and
            number_of_lstm_cells properties.
        """
        super(LSTMMiddleware, self).__init__(params)
        self.number_of_lstm_cells = params.number_of_lstm_cells
        with self.name_scope():
            self.lstm = rnn.LSTM(hidden_size=self.number_of_lstm_cells)

    @property
    def schemes(self) -> dict:
        """
        Schemes are the pre-defined network architectures of various depths and complexities that can be used for the
        Middleware. Are used to create Block when LSTMMiddleware is initialised, and are applied before the LSTM.
        :return: dictionary of schemes, with key of type MiddlewareScheme enum and value being list of mxnet.gluon.Block.
        """
        return {
            MiddlewareScheme.Empty:
                [],

            # Use for PPO
            MiddlewareScheme.Shallow:
                [
                    Dense(units=64)
                ],

            # Use for DQN
            MiddlewareScheme.Medium:
                [
                    Dense(units=512)
                ],

            MiddlewareScheme.Deep:
                [
                    Dense(units=128),
                    Dense(units=128),
                    Dense(units=128)
                ]
        }

    def hybrid_forward(self,
                       F: ModuleType,
                       x: nd_sym_type,
                       *args, **kwargs) -> nd_sym_type:
        """
        Used for forward pass through LSTM middleware network.
        Applies dense layers from selected scheme before passing result to LSTM layer.
        :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
        :param x: state embedding. NOTE(review): the reshape below keeps the first two axes
            and infers a third, which implies an input of at least rank 3 (batch, time, ...)
            rather than the 2-D shape previously documented -- confirm against callers.
        :return: state middleware embedding.
        """
        # Keep the first two axes unchanged (0 == copy dim) and fold everything else
        # into one trailing channel axis, giving an NTC layout for the scheme layers.
        x_ntc = x.reshape(shape=(0, 0, -1))
        emb_ntc = super(LSTMMiddleware, self).hybrid_forward(F, x_ntc, *args, **kwargs)
        # gluon's rnn.LSTM defaults to TNC layout, so swap the batch and time axes.
        emb_tnc = emb_ntc.transpose(axes=(1, 0, 2))
        # Dataset-extraction artifact that was fused onto this line has been removed.
        return self.lstm(emb_tnc)
from typing import Union
from types import ModuleType
import mxnet as mx
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.mxnet_components.embedders.embedder import InputEmbedder
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class TensorEmbedder(InputEmbedder):
    def __init__(self, params: InputEmbedderParameters):
        """
        Input embedder for arbitrary-rank tensor observations (e.g. video or 3D volume
        data) -- any state input that is more than one dimension but is neither a flat
        vector nor a standard image. The observation is passed through a user-supplied
        network to produce a vector embedding.

        NOTE: there are no pre-defined schemes for this embedder. The preset must set
        InputEmbedderParameters.scheme to a callable returning a Gluon HybridBlock whose
        hybrid_forward() accepts a single normalized observation and returns an embedding
        vector per batch sample. Scheme blocks are stacked with optional batchnorm,
        activation and dropout in between, as configured by the parameters.

        :param params: parameters object containing input_clipping, input_rescaling,
            batchnorm, activation_function and dropout properties.
        """
        super().__init__(params)
        # Normalization constants specific to tensor-typed inputs.
        self.input_rescaling = params.input_rescaling['tensor']
        self.input_offset = params.input_offset['tensor']

    @property
    def schemes(self) -> dict:
        """
        Pre-defined scheme dictionary; intentionally empty for tensor inputs, since a
        custom scheme must be supplied in the preset.
        :return: empty dictionary (keys would otherwise be EmbedderScheme enum values)
        """
        return {}

    def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type:
        """
        Forward pass: delegates entirely to the base InputEmbedder (normalization + scheme).
        :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
        :param x: tensor observation batch representing the environment state.
        :return: embedding of environment state, of shape (batch_size, channels).
        """
        return super().hybrid_forward(F, x, *args, **kwargs)
from typing import Union
from types import ModuleType
import mxnet as mx
from mxnet import nd, sym
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.mxnet_components.embedders.embedder import InputEmbedder
from rl_coach.architectures.mxnet_components.layers import Dense
from rl_coach.base_parameters import EmbedderScheme
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class VectorEmbedder(InputEmbedder):
    def __init__(self, params: InputEmbedderParameters):
        """
        Input embedder for flat vector observations: passes the state vector through a
        configurable dense network to produce a vector embedding.
        :param params: parameters object containing input_clipping, input_rescaling,
            batchnorm, activation_function and dropout properties.
        """
        super().__init__(params)
        # Normalization constants specific to vector-typed inputs.
        self.input_rescaling = params.input_rescaling['vector']
        self.input_offset = params.input_offset['vector']

    @property
    def schemes(self):
        """
        Pre-defined dense architectures of increasing depth, selected when the
        VectorEmbedder is initialised.
        :return: dictionary mapping EmbedderScheme enum values to lists of mxnet.gluon.Block.
        """
        return {
            EmbedderScheme.Empty:
                [],

            EmbedderScheme.Shallow:
                [
                    Dense(units=128)
                ],

            # Use for DQN
            EmbedderScheme.Medium:
                [
                    Dense(units=256)
                ],

            # Use for Carla
            EmbedderScheme.Deep:
                [
                    Dense(units=128),
                    Dense(units=128),
                    Dense(units=128)
                ]
        }

    def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type:
        """
        Forward pass through the embedder network.
        :param F: backend api, either `nd` or `sym` (if block has been hybridized).
        :type F: nd or sym
        :param x: vector representing environment state, of shape (batch_size, in_channels).
        :return: embedding of environment state, of shape (batch_size, channels).
        """
        # Shape validation is only possible in imperative (NDArray) mode.
        if isinstance(x, nd.NDArray):
            if len(x.shape) != 2 and self.scheme != EmbedderScheme.Empty:
                raise ValueError("Vector embedders expect the input size to have 2 dimensions. The given size is: {}"
                                 .format(x.shape))
        return super().hybrid_forward(F, x, *args, **kwargs)
from typing import Union
from types import ModuleType
import mxnet as mx
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.mxnet_components.embedders.embedder import InputEmbedder
from rl_coach.architectures.mxnet_components.layers import Conv2d
from rl_coach.base_parameters import EmbedderScheme
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class ImageEmbedder(InputEmbedder):
    """
    An image embedder is an input embedder that takes an image input from the state and produces a
    vector embedding by passing it through a neural network.
    """
    def __init__(self, params: InputEmbedderParameters):
        """
        :param params: parameters object containing input_clipping, input_rescaling, batchnorm,
            activation_function and dropout properties.
        """
        super(ImageEmbedder, self).__init__(params)
        self.input_rescaling = params.input_rescaling['image']
        self.input_offset = params.input_offset['image']

    @property
    def schemes(self) -> dict:
        """
        Pre-defined network architectures of various depths that this embedder supports.

        Used to build the embedder's Blocks when ImageEmbedder is initialised.

        :return: dictionary mapping an EmbedderScheme enum to a list of mxnet.gluon.Block.
        """
        shallow = [Conv2d(num_filters=32, kernel_size=8, strides=4)]
        # Used for Atari DQN
        medium = [
            Conv2d(num_filters=32, kernel_size=8, strides=4),
            Conv2d(num_filters=64, kernel_size=4, strides=2),
            Conv2d(num_filters=64, kernel_size=3, strides=1),
        ]
        # Used for Carla: pairs of (strided, unit-stride) convs with doubling filter counts.
        # The very first strided conv uses a larger 5x5 kernel.
        deep = [
            Conv2d(num_filters=32, kernel_size=5, strides=2),
            Conv2d(num_filters=32, kernel_size=3, strides=1),
        ]
        for num_filters in (64, 128, 256):
            deep.append(Conv2d(num_filters=num_filters, kernel_size=3, strides=2))
            deep.append(Conv2d(num_filters=num_filters, kernel_size=3, strides=1))
        return {
            EmbedderScheme.Empty: [],
            EmbedderScheme.Shallow: shallow,
            EmbedderScheme.Medium: medium,
            EmbedderScheme.Deep: deep,
        }

    def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type:
        """
        Forward pass through the embedder network.

        :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
        :param x: image representing environment state, of shape (batch_size, in_channels, height, width).
        :return: embedding of environment state, of shape (batch_size, channels).
        """
        # convert from NHWC (coach convention) to NCHW (default for MXNet Convolutions)
        nchw = x.transpose((0, 3, 1, 2))
        return super(ImageEmbedder, self).hybrid_forward(F, nchw, *args, **kwargs)
from typing import Union, List, Tuple
from types import ModuleType
import mxnet as mx
from mxnet.gluon.loss import Loss, HuberLoss, L2Loss
from mxnet.gluon import nn
from rl_coach.architectures.mxnet_components.heads.head import Head, HeadLoss, LossInputSchema,\
NormalizedRSSInitializer
from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import VStateValue
from rl_coach.spaces import SpacesDefinition
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class VHeadLoss(HeadLoss):
    """Loss block for the Value Head: plain regression against target state values."""

    def __init__(self, loss_type: Loss=L2Loss, weight: float=1, batch_axis: int=0) -> None:
        """
        :param loss_type: loss class, mean squared error (L2Loss) by default.
        :param weight: scalar used to adjust relative weight of loss (if combined with other losses).
        :param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation.
        """
        super(VHeadLoss, self).__init__(weight=weight, batch_axis=batch_axis)
        with self.name_scope():
            self.loss_fn = loss_type(weight=weight, batch_axis=batch_axis)

    @property
    def input_schema(self) -> LossInputSchema:
        # single head output compared against a single target; no extra agent inputs
        return LossInputSchema(head_outputs=['pred'], agent_inputs=[], targets=['target'])

    def loss_forward(self,
                     F: ModuleType,
                     pred: nd_sym_type,
                     target: nd_sym_type) -> List[Tuple[nd_sym_type, str]]:
        """
        Forward pass through the loss computation.

        :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
        :param pred: state values predicted by VHead network, of shape (batch_size).
        :param target: actual state values, of shape (batch_size).
        :return: list of (loss, type) tuples; the loss is a scalar mean over the batch.
        """
        per_sample_loss = self.loss_fn(pred, target)
        return [(per_sample_loss.mean(), LOSS_OUT_TYPE_LOSS)]
class VHead(Head):
    """Value Head for predicting state values V(s)."""

    def __init__(self,
                 agent_parameters: AgentParameters,
                 spaces: SpacesDefinition,
                 network_name: str,
                 head_type_idx: int=0,
                 loss_weight: float=1.,
                 is_local: bool=True,
                 activation_function: str='relu',
                 dense_layer: None=None,
                 loss_type: Union[HuberLoss, L2Loss]=L2Loss):
        """
        :param agent_parameters: containing algorithm parameters, but currently unused.
        :param spaces: containing action spaces, but currently unused.
        :param network_name: name of head network. currently unused.
        :param head_type_idx: index of head network. currently unused.
        :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others).
        :param is_local: flag to denote if network is local. currently unused.
        :param activation_function: activation function to use between layers. currently unused.
        :param dense_layer: type of dense layer to use in network. currently unused.
        :param loss_type: loss function, mean squared error (L2Loss) by default, or alternatively HuberLoss.
        """
        super(VHead, self).__init__(agent_parameters, spaces, network_name, head_type_idx, loss_weight,
                                    is_local, activation_function, dense_layer)
        assert (loss_type == L2Loss) or (loss_type == HuberLoss), "Only expecting L2Loss or HuberLoss."
        self.loss_type = loss_type
        self.return_type = VStateValue
        with self.name_scope():
            # single output unit; normalized-row init keeps the initial output scale at 1.0
            self.dense = nn.Dense(units=1, weight_initializer=NormalizedRSSInitializer(1.0))

    def hybrid_forward(self, F: ModuleType, x: nd_sym_type) -> nd_sym_type:
        """
        Forward pass through the value head.

        :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
        :param x: middleware state representation, of shape (batch_size, in_channels).
        :return: predicted state values, of shape (batch_size).
        """
        # squeeze the trailing unit dimension: (batch_size, 1) -> (batch_size)
        return self.dense(x).squeeze(axis=1)

    def loss(self) -> Loss:
        """
        Loss block to be used for this value head implementation.

        :return: loss block (can be called as function) for outputs returned by the head network.
        """
        return VHeadLoss(loss_type=self.loss_type, weight=self.loss_weight)
from typing import Dict, List, Union, Tuple
import mxnet as mx
from mxnet.initializer import Initializer, register
from mxnet.gluon import nn, loss
from mxnet.ndarray import NDArray
from mxnet.symbol import Symbol
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
LOSS_OUT_TYPE_LOSS = 'loss'
LOSS_OUT_TYPE_REGULARIZATION = 'regularization'
@register
class NormalizedRSSInitializer(Initializer):
    """
    Standardizes the Root Sum of Squares along the input channel dimension.
    Intended for Dense layer weight matrices only (i.e. do not use on Convolution kernels).
    An MXNet Dense weight matrix is of shape (out_ch, in_ch), so rows are rescaled (axis 1).
    The per-row Root Sum of Squares is set to `rss` (1.0 by default).
    Called `normalized_columns_initializer` in the TensorFlow backend (rows here, columns there).
    """
    def __init__(self, rss=1.0):
        super(NormalizedRSSInitializer, self).__init__(rss=rss)
        self.rss = float(rss)

    def _init_weight(self, name, arr):
        # start from standard gaussian noise, then rescale each row to the target RSS
        mx.nd.random.normal(0, 1, out=arr)
        row_rss = arr.square().sum(axis=1).sqrt()
        arr *= (self.rss / row_rss).expand_dims(1)
class LossInputSchema(object):
    """
    Container for the argument schema of a loss block's hybrid_forward.

    Describes which hybrid_forward arguments come from the head, from the agent,
    and from the targets, and in what order.
    """
    def __init__(self, head_outputs: List[str], agent_inputs: List[str], targets: List[str]):
        """
        :param head_outputs: argument names in hybrid_forward that are outputs of the head.
            The order and number MUST MATCH the output from the head.
        :param agent_inputs: argument names in hybrid_forward that are inputs from the agent.
            The order and number MUST MATCH `output_<head_type_idx>_<order>` for this head.
        :param targets: argument names in hybrid_forward that are targets for the loss.
            The order and number MUST MATCH targets passed from the agent.
        """
        self._head_outputs = head_outputs
        self._agent_inputs = agent_inputs
        self._targets = targets

    @property
    def head_outputs(self):
        """Argument names fed from the head's outputs."""
        return self._head_outputs

    @property
    def agent_inputs(self):
        """Argument names fed from the agent's inputs."""
        return self._agent_inputs

    @property
    def targets(self):
        """Argument names fed from the agent's targets."""
        return self._targets
class HeadLoss(loss.Loss):
    """
    ABC for loss functions of each head. Child class must implement input_schema() and loss_forward()
    """
    def __init__(self, *args, **kwargs):
        super(HeadLoss, self).__init__(*args, **kwargs)
        # output schema is discovered lazily from the first hybrid_forward() call
        self._output_schema = None  # type: List[str]
    @property
    def input_schema(self) -> LossInputSchema:
        """
        :return: schema for input of hybrid_forward. Read docstring for LossInputSchema for details.
        """
        raise NotImplementedError
    @property
    def output_schema(self) -> List[str]:
        """
        :return: schema for output of hybrid_forward. Must contain 'loss' and 'regularization' keys at least once.
            The order and total number must match that of returned values from the loss. 'loss' and 'regularization'
            are special keys. Any other string is treated as auxiliary outputs and must include match auxiliary
            fetch names returned by the head.
        """
        return self._output_schema
    def forward(self, *args):
        """
        Override forward() so that number of outputs can be checked against the schema
        """
        outputs = super(HeadLoss, self).forward(*args)
        if isinstance(outputs, tuple) or isinstance(outputs, list):
            num_outputs = len(outputs)
        else:
            # a single NDArray/Symbol output counts as one output
            assert isinstance(outputs, NDArray) or isinstance(outputs, Symbol)
            num_outputs = 1
        assert num_outputs == len(self.output_schema), "Number of outputs don't match schema ({} != {})".format(
            num_outputs, len(self.output_schema))
        return outputs
    def _loss_output(self, outputs: List[Tuple[Union[NDArray, Symbol], str]]):
        """
        Must be called on the output from hybrid_forward().
        Saves the returned output as the schema and returns output values in a list
        :param outputs: list of (value, type_str) pairs produced by loss_forward()
        :return: list of output values
        """
        output_schema = [o[1] for o in outputs]
        # the schema is fixed on the first call; later calls must return the same type layout
        assert self._output_schema is None or self._output_schema == output_schema
        self._output_schema = output_schema
        return tuple(o[0] for o in outputs)
    def hybrid_forward(self, F, x, *args, **kwargs):
        """
        Passes the call to loss_forward() and constructs output schema from its output by calling loss_output()
        """
        return self._loss_output(self.loss_forward(F, x, *args, **kwargs))
    def loss_forward(self, F, x, *args, **kwargs) -> List[Tuple[Union[NDArray, Symbol], str]]:
        """
        Similar to hybrid_forward, but returns list of (NDArray, type_str)
        """
        raise NotImplementedError
class Head(nn.HybridBlock):
    """
    A head is the final part of the network. It takes the embedding from the middleware embedder and passes it
    through a neural network to produce the output of the network. There can be multiple heads in a network, and
    each one has an assigned loss function. The heads are algorithm dependent.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition,
                 network_name: str, head_type_idx: int=0, loss_weight: float=1., is_local: bool=True,
                 activation_function: str='relu', dense_layer: None=None):
        """
        :param agent_parameters: containing algorithm parameters such as clip_likelihood_ratio_using_epsilon
            and beta_entropy.
        :param spaces: containing action spaces used for defining size of network output.
        :param network_name: name of head network. currently unused.
        :param head_type_idx: index of head network. currently unused.
        :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others).
        :param is_local: flag to denote if network is local. currently unused.
        :param activation_function: activation function to use between layers. currently unused.
        :param dense_layer: type of dense layer to use in network. currently unused.
        """
        super(Head, self).__init__()
        self.head_type_idx = head_type_idx
        self.network_name = network_name
        self.loss_weight = loss_weight
        self.is_local = is_local
        self.ap = agent_parameters
        self.spaces = spaces
        # concrete heads set this to the agent-facing return type (e.g. QActionStateValue)
        self.return_type = None
        self.activation_function = activation_function
        self.dense_layer = dense_layer
        # discovered lazily on the first forward() call
        self._num_outputs = None
    def loss(self) -> HeadLoss:
        """
        Returns loss block to be used for specific head implementation.
        :return: loss block (can be called as function) for outputs returned by the head network.
        """
        raise NotImplementedError()
    @property
    def num_outputs(self):
        """ Returns number of outputs that forward() call will return
        :return: number of outputs
        """
        assert self._num_outputs is not None, 'must call forward() once to configure number of outputs'
        return self._num_outputs
    def forward(self, *args):
        """
        Override forward() so that number of outputs can be automatically set
        """
        outputs = super(Head, self).forward(*args)
        if isinstance(outputs, tuple):
            num_outputs = len(outputs)
        else:
            # a single NDArray/Symbol output counts as one output
            assert isinstance(outputs, NDArray) or isinstance(outputs, Symbol)
            num_outputs = 1
        if self._num_outputs is None:
            self._num_outputs = num_outputs
        else:
            assert self._num_outputs == num_outputs, 'Number of outputs cannot change ({} != {})'.format(
                self._num_outputs, num_outputs)
        # outputs must line up one-to-one with the loss input schema's head_outputs
        assert self._num_outputs == len(self.loss().input_schema.head_outputs)
        return outputs
    def hybrid_forward(self, F, x, *args, **kwargs):
        """
        Used for forward pass through head network.
        :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
        :param x: middleware state representation, of shape (batch_size, in_channels).
        :return: final output of network, that will be used in loss calculations.
        """
        raise NotImplementedError()
from typing import List, Tuple, Union
from types import ModuleType
import mxnet as mx
from mxnet.gluon import nn
from rl_coach.architectures.mxnet_components.heads.head import Head, HeadLoss, LossInputSchema,\
NormalizedRSSInitializer
from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class PPOVHeadLoss(HeadLoss):
    """Clipped value loss for the PPO Value network."""

    def __init__(self, clip_likelihood_ratio_using_epsilon: float, weight: float=1, batch_axis: int=0) -> None:
        """
        Loss for PPO Value network.
        Schulman implemented this extension in OpenAI baselines for PPO2
        See https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py#L72
        :param clip_likelihood_ratio_using_epsilon: epsilon to use for likelihood ratio clipping.
        :param weight: scalar used to adjust relative weight of loss (if using this loss with others).
        :param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation.
        """
        super(PPOVHeadLoss, self).__init__(weight=weight, batch_axis=batch_axis)
        self.weight = weight
        self.clip_likelihood_ratio_using_epsilon = clip_likelihood_ratio_using_epsilon

    @property
    def input_schema(self) -> LossInputSchema:
        return LossInputSchema(
            head_outputs=['new_policy_values'],
            agent_inputs=['old_policy_values'],
            targets=['target_values']
        )

    def loss_forward(self,
                     F: ModuleType,
                     new_policy_values: nd_sym_type,
                     old_policy_values: nd_sym_type,
                     target_values: nd_sym_type) -> List[Tuple[nd_sym_type, str]]:
        """
        Used for forward pass through loss computations.
        Calculates two losses (L2 and a clipped difference L2 loss) and takes the maximum of the two.
        Works with batches of data, and optionally time_steps, but be consistent in usage: i.e. if using time_step,
        new_policy_values, old_policy_values and target_values all must include a time_step dimension.
        :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        :param new_policy_values: values predicted by PPOVHead network,
            of shape (batch_size) or of shape (batch_size, time_step).
        :param old_policy_values: values predicted by old value network,
            of shape (batch_size) or of shape (batch_size, time_step).
        :param target_values: actual state values,
            of shape (batch_size) or of shape (batch_size, time_step).
        :return: loss, of shape (batch_size).
        """
        # L2 loss
        value_loss_1 = (new_policy_values - target_values).square()
        # Clipped difference L2 loss: keep the new value prediction within epsilon of the old one
        diff = new_policy_values - old_policy_values
        clipped_diff = diff.clip(a_min=-self.clip_likelihood_ratio_using_epsilon,
                                 a_max=self.clip_likelihood_ratio_using_epsilon)
        value_loss_2 = (old_policy_values + clipped_diff - target_values).square()
        # Element-wise maximum of the two losses.
        # BUGFIX: use F instead of mx.nd so the loss also works when hybridized (symbolic mode),
        # as promised by the docstring above.
        value_loss_max = F.stack(value_loss_1, value_loss_2).max(axis=0)
        # Aggregate over temporal axis, adding it if it doesn't exist (hence the reshape)
        value_loss_max_w_time = value_loss_max.reshape(shape=(0, -1))
        value_loss = value_loss_max_w_time.mean(axis=1)
        # Weight the loss (and average over samples of batch)
        value_loss_weighted = value_loss.mean() * self.weight
        return [(value_loss_weighted, LOSS_OUT_TYPE_LOSS)]
class PPOVHead(Head):
    """PPO Value Head for predicting state values, trained with a clipped value loss."""

    def __init__(self,
                 agent_parameters: AgentParameters,
                 spaces: SpacesDefinition,
                 network_name: str,
                 head_type_idx: int=0,
                 loss_weight: float=1.,
                 is_local: bool = True,
                 activation_function: str='relu',
                 dense_layer: None=None) -> None:
        """
        :param agent_parameters: containing algorithm parameters; clip_likelihood_ratio_using_epsilon is read.
        :param spaces: containing action spaces, but currently unused.
        :param network_name: name of head network. currently unused.
        :param head_type_idx: index of head network. currently unused.
        :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others).
        :param is_local: flag to denote if network is local. currently unused.
        :param activation_function: activation function to use between layers. currently unused.
        :param dense_layer: type of dense layer to use in network. currently unused.
        """
        super(PPOVHead, self).__init__(agent_parameters, spaces, network_name, head_type_idx, loss_weight, is_local,
                                       activation_function, dense_layer=dense_layer)
        self.clip_likelihood_ratio_using_epsilon = agent_parameters.algorithm.clip_likelihood_ratio_using_epsilon
        self.return_type = ActionProbabilities
        with self.name_scope():
            # single output unit; normalized-row init keeps the initial output scale at 1.0
            self.dense = nn.Dense(units=1, weight_initializer=NormalizedRSSInitializer(1.0))

    def hybrid_forward(self, F: ModuleType, x: nd_sym_type) -> nd_sym_type:
        """
        Forward pass through the value head.

        :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        :param x: middleware state representation, of shape (batch_size, in_channels).
        :return: predicted state values, of shape (batch_size).
        """
        # squeeze the trailing unit dimension: (batch_size, 1) -> (batch_size)
        return self.dense(x).squeeze(axis=1)

    def loss(self) -> mx.gluon.loss.Loss:
        """
        Loss block to be used for this value head implementation.

        :return: loss block (can be called as function) for outputs returned by the value head network.
        """
        return PPOVHeadLoss(self.clip_likelihood_ratio_using_epsilon, weight=self.loss_weight)
from typing import Union, List, Tuple
from types import ModuleType
import mxnet as mx
from mxnet.gluon.loss import Loss, HuberLoss, L2Loss
from mxnet.gluon import nn
from rl_coach.architectures.mxnet_components.heads.head import Head, HeadLoss, LossInputSchema
from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpace
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class QHeadLoss(HeadLoss):
    """Loss block for the Q-Value Head: regression against target state-action values."""

    def __init__(self, loss_type: Loss=L2Loss, weight: float=1, batch_axis: int=0) -> None:
        """
        :param loss_type: loss class, mean squared error (L2Loss) by default.
        :param weight: scalar used to adjust relative weight of loss (if combined with other losses).
        :param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation.
        """
        super(QHeadLoss, self).__init__(weight=weight, batch_axis=batch_axis)
        with self.name_scope():
            self.loss_fn = loss_type(weight=weight, batch_axis=batch_axis)

    @property
    def input_schema(self) -> LossInputSchema:
        # single head output compared against a single target; no extra agent inputs
        return LossInputSchema(head_outputs=['pred'], agent_inputs=[], targets=['target'])

    def loss_forward(self,
                     F: ModuleType,
                     pred: nd_sym_type,
                     target: nd_sym_type) -> List[Tuple[nd_sym_type, str]]:
        """
        Forward pass through the loss computation.

        :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
        :param pred: state-action q-values predicted by QHead network, of shape (batch_size, num_actions).
        :param target: actual state-action q-values, of shape (batch_size, num_actions).
        :return: list of (loss, type) tuples; the loss is a scalar mean over the batch.
        """
        per_sample_loss = self.loss_fn(pred, target)
        return [(per_sample_loss.mean(), LOSS_OUT_TYPE_LOSS)]
class QHead(Head):
    """Q-Value Head for predicting state-action Q-Values."""

    def __init__(self,
                 agent_parameters: AgentParameters,
                 spaces: SpacesDefinition,
                 network_name: str,
                 head_type_idx: int=0,
                 loss_weight: float=1.,
                 is_local: bool=True,
                 activation_function: str='relu',
                 dense_layer: None=None,
                 loss_type: Union[HuberLoss, L2Loss]=L2Loss) -> None:
        """
        :param agent_parameters: containing algorithm parameters, but currently unused.
        :param spaces: containing action spaces used for defining size of network output.
        :param network_name: name of head network. currently unused.
        :param head_type_idx: index of head network. currently unused.
        :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others).
        :param is_local: flag to denote if network is local. currently unused.
        :param activation_function: activation function to use between layers. currently unused.
        :param dense_layer: type of dense layer to use in network. currently unused.
        :param loss_type: loss function to use, either L2Loss (default) or HuberLoss.
        :raises ValueError: if the action space is neither BoxActionSpace nor DiscreteActionSpace.
        """
        super(QHead, self).__init__(agent_parameters, spaces, network_name, head_type_idx, loss_weight,
                                    is_local, activation_function, dense_layer)
        if isinstance(self.spaces.action, BoxActionSpace):
            # continuous action space: a single q-value output
            self.num_actions = 1
        elif isinstance(self.spaces.action, DiscreteActionSpace):
            self.num_actions = len(self.spaces.action.actions)
        else:
            # BUGFIX: previously an unsupported space left self.num_actions undefined,
            # failing later with an obscure AttributeError; fail fast with a clear message.
            raise ValueError("QHead does not support action spaces of type: {}".format(
                type(self.spaces.action).__name__))
        self.return_type = QActionStateValue
        assert (loss_type == L2Loss) or (loss_type == HuberLoss), "Only expecting L2Loss or HuberLoss."
        self.loss_type = loss_type
        with self.name_scope():
            self.dense = nn.Dense(units=self.num_actions)

    def loss(self) -> Loss:
        """
        Loss block to be used for this Q-value head implementation.
        :return: loss block (can be called as function) for outputs returned by the head network.
        """
        return QHeadLoss(loss_type=self.loss_type, weight=self.loss_weight)

    def hybrid_forward(self, F: ModuleType, x: nd_sym_type) -> nd_sym_type:
        """
        Forward pass through the Q-Value head network.
        :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
        :param x: middleware state representation, of shape (batch_size, in_channels).
        :return: predicted state-action q-values, of shape (batch_size, num_actions).
        """
        return self.dense(x)
from typing import Tuple
import tensorflow as tf
def create_cluster_spec(parameters_server: str, workers: str) -> tf.train.ClusterSpec:
    """
    Build a ClusterSpec object describing the cluster.
    :param parameters_server: comma-separated list of hostname:port pairs to which the parameter servers are assigned
    :param workers: comma-separated list of hostname:port pairs to which the workers are assigned
    :return: a ClusterSpec object representing the cluster
    """
    # split the comma-separated host lists and map them onto the two job names
    return tf.train.ClusterSpec({
        "ps": parameters_server.split(","),
        "worker": workers.split(","),
    })
def create_and_start_parameters_server(cluster_spec: tf.train.ClusterSpec, config: tf.ConfigProto=None) -> None:
    """
    Create a parameter server and block serving requests until it terminates.
    :param cluster_spec: the ClusterSpec object representing the cluster
    :param config: the tensorflow config to use
    :return: None
    """
    ps_server = tf.train.Server(cluster_spec, job_name="ps", task_index=0, config=config)
    # block here: the parameter server serves variable updates for the workers until shutdown
    ps_server.join()
def create_worker_server_and_device(cluster_spec: tf.train.ClusterSpec, task_index: int,
                                    use_cpu: bool=True, config: tf.ConfigProto=None) -> Tuple[str, tf.device]:
    """
    Create a worker server and a device setter used to assign the worker's operations to.
    :param cluster_spec: a ClusterSpec object representing the cluster
    :param task_index: the index of the worker task
    :param use_cpu: if use_cpu=True, all the agent operations will be assigned to a CPU instead of a GPU
    :param config: the tensorflow config to use
    :return: the target string for the tf.Session and the worker device setter object
    """
    # create and start the worker server
    server = tf.train.Server(cluster_spec, job_name="worker", task_index=task_index, config=config)
    # pin ops to this worker's CPU or GPU, with variables placed on the parameter servers
    device_suffix = "/cpu:0" if use_cpu else "/device:GPU:0"
    worker_device = "/job:worker/task:{}{}".format(task_index, device_suffix)
    device = tf.train.replica_device_setter(worker_device=worker_device, cluster=cluster_spec)
    return server.target, device
def create_monitored_session(target: tf.train.Server, task_index: int,
                             checkpoint_dir: str, checkpoint_save_secs: int, config: tf.ConfigProto=None) -> tf.Session:
    """
    Create a monitored training session for a worker.
    :param target: the target string for the tf.Session
    :param task_index: the task index of the worker
    :param checkpoint_dir: a directory path where the checkpoints will be stored
    :param checkpoint_save_secs: number of seconds between checkpoints storing
    :param config: the tensorflow configuration (optional)
    :return: the session to use for the run
    """
    # by convention, the worker with task index 0 acts as the chief
    return tf.train.MonitoredTrainingSession(
        master=target,
        is_chief=(task_index == 0),
        hooks=[],
        checkpoint_dir=checkpoint_dir,
        save_checkpoint_secs=checkpoint_save_secs,
        config=config,
        log_step_count_steps=0  # disable logging of steps to avoid TF warning during inference
    )
import math
from types import FunctionType
import tensorflow as tf
from rl_coach.architectures import layers
from rl_coach.architectures.tensorflow_components import utils
def batchnorm_activation_dropout(input_layer, batchnorm, activation_function, dropout_rate, is_training, name):
    """
    Optionally stack batchnorm, activation and dropout layers on top of input_layer.

    :param input_layer: the layer to build on top of
    :param batchnorm: if True, add a batch normalization layer
    :param activation_function: activation function (callable, or name string resolved via utils);
        falsy to skip
    :param dropout_rate: dropout rate; a rate of 0 disables dropout
    :param is_training: a boolean *tensor* (not a python bool) toggling train/inference behavior
    :param name: prefix for the names of the created layers
    :return: list of the layers that were added (excluding input_layer)
    """
    # NOTE: the local accumulator previously shadowed the module-level `layers` import; renamed.
    built = [input_layer]
    # Rationale: passing a python bool here would mean that batchnorm and/or dropout can never
    # switch between train and inference modes at run time.
    assert not isinstance(is_training, bool)
    # batchnorm
    if batchnorm:
        built.append(
            tf.layers.batch_normalization(built[-1], name="{}_batchnorm".format(name), training=is_training)
        )
    # activation
    if activation_function:
        if isinstance(activation_function, str):
            activation_function = utils.get_activation_function(activation_function)
        built.append(
            activation_function(built[-1], name="{}_activation".format(name))
        )
    # dropout
    if dropout_rate > 0:
        built.append(
            tf.layers.dropout(built[-1], dropout_rate, name="{}_dropout".format(name), training=is_training)
        )
    # return only the layers that were added on top of the input layer
    return built[1:]
# Global registries mapping generic layer descriptions (rl_coach.architectures.layers)
# to their tensorflow implementations.
# tf_layer_dict: generic layer type -> converter producing a concrete TF layer instance
tf_layer_dict = dict()
# tf_layer_class_dict: generic layer type -> provider returning the concrete TF layer class
tf_layer_class_dict = dict()
def reg_to_tf_instance(layer_type) -> FunctionType:
    """
    Function decorator that registers a converter from a generic layer instance to its
    tensorflow implementation in the global tf_layer_dict registry.
    :param layer_type: the generic layer type the decorated converter handles
    :return: decorated function
    """
    def _register(converter):
        # each generic layer type may only be registered once
        assert layer_type not in tf_layer_dict
        tf_layer_dict[layer_type] = converter
        return converter
    return _register
def reg_to_tf_class(layer_type) -> FunctionType:
    """
    Function decorator that registers a provider of the concrete tensorflow layer class for a
    generic layer type in the global tf_layer_class_dict registry.
    :param layer_type: the generic layer type the decorated provider handles
    :return: decorated function
    """
    def _register(provider):
        # each generic layer type may only be registered once
        assert layer_type not in tf_layer_class_dict
        tf_layer_class_dict[layer_type] = provider
        return provider
    return _register
def convert_layer(layer):
    """
    Convert a generic layer description into its concrete tensorflow implementation.
    If layer is already callable (meaning this is already a concrete TF class), it is returned as-is.
    :param layer: layer to be converted
    :return: converted layer if not callable, otherwise layer itself
    """
    return layer if callable(layer) else tf_layer_dict[type(layer)](layer)
def convert_layer_class(layer_class):
    """
    Convert a generic layer class into the corresponding concrete tensorflow layer class.
    A class exposing a `to_tf_instance` attribute is already concrete and is returned as-is.
    :param layer_class: layer class to be converted
    :return: the concrete tensorflow layer class
    """
    if hasattr(layer_class, 'to_tf_instance'):
        return layer_class
    else:
        # look up the registered provider for this generic class and call it to get the TF class
        return tf_layer_class_dict[layer_class]()
class Conv2d(layers.Conv2d):
    """Tensorflow implementation of the generic Conv2d layer description."""
    def __init__(self, num_filters: int, kernel_size: int, strides: int):
        super(Conv2d, self).__init__(num_filters=num_filters, kernel_size=kernel_size, strides=strides)
    def __call__(self, input_layer, name: str=None, is_training=None):
        """
        returns a tensorflow conv2d layer
        :param input_layer: previous layer
        :param name: layer name
        :param is_training: unused; kept for a uniform layer-call interface
        :return: conv2d layer
        """
        return tf.layers.conv2d(input_layer, filters=self.num_filters, kernel_size=self.kernel_size,
                                strides=self.strides, data_format='channels_last', name=name)
    @staticmethod
    @reg_to_tf_instance(layers.Conv2d)
    def to_tf_instance(base: layers.Conv2d):
        # converter from the generic Conv2d description to this concrete TF implementation
        return Conv2d(
            num_filters=base.num_filters,
            kernel_size=base.kernel_size,
            strides=base.strides)
    @staticmethod
    @reg_to_tf_class(layers.Conv2d)
    def to_tf_class():
        # provider of the concrete TF class for the generic Conv2d type
        return Conv2d
class BatchnormActivationDropout(layers.BatchnormActivationDropout):
    """Tensorflow implementation of the generic batchnorm/activation/dropout layer description."""
    def __init__(self, batchnorm: bool=False, activation_function=None, dropout_rate: float=0):
        super(BatchnormActivationDropout, self).__init__(
            batchnorm=batchnorm, activation_function=activation_function, dropout_rate=dropout_rate)

    def __call__(self, input_layer, name: str=None, is_training=None):
        """
        returns a list of tensorflow batchnorm, activation and dropout layers
        :param input_layer: previous layer
        :param name: layer name
        :param is_training: boolean tensor toggling train/inference behavior of batchnorm and dropout
        :return: batchnorm, activation and dropout layers
        """
        return batchnorm_activation_dropout(input_layer, batchnorm=self.batchnorm,
                                            activation_function=self.activation_function,
                                            dropout_rate=self.dropout_rate,
                                            is_training=is_training, name=name)

    @staticmethod
    @reg_to_tf_instance(layers.BatchnormActivationDropout)
    def to_tf_instance(base: layers.BatchnormActivationDropout):
        # BUGFIX: previously returned a (class, instance) tuple, inconsistent with the other
        # to_tf_instance converters and with convert_layer(), which expects a layer instance.
        return BatchnormActivationDropout(
            batchnorm=base.batchnorm,
            activation_function=base.activation_function,
            dropout_rate=base.dropout_rate)

    @staticmethod
    @reg_to_tf_class(layers.BatchnormActivationDropout)
    def to_tf_class():
        # provider of the concrete TF class for the generic BatchnormActivationDropout type
        return BatchnormActivationDropout
class Dense(layers.Dense):
    """Tensorflow implementation of the generic Dense layer description."""
    def __init__(self, units: int):
        super(Dense, self).__init__(units=units)
    def __call__(self, input_layer, name: str=None, kernel_initializer=None, bias_initializer=None,
                 activation=None, is_training=None):
        """
        returns a tensorflow dense layer
        :param input_layer: previous layer
        :param name: layer name
        :param kernel_initializer: initializer for the weight matrix (tensorflow default when None)
        :param bias_initializer: initializer for the bias (zeros when None)
        :param activation: optional activation function applied to the output
        :param is_training: unused; kept for a uniform layer-call interface
        :return: dense layer
        """
        if bias_initializer is None:
            bias_initializer = tf.zeros_initializer()
        return tf.layers.dense(input_layer, self.units, name=name, kernel_initializer=kernel_initializer,
                               activation=activation, bias_initializer=bias_initializer)
    @staticmethod
    @reg_to_tf_instance(layers.Dense)
    def to_tf_instance(base: layers.Dense):
        # converter from the generic Dense description to this concrete TF implementation
        return Dense(units=base.units)
    @staticmethod
    @reg_to_tf_class(layers.Dense)
    def to_tf_class():
        # provider of the concrete TF class for the generic Dense type
        return Dense
class NoisyNetDense(layers.NoisyNetDense):
    """
    A factorized Noisy Net layer

    https://arxiv.org/abs/1706.10295.
    """

    def __init__(self, units: int):
        super().__init__(units=units)

    def __call__(self, input_layer, name: str, kernel_initializer=None, activation=None, is_training=None,
                 bias_initializer=None):
        """
        Build a NoisyNet dense layer: a dense layer whose weights and biases are perturbed by
        learned, factorized Gaussian noise (fresh noise is sampled on every graph evaluation).

        :param input_layer: previous layer
        :param name: layer name (used as the default variable-scope name)
        :param kernel_initializer: initializer for kernels. Default is to use Gaussian noise that preserves stddev.
        :param activation: the activation function (identity when None)
        :param is_training: unused; kept for a uniform layer-call signature
        :param bias_initializer: initializer for the bias means (zeros when None)
        :return: output tensor of the noisy dense layer
        """
        # TODO: noise sampling should be externally controlled. DQN is fine with sampling noise for every
        #  forward (either act or train, both for online and target networks).
        #  A3C, on the other hand, should sample noise only when policy changes (i.e. after every t_max steps)

        def signed_sqrt(values):
            # f(x) = sign(x) * sqrt(|x|), the noise transform from the NoisyNet paper
            return tf.sqrt(tf.abs(values)) * tf.sign(values)

        def factorized_noise(inputs, outputs):
            # TODO: use factorized noise only for compute intensive algos (e.g. DQN);
            #  lighter algos presumably should sample independent noise instead — confirm
            row_noise = signed_sqrt(tf.random_normal((inputs, 1)))
            col_noise = signed_sqrt(tf.random_normal((1, outputs)))
            # outer product of the two noise vectors -> (inputs, outputs) noise matrix
            return tf.matmul(row_noise, col_noise)

        num_inputs = input_layer.get_shape()[-1].value
        num_outputs = self.units

        # initialization bounds scale with fan-in, as in the paper
        stddev = 1 / math.sqrt(num_inputs)
        if activation is None:
            activation = lambda x: x
        if kernel_initializer is None:
            kernel_mean_initializer = tf.random_uniform_initializer(-stddev, stddev)
            kernel_stddev_initializer = tf.random_uniform_initializer(-stddev * self.sigma0, stddev * self.sigma0)
        else:
            kernel_mean_initializer = kernel_stddev_initializer = kernel_initializer
        if bias_initializer is None:
            bias_initializer = tf.zeros_initializer()

        with tf.variable_scope(None, default_name=name):
            weight_mean = tf.get_variable('weight_mean', shape=(num_inputs, num_outputs),
                                          initializer=kernel_mean_initializer)
            bias_mean = tf.get_variable('bias_mean', shape=(num_outputs,), initializer=bias_initializer)

            weight_stddev = tf.get_variable('weight_stddev', shape=(num_inputs, num_outputs),
                                            initializer=kernel_stddev_initializer)
            bias_stddev = tf.get_variable('bias_stddev', shape=(num_outputs,),
                                          initializer=kernel_stddev_initializer)
            bias_noise = signed_sqrt(tf.random_normal((num_outputs,)))
            weight_noise = factorized_noise(num_inputs, num_outputs)

            # effective parameters = learned mean + learned scale * sampled noise
            bias = bias_mean + bias_stddev * bias_noise
            weight = weight_mean + weight_stddev * weight_noise
            return activation(tf.matmul(input_layer, weight) + bias)

    @staticmethod
    @reg_to_tf_instance(layers.NoisyNetDense)
    def to_tf_instance(base: layers.NoisyNetDense):
        """Instantiate the TF NoisyNetDense layer from a generic description."""
        return NoisyNetDense(units=base.units)

    @staticmethod
    @reg_to_tf_class(layers.NoisyNetDense)
    def to_tf_class():
        """Return the TF implementation class registered for the generic NoisyNetDense."""
        return NoisyNetDense
from typing import Union, List
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.middlewares.middleware import Middleware
from rl_coach.base_parameters import MiddlewareScheme
from rl_coach.core_types import Middleware_FC_Embedding
from rl_coach.utils import force_list
class FCMiddleware(Middleware):
    """Fully-connected middleware embedder, optionally split into several parallel dense streams."""

    def __init__(self, activation_function=tf.nn.relu,
                 scheme: MiddlewareScheme = MiddlewareScheme.Medium,
                 batchnorm: bool = False, dropout_rate: float = 0.0,
                 name="middleware_fc_embedder", dense_layer=Dense, is_training=False, num_streams: int = 1):
        """
        :param activation_function: activation applied by the scheme's layers
        :param scheme: predefined dense-layer scheme (or a custom list of layers)
        :param batchnorm: whether to add batchnorm after each layer
        :param dropout_rate: dropout rate after each layer (0 disables dropout)
        :param name: variable-scope name of the middleware
        :param dense_layer: dense-layer class used when instantiating the scheme
        :param is_training: training-phase flag forwarded to batchnorm/dropout
        :param num_streams: number of independent copies of the scheme, all fed from the same input
        """
        super().__init__(activation_function=activation_function, batchnorm=batchnorm,
                         dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
                         is_training=is_training)
        self.return_type = Middleware_FC_Embedding
        assert(isinstance(num_streams, int) and num_streams >= 1)
        self.num_streams = num_streams

    def _build_module(self):
        # each stream is an independent stack of the scheme's layers built on the shared input;
        # self.output collects the last tensor of every stream
        self.output = []
        for stream_idx in range(self.num_streams):
            stream = [self.input]
            for idx, layer_params in enumerate(self.layers_params):
                # layer names must be unique across streams, hence the per-stream offset
                unique_name = '{}_{}'.format(layer_params.__class__.__name__,
                                             idx + stream_idx * len(self.layers_params))
                stream.extend(force_list(
                    layer_params(stream[-1], name=unique_name, is_training=self.is_training)))
            self.output.append(stream[-1])

    @property
    def schemes(self):
        return {
            MiddlewareScheme.Empty: [],

            # ppo
            MiddlewareScheme.Shallow: [self.dense_layer(64)],

            # dqn
            MiddlewareScheme.Medium: [self.dense_layer(512)],

            MiddlewareScheme.Deep: [
                self.dense_layer(128),
                self.dense_layer(128),
                self.dense_layer(128)
            ]
        }

    def __str__(self):
        if not self.layers_params:
            return 'No layers'
        stream = [str(l) for l in self.layers_params]
        if self.num_streams == 1:
            return '\n'.join(stream)
        # indent each stream's layers and prefix every stream with its title line
        stream = [''] + ['\t' + l for l in stream]
        result = stream * self.num_streams
        result[0::len(stream)] = ['Stream {}'.format(i) for i in range(self.num_streams)]
        return '\n'.join(result)
import numpy as np
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.middlewares.middleware import Middleware
from rl_coach.base_parameters import MiddlewareScheme
from rl_coach.core_types import Middleware_LSTM_Embedding
from rl_coach.utils import force_list
class LSTMMiddleware(Middleware):
    """
    Middleware that stacks optional dense layers (per the scheme) followed by a single-layer LSTM.

    NOTE(review): the recurrent state is fed via placeholders sized [1, state_size] — presumably
    the agent processes one trajectory at a time; confirm against the calling agent.
    """

    def __init__(self, activation_function=tf.nn.relu, number_of_lstm_cells: int=256,
                 scheme: MiddlewareScheme = MiddlewareScheme.Medium,
                 batchnorm: bool = False, dropout_rate: float = 0.0,
                 name="middleware_lstm_embedder", dense_layer=Dense, is_training=False):
        """
        :param activation_function: activation used by the pre-LSTM dense layers
        :param number_of_lstm_cells: number of units in the LSTM cell
        :param scheme: which predefined dense-layer scheme to stack before the LSTM
        :param batchnorm: whether to add batchnorm after each dense layer
        :param dropout_rate: dropout rate after each dense layer (0 disables dropout)
        :param name: variable-scope name of the middleware
        :param dense_layer: dense-layer class used when instantiating the scheme
        :param is_training: training-phase flag forwarded to batchnorm/dropout
        """
        super().__init__(activation_function=activation_function, batchnorm=batchnorm,
                         dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
                         is_training=is_training)
        self.return_type = Middleware_LSTM_Embedding
        self.number_of_lstm_cells = number_of_lstm_cells
        # built tensors, in order: input, then each pre-LSTM layer's output
        self.layers = []

    def _build_module(self):
        """
        self.state_in: tuple of placeholders containing the initial state
        self.state_out: tuple of output state

        todo: it appears that the shape of the output is batch, feature
        the code here seems to be slicing off the first element in the batch
        which would definitely be wrong. need to double check the shape
        """
        self.layers.append(self.input)

        # optionally insert some layers before the LSTM
        for idx, layer_params in enumerate(self.layers_params):
            self.layers.extend(force_list(
                layer_params(self.layers[-1], name='fc{}'.format(idx),
                             is_training=self.is_training)
            ))

        # add the LSTM layer
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.number_of_lstm_cells, state_is_tuple=True)
        # zero initial state for a single sequence (leading dimension of 1)
        self.c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
        self.h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
        self.state_init = [self.c_init, self.h_init]
        # placeholders through which the caller feeds the recurrent state between calls
        self.c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
        self.h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
        self.state_in = (self.c_in, self.h_in)

        # reinterpret the incoming batch dimension as the time dimension of one sequence:
        # (batch, features) -> (1, time, features), with sequence_length = batch size
        rnn_in = tf.expand_dims(self.layers[-1], [0])
        step_size = tf.shape(self.layers[-1])[:1]
        state_in = tf.nn.rnn_cell.LSTMStateTuple(self.c_in, self.h_in)
        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size, time_major=False)
        lstm_c, lstm_h = lstm_state
        # keep only the first row of the final state (see the shape concern in the docstring — verify)
        self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
        # flatten the outputs back to (time, lstm_cells)
        self.output = tf.reshape(lstm_outputs, [-1, self.number_of_lstm_cells])

    @property
    def schemes(self):
        return {
            MiddlewareScheme.Empty:
                [],

            # ppo
            MiddlewareScheme.Shallow:
                [
                    self.dense_layer(64)
                ],

            # dqn
            MiddlewareScheme.Medium:
                [
                    self.dense_layer(512)
                ],

            MiddlewareScheme.Deep: \
                [
                    self.dense_layer(128),
                    self.dense_layer(128),
                    self.dense_layer(128)
                ]
        }
from typing import List
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Conv2d, Dense
from rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedder
from rl_coach.base_parameters import EmbedderScheme
from rl_coach.core_types import InputTensorEmbedding
class TensorEmbedder(InputEmbedder):
    """
    A tensor embedder is an input embedder that takes a tensor with arbitrary dimension and produces a vector
    embedding by passing it through a neural network. An example is video data or 3D image data (i.e. 4D tensors)
    or other type of data that is more than 1 dimension (i.e. not vector) but is not an image.

    NOTE: There are no pre-defined schemes for tensor embedder. User must define a custom scheme by passing
    a callable object as InputEmbedderParameters.scheme when defining the respective preset. This callable
    object must accept a single input, the normalized observation, and return a Tensorflow symbol which
    will calculate an embedding vector for each sample in the batch.

    Keep in mind that the scheme is a list of Tensorflow symbols, which are stacked by optional batchnorm,
    activation, and dropout in between as specified in InputEmbedderParameters.
    """

    def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
                 scheme: EmbedderScheme=None, batchnorm: bool=False, dropout_rate: float=0.0,
                 name: str= "embedder", input_rescaling: float=1.0, input_offset: float=0.0, input_clipping=None,
                 dense_layer=Dense, is_training=False):
        """
        :param input_size: shape of the input tensor (without the batch dimension)
        :param activation_function: activation stacked between the scheme's layers
        :param scheme: mandatory custom scheme — a list of callables building the embedding
        :param batchnorm: whether to add batchnorm between the scheme's layers
        :param dropout_rate: dropout rate between the scheme's layers (0 disables dropout)
        :param name: variable-scope name of the embedder
        :param input_rescaling: divisor applied to the input before embedding
        :param input_offset: offset subtracted from the input before embedding
        :param input_clipping: optional clipping range applied to the input
        :param dense_layer: dense-layer class used by the scheme
        :param is_training: training-phase flag forwarded to batchnorm/dropout
        :raises ValueError: if no custom scheme is given
        """
        # Validate eagerly, and not with `assert`: asserts are stripped under `python -O`,
        # which would let scheme=None slip through and fail obscurely at graph-build time.
        if scheme is None:
            raise ValueError("Custom scheme (a list of callables) must be specified for TensorEmbedder")
        super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate, name, input_rescaling,
                         input_offset, input_clipping, dense_layer=dense_layer, is_training=is_training)
        self.return_type = InputTensorEmbedding

    @property
    def schemes(self):
        # no built-in schemes: the user must always supply a custom one (checked in __init__)
        return {}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.