code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from typing import List
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Conv2d, Dense
from rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedder
from rl_coach.base_parameters import EmbedderScheme
from rl_coach.core_types import InputImageEmbedding
class ImageEmbedder(InputEmbedder):
    """
    Convolutional input embedder for image-like observations.

    Runs the input through a scheme-selected stack of convolutions and flattens
    the result. Channels are expected on the last axis, and the input can be
    rescaled / offset / clipped before entering the network.
    """
    def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
                 scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout_rate: float=0.0,
                 name: str= "embedder", input_rescaling: float=255.0, input_offset: float=0.0, input_clipping=None,
                 dense_layer=Dense, is_training=False):
        super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate, name, input_rescaling,
                         input_offset, input_clipping, dense_layer=dense_layer, is_training=is_training)
        self.return_type = InputImageEmbedding
        # only the Empty scheme (no convolutions) may accept a non-3D input
        if scheme != EmbedderScheme.Empty and len(input_size) != 3:
            raise ValueError("Image embedders expect the input size to have 3 dimensions. The given size is: {}"
                             .format(input_size))

    @property
    def schemes(self):
        """Mapping from each EmbedderScheme to the list of conv layers it defines."""
        shallow_layers = [Conv2d(32, 3, 1)]
        # atari dqn
        medium_layers = [Conv2d(32, 8, 4),
                         Conv2d(64, 4, 2),
                         Conv2d(64, 3, 1)]
        # carla
        deep_layers = [Conv2d(32, 5, 2),
                       Conv2d(32, 3, 1),
                       Conv2d(64, 3, 2),
                       Conv2d(64, 3, 1),
                       Conv2d(128, 3, 2),
                       Conv2d(128, 3, 1),
                       Conv2d(256, 3, 2),
                       Conv2d(256, 3, 1)]
        return {
            EmbedderScheme.Empty: [],
            EmbedderScheme.Shallow: shallow_layers,
            EmbedderScheme.Medium: medium_layers,
            EmbedderScheme.Deep: deep_layers,
        }
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.q_head import QHead
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
class DuelingQHead(QHead):
    """
    Dueling DQN head: splits the embedding into a state-value stream V(s) and an
    action-advantage stream A(s, a), then combines them as
    Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'dueling_q_values_head'

    def _build_module(self, input_layer):
        """Build the V and A streams and merge them into the Q-values output."""
        # state value tower - V
        with tf.variable_scope("state_value"):
            self.state_value = self.dense_layer(512)(input_layer, activation=self.activation_function, name='fc1')
            self.state_value = self.dense_layer(1)(self.state_value, name='fc2')

        # action advantage tower - A
        with tf.variable_scope("action_advantage"):
            self.action_advantage = self.dense_layer(512)(input_layer, activation=self.activation_function, name='fc1')
            self.action_advantage = self.dense_layer(self.num_actions)(self.action_advantage, name='fc2')
            # subtracting the mean advantage keeps V and A identifiable (dueling DQN paper, eq. 9)
            self.action_mean = tf.reduce_mean(self.action_advantage, axis=1, keepdims=True)
            self.action_advantage = self.action_advantage - self.action_mean

        # merge to state-action value function Q
        self.q_values = self.output = tf.add(self.state_value, self.action_advantage, name='output')

        # used in batch-rl to estimate a probability distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        result = [
            "State Value Stream - V",
            "\tDense (num outputs = 512)",
            "\tDense (num outputs = 1)",
            "Action Advantage Stream - A",
            "\tDense (num outputs = 512)",
            "\tDense (num outputs = {})".format(self.num_actions),
            # fix: removed a stray .format(self.num_actions) call - the string has no placeholder
            "\tSubtract(A, Mean(A))",
            "Add (V, A)"
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import eps
# clipping range for the predicted log standard deviation, as in the SAC reference implementation
LOG_SIG_CAP_MAX = 2
LOG_SIG_CAP_MIN = -20


class SACPolicyHead(Head):
    """
    Soft Actor-Critic policy head.

    Predicts the mean and (clipped) log std of a diagonal Gaussian policy over
    continuous actions, exposes a reparameterized sample from that policy, and
    the log-probability of the sample - corrected for the tanh squash when
    `squash` is enabled (appendix C of the SAC paper).
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 squash: bool = True, dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'sac_policy_head'
        self.return_type = ActionProbabilities
        # NOTE(review): this stores the full action-space shape (other heads use shape[0]);
        # _build_continuous_net indexes it with shape[0] where a scalar is needed - confirm intent
        self.num_actions = self.spaces.action.shape  # continuous actions
        self.squash = squash  # squashing using tanh

    def _build_module(self, input_layer):
        """Build the head: action placeholder input plus the continuous policy network."""
        self.given_raw_actions = tf.placeholder(tf.float32, [None, self.num_actions], name="actions")
        self.input = [self.given_raw_actions]
        self.output = []
        # build the network
        self._build_continuous_net(input_layer, self.spaces.action)

    def _squash_correction(self, actions):
        """
        Correct the squash operation (in case of bounded actions) according to appendix C in the paper.
        NOTE: this correction assumes the squash is done with tanh.
        :param actions: unbounded actions
        :return: the correction to be applied to the log_prob of the actions, assuming tanh squash
        """
        if not self.squash:
            return 0
        # sum over action dims of log(1 - tanh(a)^2); eps guards against log(0)
        return tf.reduce_sum(tf.log(1 - tf.tanh(actions) ** 2 + eps), axis=1)

    def _build_continuous_net(self, input_layer, action_space):
        """Build the Gaussian policy outputs, the sampling ops and their log-probabilities."""
        num_actions = action_space.shape[0]

        # a single dense layer predicts both the mean and the log std, split along the last axis
        self.policy_mu_and_logsig = self.dense_layer(2*num_actions)(input_layer, name='policy_mu_logsig')
        self.policy_mean = tf.identity(self.policy_mu_and_logsig[..., :num_actions], name='policy_mean')
        self.policy_log_std = tf.clip_by_value(self.policy_mu_and_logsig[..., num_actions:],
                                               LOG_SIG_CAP_MIN, LOG_SIG_CAP_MAX, name='policy_log_std')
        self.output.append(self.policy_mean)      # output[0]
        self.output.append(self.policy_log_std)   # output[1]

        # define the distributions for the policy
        # Tensorflow's multivariate normal distribution supports reparameterization
        tfd = tf.contrib.distributions
        self.policy_distribution = tfd.MultivariateNormalDiag(loc=self.policy_mean,
                                                              scale_diag=tf.exp(self.policy_log_std))

        # define network outputs
        # note that tensorflow supports reparametrization:
        # policy_action_sample is a tensor through which gradients can flow
        self.raw_actions = self.policy_distribution.sample()
        if self.squash:
            self.actions = tf.tanh(self.raw_actions)
            # correct log_prob in case of squash (see appendix C in the paper)
            squash_correction = self._squash_correction(self.raw_actions)
        else:
            self.actions = self.raw_actions
            squash_correction = 0

        # policy_action_logprob is a tensor through which gradients can flow
        self.sampled_actions_logprob = self.policy_distribution.log_prob(self.raw_actions) - squash_correction
        self.sampled_actions_logprob_mean = tf.reduce_mean(self.sampled_actions_logprob)

        self.output.append(self.raw_actions)                    # output[2]: sampled raw action (before squash)
        self.output.append(self.actions)                        # output[3]: squashed (if needed) sampled raw_actions
        self.output.append(self.sampled_actions_logprob)        # output[4]: log prob of sampled action (squash corrected)
        self.output.append(self.sampled_actions_logprob_mean)   # output[5]: mean of log prob of sampled actions

    def __str__(self):
        # fix: a comma was missing after "policy head:", which silently concatenated it
        # with the following line via implicit string literal concatenation
        result = [
            "policy head:",
            "\t\tDense (num outputs = 256)",
            "\t\tDense (num outputs = 256)",
            "\t\tDense (num outputs = {0})".format(2*self.num_actions),
            "policy_mu = output[:num_actions], policy_std = output[num_actions:]"
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import BoxActionSpace
from rl_coach.spaces import SpacesDefinition
class NAFHead(Head):
    """
    Normalized Advantage Functions (NAF) head for continuous-action Q-learning.

    Decomposes Q(s, u) = V(s) + A(s, u) with
    A(s, u) = -1/2 (u - mu(s))^T P(s) (u - mu(s)), where P = L * L^T and L is a
    lower-triangular matrix (with exponentiated diagonal) predicted by the network.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        if not isinstance(self.spaces.action, BoxActionSpace):
            raise ValueError("NAF works only for continuous action spaces (BoxActionSpace)")
        self.name = 'naf_q_values_head'
        self.num_actions = self.spaces.action.shape[0]
        self.output_scale = self.spaces.action.max_abs_range
        self.return_type = QActionStateValue
        if agent_parameters.network_wrappers[self.network_name].replace_mse_with_huber_loss:
            self.loss_type = tf.losses.huber_loss
        else:
            self.loss_type = tf.losses.mean_squared_error

    def _build_module(self, input_layer):
        """Build the V, mu and A streams and combine them into the Q output."""
        # NAF
        self.action = tf.placeholder(tf.float32, [None, self.num_actions], name="action")
        self.input = self.action

        # V Head
        self.V = self.dense_layer(1)(input_layer, name='V')

        # mu Head
        mu_unscaled = self.dense_layer(self.num_actions)(input_layer, activation=self.activation_function,
                                                         name='mu_unscaled')
        self.mu = tf.multiply(mu_unscaled, self.output_scale, name='mu')

        # A Head
        # l_vector is a vector that holds the values of a lower-triangular matrix.
        # bug fix: use integer (floor) division - under Python 3, `/` yields a float,
        # which is not a valid number of units for a dense layer
        num_l_elements = (self.num_actions * (self.num_actions + 1)) // 2
        self.l_vector = self.dense_layer(num_l_elements)(input_layer, name='l_vector')

        # Convert l to a lower triangular matrix and exponentiate its diagonal
        i = 0
        columns = []
        for col in range(self.num_actions):
            start_row = col
            num_non_zero_elements = self.num_actions - start_row
            zeros_column_part = tf.zeros_like(self.l_vector[:, 0:start_row])
            # exponentiating the diagonal keeps it strictly positive, so P is positive definite
            diag_element = tf.expand_dims(tf.exp(self.l_vector[:, i]), 1)
            non_zeros_non_diag_column_part = self.l_vector[:, (i + 1):(i + num_non_zero_elements)]
            columns.append(tf.concat([zeros_column_part, diag_element, non_zeros_non_diag_column_part], axis=1))
            i += num_non_zero_elements
        self.L = tf.transpose(tf.stack(columns, axis=1), (0, 2, 1))

        # P = L*L^T
        self.P = tf.matmul(self.L, tf.transpose(self.L, (0, 2, 1)))

        # A = -1/2 * (u - mu)^T * P * (u - mu)
        action_diff = tf.expand_dims(self.action - self.mu, -1)
        a_matrix_form = -0.5 * tf.matmul(tf.transpose(action_diff, (0, 2, 1)), tf.matmul(self.P, action_diff))
        self.A = tf.reshape(a_matrix_form, [-1, 1])

        # Q Head
        self.Q = tf.add(self.V, self.A, name='Q')
        self.output = self.Q

    def __str__(self):
        result = [
            "State Value Stream - V",
            "\tDense (num outputs = 1)",
            "Action Advantage Stream - A",
            "\tDense (num outputs = {})".format((self.num_actions * (self.num_actions + 1)) // 2),
            "\tReshape to lower triangular matrix L (new size = {} x {})".format(self.num_actions, self.num_actions),
            "\tP = L*L^T",
            "\tA = -1/2 * (u - mu)^T * P * (u - mu)",
            "Action Stream - mu",
            "\tDense (num outputs = {})".format(self.num_actions),
            "\tActivation (type = {})".format(self.activation_function.__name__),
            "\tMultiply (factor = {})".format(self.output_scale),
            "State-Action Value Stream - Q",
            "\tAdd (V, A)"
        ]
        return '\n'.join(result)
import numpy as np
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.exploration_policies.continuous_entropy import ContinuousEntropyParameters
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace, CompoundActionSpace
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import eps, indent_string
class PolicyHead(Head):
    """
    Policy head producing a parametric action distribution for policy-gradient agents.

    Supports discrete action spaces (softmax over actions), continuous box action
    spaces (Gaussian mean + stdev) and compound action spaces made of several
    sub-spaces (one sub-network per sub-space). When built as the local network it
    also defines the policy-gradient loss -E[log pi(a|s) * advantage], optional
    entropy regularization (weighted by beta) and an optional penalty on the
    pre-activation values of the mean output layer.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='tanh',
                 dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'policy_values_head'
        self.return_type = ActionProbabilities
        self.beta = None            # entropy regularization weight (tf.Variable) - stays None when unused
        self.action_penalty = None  # pre-activation penalty coefficient - stays None when unused
        self.exploration_policy = agent_parameters.exploration
        # a scalar weight that penalizes low entropy values to encourage exploration
        if hasattr(agent_parameters.algorithm, 'beta_entropy'):
            # we set the beta value as a tf variable so it can be updated later if needed
            self.beta = tf.Variable(float(agent_parameters.algorithm.beta_entropy),
                                    trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
            self.beta_placeholder = tf.placeholder('float')
            self.set_beta = tf.assign(self.beta, self.beta_placeholder)
        # a scalar weight that penalizes high activation values (before the activation function) for the final layer
        if hasattr(agent_parameters.algorithm, 'action_penalty'):
            self.action_penalty = agent_parameters.algorithm.action_penalty

    def _build_module(self, input_layer):
        """Build one sub-network per (sub-)action space, then the loss ops on the local network."""
        self.actions = []
        self.input = self.actions
        self.policy_distributions = []
        self.output = []

        action_spaces = [self.spaces.action]
        if isinstance(self.spaces.action, CompoundActionSpace):
            action_spaces = self.spaces.action.sub_action_spaces

        # create a compound action network
        for action_space_idx, action_space in enumerate(action_spaces):
            with tf.variable_scope("sub_action_{}".format(action_space_idx)):
                if isinstance(action_space, DiscreteActionSpace):
                    # create a discrete action network (softmax probabilities output)
                    self._build_discrete_net(input_layer, action_space)
                elif isinstance(action_space, BoxActionSpace):
                    # create a continuous action network (bounded mean and stdev outputs)
                    self._build_continuous_net(input_layer, action_space)

        # the loss and regularizers are only defined on the local (trained) network copy
        if self.is_local:
            # add entropy regularization
            # NOTE(review): relies on a tf.Variable object being truthy when set (vs. None) -
            # `self.beta is not None` would state the intent more directly
            if self.beta:
                self.entropy = tf.add_n([tf.reduce_mean(dist.entropy()) for dist in self.policy_distributions])
                self.regularizations += [-tf.multiply(self.beta, self.entropy, name='entropy_regularization')]

            # calculate loss: joint log-prob is the sum of per-sub-space log-probs
            self.action_log_probs_wrt_policy = \
                tf.add_n([dist.log_prob(action) for dist, action in zip(self.policy_distributions, self.actions)])
            self.advantages = tf.placeholder(tf.float32, [None], name="advantages")
            self.target = self.advantages
            self.loss = -tf.reduce_mean(self.action_log_probs_wrt_policy * self.advantages)
            tf.losses.add_loss(self.loss_weight[0] * self.loss)

    def _build_discrete_net(self, input_layer, action_space):
        """Add a softmax policy output and a Categorical distribution for a discrete action space."""
        num_actions = len(action_space.actions)
        self.actions.append(tf.placeholder(tf.int32, [None], name="actions"))

        policy_values = self.dense_layer(num_actions)(input_layer, name='fc')
        self.policy_probs = tf.nn.softmax(policy_values, name="policy")

        # define the distributions for the policy and the old policy
        # (the + eps is to prevent probability 0 which will cause the log later on to be -inf)
        policy_distribution = tf.contrib.distributions.Categorical(probs=(self.policy_probs + eps))
        self.policy_distributions.append(policy_distribution)
        self.output.append(self.policy_probs)

    def _build_continuous_net(self, input_layer, action_space):
        """Add Gaussian mean/stdev outputs and a MultivariateNormalDiag distribution
        for a continuous (box) action space."""
        # NOTE(review): action_space.shape is used directly as the action count here
        # (placeholder width, dense layer size) - confirm it behaves as a scalar for these spaces
        num_actions = action_space.shape
        self.actions.append(tf.placeholder(tf.float32, [None, num_actions], name="actions"))

        # output activation function
        if np.all(action_space.max_abs_range < np.inf):
            # bounded actions
            self.output_scale = action_space.max_abs_range
            self.continuous_output_activation = self.activation_function
        else:
            # unbounded actions
            self.output_scale = 1
            self.continuous_output_activation = None

        # mean
        pre_activation_policy_values_mean = self.dense_layer(num_actions)(input_layer, name='fc_mean')
        # NOTE(review): when actions are unbounded, continuous_output_activation is None and
        # this call would fail - looks like only bounded spaces are expected here; confirm
        policy_values_mean = self.continuous_output_activation(pre_activation_policy_values_mean)
        self.policy_mean = tf.multiply(policy_values_mean, self.output_scale, name='output_mean')

        self.output.append(self.policy_mean)

        # standard deviation
        if isinstance(self.exploration_policy, ContinuousEntropyParameters):
            # the stdev is an output of the network and uses a softplus activation as defined in A3C
            policy_values_std = self.dense_layer(num_actions)(input_layer,
                                                              kernel_initializer=normalized_columns_initializer(0.01),
                                                              name='fc_std')
            self.policy_std = tf.nn.softplus(policy_values_std, name='output_variance') + eps

            self.output.append(self.policy_std)
        else:
            # the stdev is an externally given value
            # Warning: we need to explicitly put this variable in the local variables collections, since defining
            # it as not trainable puts it for some reason in the global variables collections. If this is not done,
            # the variable won't be initialized and when working with multiple workers they will get stuck.
            self.policy_std = tf.Variable(np.ones(num_actions), dtype='float32', trainable=False,
                                          name='policy_stdev', collections=[tf.GraphKeys.LOCAL_VARIABLES])

            # assign op for the policy std
            self.policy_std_placeholder = tf.placeholder('float32', (num_actions,))
            self.assign_policy_std = tf.assign(self.policy_std, self.policy_std_placeholder)

        # define the distributions for the policy and the old policy
        policy_distribution = tf.contrib.distributions.MultivariateNormalDiag(self.policy_mean, self.policy_std)
        self.policy_distributions.append(policy_distribution)

        if self.is_local:
            # add a squared penalty on the squared pre-activation features of the action
            if self.action_penalty and self.action_penalty != 0:
                self.regularizations += [
                    self.action_penalty * tf.reduce_mean(tf.square(pre_activation_policy_values_mean))]

    def __str__(self):
        """Human-readable description of the head, one section per (sub-)action space."""
        action_spaces = [self.spaces.action]
        if isinstance(self.spaces.action, CompoundActionSpace):
            action_spaces = self.spaces.action.sub_action_spaces

        result = []
        for action_space_idx, action_space in enumerate(action_spaces):
            action_head_mean_result = []
            if isinstance(action_space, DiscreteActionSpace):
                # create a discrete action network (softmax probabilities output)
                action_head_mean_result.append("Dense (num outputs = {})".format(len(action_space.actions)))
                action_head_mean_result.append("Softmax")
            elif isinstance(action_space, BoxActionSpace):
                # create a continuous action network (bounded mean and stdev outputs)
                action_head_mean_result.append("Dense (num outputs = {})".format(action_space.shape))
                if np.all(action_space.max_abs_range < np.inf):
                    # bounded actions
                    action_head_mean_result.append("Activation (type = {})".format(self.activation_function.__name__))
                    action_head_mean_result.append("Multiply (factor = {})".format(action_space.max_abs_range))

            action_head_stdev_result = []
            if isinstance(self.exploration_policy, ContinuousEntropyParameters):
                action_head_stdev_result.append("Dense (num outputs = {})".format(action_space.shape))
                action_head_stdev_result.append("Softplus")

            action_head_result = []
            if action_head_stdev_result:
                action_head_result.append("Mean Stream")
                action_head_result.append(indent_string('\n'.join(action_head_mean_result)))
                action_head_result.append("Stdev Stream")
                action_head_result.append(indent_string('\n'.join(action_head_stdev_result)))
            else:
                action_head_result.append('\n'.join(action_head_mean_result))

            if len(action_spaces) > 1:
                result.append("Action head {}".format(action_space_idx))
                result.append(indent_string('\n'.join(action_head_result)))
            else:
                result.append('\n'.join(action_head_result))

        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import SpacesDefinition, BoxActionSpace
class SACQHead(Head):
    """
    Soft Actor-Critic Q head.

    Builds two independent Q towers q1(s, a) and q2(s, a) on top of the same
    state embedding and action placeholder, and outputs min(q1, q2) ("clipped
    double Q") together with its batch mean. The loss is the sum of the two
    towers' MSE losses against a shared target.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'q_values_head'
        if isinstance(self.spaces.action, BoxActionSpace):
            # NOTE(review): stores the full action-space shape (other heads use shape[0]) - confirm
            self.num_actions = self.spaces.action.shape  # continuous actions
        else:
            raise ValueError(
                'SACQHead does not support action spaces of type: {class_name}'.format(
                    class_name=self.spaces.action.__class__.__name__,
                )
            )
        self.return_type = QActionStateValue
        # extract the topology from the SACQHeadParameters
        self.network_layers_sizes = agent_parameters.network_wrappers['q'].heads_parameters[0].network_layers_sizes
        self.output_bias_initializer = output_bias_initializer

    def _build_q_tower(self, input_layer, scope_name, output_name):
        """Build one Q tower under `scope_name` and return its scalar output tensor.

        Note (1): in the author's implementation of sac (in rllab) the embeddings of the
        observation and the action are summed (broadcasting the bias) in the first layer
        of the network.
        """
        with tf.variable_scope(scope_name):
            first_layer_size = self.network_layers_sizes[0]
            obs_embedding = self.dense_layer(first_layer_size)(input_layer, activation=self.activation_function)
            act_embedding = self.dense_layer(first_layer_size)(self.actions, activation=self.activation_function)
            hidden = obs_embedding + act_embedding  # merging the inputs by summing them (see Note (1))
            for layer_size in self.network_layers_sizes[1:]:
                hidden = self.dense_layer(layer_size)(hidden, activation=self.activation_function)
            # the output layer
            return self.dense_layer(1)(hidden, name=output_name,
                                       bias_initializer=self.output_bias_initializer)

    def _build_module(self, input_layer):
        # SAC Q network is basically 2 networks running in parallel on the same (state, action)
        # input: the state is the observation fed through input_layer, and the action is fed
        # through a placeholder directly to the head. The head's output is min(q1, q2).
        self.actions = tf.placeholder(tf.float32, [None, self.num_actions], name="actions")
        self.target = tf.placeholder(tf.float32, [None, 1], name="q_targets")
        self.input = [self.actions]
        self.output = []

        # the two towers share structure but not parameters (previously duplicated inline)
        self.q1_output = self._build_q_tower(input_layer, "q1_head", 'q1_output')
        self.q2_output = self._build_q_tower(input_layer, "q2_head", 'q2_output')

        # take the minimum as the network's output. this is the log_target (in the original implementation)
        self.q_output = tf.minimum(self.q1_output, self.q2_output, name='q_output')
        # the policy gradients
        # self.q_output_mean = tf.reduce_mean(self.q1_output)  # option 1: use q1
        self.q_output_mean = tf.reduce_mean(self.q_output)  # option 2: use min(q1,q2)
        self.output.append(self.q_output)
        self.output.append(self.q_output_mean)

        # defining the loss: both towers regress to the same target; since the two losses
        # depend on disjoint parameters they can simply be summed
        self.q1_loss = 0.5 * tf.reduce_mean(tf.square(self.q1_output - self.target))
        self.q2_loss = 0.5 * tf.reduce_mean(tf.square(self.q2_output - self.target))
        self.loss = self.q1_loss + self.q2_loss
        tf.losses.add_loss(self.loss)

    def __str__(self):
        # fix: commas were missing after the "q1 output" / "q2 output" headers, which caused
        # implicit string literal concatenation with the following line
        result = [
            "q1 output",
            "\t\tDense (num outputs = 256)",
            "\t\tDense (num outputs = 256)",
            "\t\tDense (num outputs = 1)",
            "q2 output",
            "\t\tDense (num outputs = 256)",
            "\t\tDense (num outputs = 256)",
            "\t\tDense (num outputs = 1)",
            "min(Q1,Q2)"
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import VStateValue
from rl_coach.spaces import SpacesDefinition
class VHead(Head):
    """
    State-value head: a single dense unit producing V(s), trained with MSE
    (or Huber loss when the network wrapper requests it).
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, initializer='normalized_columns', output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'v_values_head'
        self.return_type = VStateValue
        # network_name may be scoped ("wrapper/head"); the wrapper settings live under the first segment
        if agent_parameters.network_wrappers[self.network_name.split('/')[0]].replace_mse_with_huber_loss:
            self.loss_type = tf.losses.huber_loss
        else:
            self.loss_type = tf.losses.mean_squared_error
        self.initializer = initializer
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        """Build the single-unit value output with the requested kernel initializer."""
        # Standard V Network
        if self.initializer == 'normalized_columns':
            self.output = self.dense_layer(1)(input_layer, name='output',
                                              kernel_initializer=normalized_columns_initializer(1.0),
                                              bias_initializer=self.output_bias_initializer)
        elif self.initializer == 'xavier' or self.initializer is None:
            self.output = self.dense_layer(1)(input_layer, name='output',
                                              bias_initializer=self.output_bias_initializer)
        else:
            # previously an unknown initializer silently left self.output unset, surfacing later
            # as a confusing downstream error; fail fast with an explicit message instead
            raise ValueError("Unknown initializer: {}. Supported initializers are: "
                             "'normalized_columns', 'xavier'".format(self.initializer))

    def __str__(self):
        result = [
            "Dense (num outputs = 1)"
        ]
        return '\n'.join(result)
import tensorflow as tf
import numpy as np
from rl_coach.architectures.tensorflow_components.heads import QHead
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
class CategoricalQHead(QHead):
    """
    Distributional (categorical / C51) Q head: predicts, per action, a softmax
    distribution over a fixed support of `atoms` value points in [v_min, v_max],
    and is trained with a cross-entropy loss against target distributions.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str ='relu',
                 dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer, output_bias_initializer=output_bias_initializer)
        self.name = 'categorical_dqn_head'
        self.num_actions = len(self.spaces.action.actions)
        self.num_atoms = agent_parameters.algorithm.atoms
        # support of the value distribution: `atoms` evenly spaced points in [v_min, v_max]
        support = np.linspace(self.ap.algorithm.v_min, self.ap.algorithm.v_max, self.ap.algorithm.atoms)
        self.z_values = tf.cast(tf.constant(support, dtype=tf.float32), dtype=tf.float64)
        self.loss_type = []

    def _build_module(self, input_layer):
        """Build the per-action atom logits, the cross-entropy loss, and the expected Q values."""
        logits = self.dense_layer(self.num_actions * self.num_atoms)(
            input_layer, name='output', bias_initializer=self.output_bias_initializer)
        logits = tf.reshape(logits, (tf.shape(logits)[0], self.num_actions, self.num_atoms))
        # softmax on atoms dimension
        self.output = tf.nn.softmax(logits)

        # calculate cross entropy loss against the projected target distributions
        self.distributions = tf.placeholder(tf.float32, shape=(None, self.num_actions, self.num_atoms),
                                            name="distributions")
        self.target = self.distributions
        self.loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.target, logits=logits)
        tf.losses.add_loss(self.loss)

        # expected Q values: probability-weighted sum over the support points
        self.q_values = tf.tensordot(tf.cast(self.output, tf.float64), self.z_values, 1)

        # used in batch-rl to estimate a probability distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        lines = [
            "Dense (num outputs = {})".format(self.num_actions * self.num_atoms),
            "Reshape (output size = {} x {})".format(self.num_actions, self.num_atoms),
            "Softmax",
        ]
        return '\n'.join(lines)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.q_head import QHead
from rl_coach.base_parameters import AgentParameters
from rl_coach.memories.non_episodic import differentiable_neural_dictionary
from rl_coach.spaces import SpacesDefinition
class DNDQHead(QHead):
    """
    Q head backed by a Differentiable Neural Dictionary (DND), as used by NEC-style
    agents: Q values are computed from a k-nearest-neighbor lookup of the state
    embedding in a per-action external memory, with inverse-distance weighting.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'dnd_q_values_head'
        self.DND_size = agent_parameters.algorithm.dnd_size
        self.DND_key_error_threshold = agent_parameters.algorithm.DND_key_error_threshold
        # small constant added to squared distances so inverse-distance weights stay finite
        self.l2_norm_added_delta = agent_parameters.algorithm.l2_norm_added_delta
        self.new_value_shift_coefficient = agent_parameters.algorithm.new_value_shift_coefficient
        self.number_of_nn = agent_parameters.algorithm.number_of_knn
        self.ap = agent_parameters
        # per-action tensors filled in by _q_value() during graph construction
        self.dnd_embeddings = [None] * self.num_actions
        self.dnd_values = [None] * self.num_actions
        self.dnd_indices = [None] * self.num_actions
        self.dnd_distances = [None] * self.num_actions
        if self.ap.memory.shared_memory:
            self.shared_memory_scratchpad = self.ap.task_parameters.shared_memory_scratchpad

    def _build_module(self, input_layer):
        """Create (or restore) the DND and build one Q-value tensor per action."""
        # restore a persisted DND when resuming from a checkpoint, otherwise start fresh
        if hasattr(self.ap.task_parameters, 'checkpoint_restore_path') and\
                self.ap.task_parameters.checkpoint_restore_path:
            self.DND = differentiable_neural_dictionary.load_dnd(self.ap.task_parameters.checkpoint_restore_path)
        else:
            self.DND = differentiable_neural_dictionary.QDND(
                self.DND_size, input_layer.get_shape()[-1], self.num_actions, self.new_value_shift_coefficient,
                key_error_threshold=self.DND_key_error_threshold,
                learning_rate=self.network_parameters.learning_rate,
                num_neighbors=self.number_of_nn,
                override_existing_keys=True)

        # Retrieve info from DND dictionary
        # We assume that all actions have enough entries in the DND
        self.q_values = self.output = tf.transpose([
            self._q_value(input_layer, action)
            for action in range(self.num_actions)
        ])

        # used in batch-rl to estimate a probablity distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def _q_value(self, input_layer, action):
        """Build the Q(s, action) tensor: k-NN lookup via py_func, then
        inverse-distance weighted average of the retrieved values."""
        # the DND query runs in Python (py_func); it returns the neighbor embeddings,
        # their stored values, and their indices in the dictionary
        result = tf.py_func(self.DND.query,
                            [input_layer, action, self.number_of_nn],
                            [tf.float64, tf.float64, tf.int64])
        self.dnd_embeddings[action] = tf.to_float(result[0])
        self.dnd_values[action] = tf.to_float(result[1])
        self.dnd_indices[action] = result[2]

        # DND calculation: squared L2 distance from the query embedding to each neighbor,
        # shifted by a small delta to keep the inverse-distance weights finite
        square_diff = tf.square(self.dnd_embeddings[action] - tf.expand_dims(input_layer, 1))
        distances = tf.reduce_sum(square_diff, axis=2) + [self.l2_norm_added_delta]
        self.dnd_distances[action] = distances
        weights = 1.0 / distances
        normalised_weights = weights / tf.reduce_sum(weights, axis=1, keep_dims=True)
        q_value = tf.reduce_sum(self.dnd_values[action] * normalised_weights, axis=1)
        # py_func loses static shape information, so restore the (batch,) shape explicitly
        q_value.set_shape((None,))
        return q_value

    def _post_build(self):
        # DND gradients: gradients of the loss w.r.t. the retrieved embeddings and values,
        # applied to the external dictionary outside the TF optimizer
        self.dnd_embeddings_grad = tf.gradients(self.loss[0], self.dnd_embeddings)
        self.dnd_values_grad = tf.gradients(self.loss[0], self.dnd_values)

    def __str__(self):
        result = [
            "DND fetch (num outputs = {})".format(self.num_actions)
        ]
        return '\n'.join(result)
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.losses.losses_impl import Reduction
from rl_coach.architectures.tensorflow_components.layers import Dense, convert_layer_class
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import force_list
from rl_coach.architectures.tensorflow_components.utils import squeeze_tensor
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
    """Return an initializer sampling a random matrix whose columns have L2 norm `std`.

    Commonly used to initialize the weights of policy and value output layers.
    """
    def _initializer(shape, dtype=None, partition_info=None):
        values = np.random.randn(*shape).astype(np.float32)
        # rescale so that every column ends up with an L2 norm of exactly `std`
        column_norms = np.sqrt(np.square(values).sum(axis=0, keepdims=True))
        values *= std / column_norms
        return tf.constant(values)
    return _initializer
class Head(object):
    """
    A head is the final part of the network. It takes the embedding from the middleware embedder and passes it through
    a neural network to produce the output of the network. There can be multiple heads in a network, and each one has
    an assigned loss function. The heads are algorithm dependent.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int=0, loss_weight: float=1., is_local: bool=True, activation_function: str='relu',
                 dense_layer=Dense, is_training=False):
        """
        :param agent_parameters: the agent parameters, used to fetch this head's network parameters
        :param spaces: the definition of the state and action spaces
        :param network_name: the name of the network wrapper this head is part of
        :param head_idx: the index of the head within the network (used for scoping the head's variables)
        :param loss_weight: scalar (or list of scalars) weighting this head's loss against other heads' losses
        :param is_local: True for the local (trained) network copy; only the local copy defines a loss
        :param activation_function: the activation function used by layers defined in subclasses
        :param dense_layer: the dense layer class to use (converted to the coach tf layer wrapper)
        :param is_training: a flag (or placeholder) signifying whether the network is being trained
        """
        self.head_idx = head_idx
        self.network_name = network_name
        self.network_parameters = agent_parameters.network_wrappers[self.network_name]
        self.name = "head"
        self.output = []
        self.loss = []
        self.loss_type = []
        self.regularizations = []
        # the loss weight is stored in a non-trainable variable so it can be updated during training
        self.loss_weight = tf.Variable([float(w) for w in force_list(loss_weight)],
                                       trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
        self.target = []
        self.importance_weight = []
        self.input = []
        self.is_local = is_local
        self.ap = agent_parameters
        self.spaces = spaces
        self.return_type = None
        self.activation_function = activation_function
        self.dense_layer = dense_layer
        if self.dense_layer is None:
            self.dense_layer = Dense
        else:
            # allow passing framework-agnostic layer classes; convert them to the tf implementation
            self.dense_layer = convert_layer_class(self.dense_layer)
        self.is_training = is_training

    def __call__(self, input_layer):
        """
        Wrapper for building the module graph including scoping and loss creation
        :param input_layer: the input to the graph
        :return: the output of the last layer and the target placeholder
        """
        with tf.variable_scope(self.get_name(), initializer=tf.contrib.layers.xavier_initializer()):
            self._build_module(squeeze_tensor(input_layer))

            # subclasses may set scalars or lists; normalize everything to lists
            self.output = force_list(self.output)
            self.target = force_list(self.target)
            self.input = force_list(self.input)
            self.loss_type = force_list(self.loss_type)
            self.loss = force_list(self.loss)
            self.regularizations = force_list(self.regularizations)
            if self.is_local:
                self.set_loss()
            self._post_build()

        if self.is_local:
            return self.output, self.target, self.input, self.importance_weight
        else:
            return self.output, self.input

    def _build_module(self, input_layer):
        """
        Builds the graph of the module
        This method is called early on from __call__. It is expected to store the graph
        in self.output.
        :param input_layer: the input to the graph
        :return: None
        """
        pass

    def _post_build(self):
        """
        Optional function that allows adding any extra definitions after the head has been fully defined
        For example, this allows doing additional calculations that are based on the loss
        :return: None
        """
        pass

    def get_name(self):
        """
        Get a formatted name for the module
        :return: the formatted name
        """
        return '{}_{}'.format(self.name, self.head_idx)

    def set_loss(self):
        """
        Creates a target placeholder and loss function for each loss_type and regularization
        :param loss_type: a tensorflow loss function
        :param scope: the name scope to include the tensors in
        :return: None
        """
        # there are heads that define the loss internally, but we need to create additional placeholders for them
        for idx in range(len(self.loss)):
            importance_weight = tf.placeholder('float',
                                               [None] + [1] * (len(self.target[idx].shape) - 1),
                                               '{}_importance_weight'.format(self.get_name()))
            self.importance_weight.append(importance_weight)

        # add losses and target placeholder
        for idx in range(len(self.loss_type)):
            # create target placeholder
            target = tf.placeholder('float', self.output[idx].shape, '{}_target'.format(self.get_name()))
            self.target.append(target)

            # create importance sampling weights placeholder
            num_target_dims = len(self.target[idx].shape)
            importance_weight = tf.placeholder('float', [None] + [1] * (num_target_dims - 1),
                                               '{}_importance_weight'.format(self.get_name()))
            self.importance_weight.append(importance_weight)

            # compute the weighted loss. importance_weight weights over the samples in the batch, while self.loss_weight
            # weights the specific loss of this head against other losses in this head or in other heads
            loss_weight = self.loss_weight[idx]*importance_weight
            # use the target placeholder that was just created for this loss (it is also self.target[-1])
            loss = self.loss_type[idx](target, self.output[idx],
                                       scope=self.get_name(), reduction=Reduction.NONE, loss_collection=None)

            # the loss is first summed over each sample in the batch and then the mean over the batch is taken
            loss = tf.reduce_mean(loss_weight*tf.reduce_sum(loss, axis=list(range(1, num_target_dims))))

            # we add the loss to the losses collection and later we will extract it in general_network
            tf.losses.add_loss(loss)

            self.loss.append(loss)

        # add regularizations
        for regularization in self.regularizations:
            self.loss.append(regularization)
            tf.losses.add_loss(regularization)

    @classmethod
    def path(cls):
        """Return the name of the concrete head class."""
        # cls is already the class object here; cls.__class__.__name__ would be the
        # metaclass name (always 'type'), so the class name is cls.__name__
        return cls.__name__
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import DiscreteActionSpace
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import eps
class ACERPolicyHead(Head):
    """
    The ACER policy head: a softmax policy over a discrete action space, trained with truncated
    importance sampling with bias correction, and optionally with trust region optimization against
    a slowly-moving average policy.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'acer_policy_head'
        self.return_type = ActionProbabilities
        self.beta = None
        self.action_penalty = None

        # a scalar weight that penalizes low entropy values to encourage exploration
        if hasattr(agent_parameters.algorithm, 'beta_entropy'):
            # we set the beta value as a tf variable so it can be updated later if needed
            self.beta = tf.Variable(float(agent_parameters.algorithm.beta_entropy),
                                    trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
            self.beta_placeholder = tf.placeholder('float')
            self.set_beta = tf.assign(self.beta, self.beta_placeholder)

    def _build_module(self, input_layer):
        """Build the softmax policy and, for the local network, the ACER loss terms."""
        if isinstance(self.spaces.action, DiscreteActionSpace):
            # create a discrete action network (softmax probabilities output)
            self._build_discrete_net(input_layer, self.spaces.action)
        else:
            raise ValueError("only discrete action spaces are supported for ACER")

        if self.is_local:
            # add entropy regularization
            # NOTE(review): self.beta is a tf.Variable when beta_entropy is configured (None otherwise);
            # this truthiness test presumably relies on the variable object being truthy - confirm
            if self.beta:
                self.entropy = tf.reduce_mean(self.policy_distribution.entropy())
                self.regularizations += [-tf.multiply(self.beta, self.entropy, name='entropy_regularization')]

            # Truncated importance sampling with bias corrections
            # per-action importance ratios, and the ratio of the taken action
            importance_sampling_weight = tf.placeholder(tf.float32, [None, self.num_actions],
                                                        name='{}_importance_sampling_weight'.format(self.get_name()))
            self.input.append(importance_sampling_weight)
            importance_sampling_weight_i = tf.placeholder(tf.float32, [None],
                                                          name='{}_importance_sampling_weight_i'.format(self.get_name()))
            self.input.append(importance_sampling_weight_i)
            V_values = tf.placeholder(tf.float32, [None], name='{}_V_values'.format(self.get_name()))
            self.target.append(V_values)
            Q_values = tf.placeholder(tf.float32, [None, self.num_actions], name='{}_Q_values'.format(self.get_name()))
            self.input.append(Q_values)
            Q_retrace = tf.placeholder(tf.float32, [None], name='{}_Q_retrace'.format(self.get_name()))
            self.input.append(Q_retrace)

            # main policy-gradient term: log pi(a|s) * (Q_retrace - V), with the importance
            # ratio of the taken action truncated at importance_weight_truncation
            action_log_probs_wrt_policy = self.policy_distribution.log_prob(self.actions)
            self.probability_loss = -tf.reduce_mean(action_log_probs_wrt_policy
                                                    * (Q_retrace - V_values)
                                                    * tf.minimum(self.ap.algorithm.importance_weight_truncation,
                                                                 importance_sampling_weight_i))

            # bias-correction term: summed over all actions; the relu factor is zero for actions
            # whose importance ratio is below the truncation constant
            log_probs_wrt_policy = tf.log(self.policy_probs + eps)
            bias_correction_gain = tf.reduce_sum(log_probs_wrt_policy
                                                 * (Q_values - tf.expand_dims(V_values, 1))
                                                 * tf.nn.relu(1.0 - (self.ap.algorithm.importance_weight_truncation
                                                                     / (importance_sampling_weight + eps)))
                                                 * tf.stop_gradient(self.policy_probs),
                                                 axis=1)
            self.bias_correction_loss = -tf.reduce_mean(bias_correction_gain)

            self.loss = self.probability_loss + self.bias_correction_loss
            tf.losses.add_loss(self.loss)

            # Trust region
            batch_size = tf.to_float(tf.shape(input_layer)[0])
            average_policy = tf.placeholder(tf.float32, [None, self.num_actions],
                                            name='{}_average_policy'.format(self.get_name()))
            self.input.append(average_policy)
            average_policy_distribution = tf.contrib.distributions.Categorical(probs=(average_policy + eps))
            # KL between the slowly-moving average policy and the current policy
            self.kl_divergence = tf.reduce_mean(tf.distributions.kl_divergence(average_policy_distribution,
                                                                               self.policy_distribution))

            if self.ap.algorithm.use_trust_region_optimization:
                @tf.custom_gradient
                def trust_region_layer(x):
                    # identity in the forward pass; the backward pass projects the incoming gradient
                    # so the step stays within max_KL_divergence from the average policy
                    def grad(g):
                        # scale up by the batch size, project per sample, then scale back down
                        # (presumably undoing a mean-over-batch in the incoming gradient - verify)
                        g = - g * batch_size
                        # k is the gradient of the KL divergence w.r.t. the policy probabilities
                        k = - average_policy / (self.policy_probs + eps)
                        # projection coefficient, active only when the constraint is violated (relu)
                        adj = tf.nn.relu(
                            (tf.reduce_sum(k * g, axis=1) - self.ap.algorithm.max_KL_divergence)
                            / (tf.reduce_sum(tf.square(k), axis=1) + eps))
                        g = g - tf.expand_dims(adj, 1) * k
                        return - g / batch_size
                    return tf.identity(x), grad

                self.output = trust_region_layer(self.output)

    def _build_discrete_net(self, input_layer, action_space):
        """Create the softmax policy output and its categorical distribution for a discrete action space."""
        self.num_actions = len(action_space.actions)

        self.actions = tf.placeholder(tf.int32, [None], name='{}_actions'.format(self.get_name()))
        self.input.append(self.actions)

        policy_values = self.dense_layer(self.num_actions)(input_layer, name='fc')
        self.policy_probs = tf.nn.softmax(policy_values, name='{}_policy'.format(self.get_name()))

        # (the + eps is to prevent probability 0 which will cause the log later on to be -inf)
        self.policy_distribution = tf.contrib.distributions.Categorical(probs=(self.policy_probs + eps))
        self.output = self.policy_probs
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import batchnorm_activation_dropout, Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition
class DDPGActor(Head):
    """
    The DDPG actor (policy) head: a single dense layer followed by optional batchnorm and a bounded
    activation (tanh by default), scaled by the action space's absolute range to produce a
    deterministic action.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='tanh',
                 batchnorm: bool=True, dense_layer=Dense, is_training=False):
        """
        :param batchnorm: whether to apply batch normalization to the output layer
        (remaining parameters are forwarded to Head)
        """
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer, is_training=is_training)
        self.name = 'ddpg_actor_head'
        self.return_type = ActionProbabilities
        self.num_actions = self.spaces.action.shape
        self.batchnorm = batchnorm

        # bounded actions - the activation output is scaled by the action space's absolute range
        self.output_scale = self.spaces.action.max_abs_range

        # a scalar weight that penalizes high activation values (before the activation function) for the final layer.
        # default to 0 (no penalty) so that _build_module does not raise an AttributeError when the
        # algorithm parameters do not define 'action_penalty'
        self.action_penalty = 0
        if hasattr(agent_parameters.algorithm, 'action_penalty'):
            self.action_penalty = agent_parameters.algorithm.action_penalty

    def _build_module(self, input_layer):
        """Build the scaled policy-mean output and, optionally, the pre-activation penalty regularizer."""
        # mean
        pre_activation_policy_values_mean = self.dense_layer(self.num_actions)(input_layer, name='fc_mean')
        # [-1] takes the final tensor of the batchnorm/activation/dropout chain
        policy_values_mean = batchnorm_activation_dropout(input_layer=pre_activation_policy_values_mean,
                                                          batchnorm=self.batchnorm,
                                                          activation_function=self.activation_function,
                                                          dropout_rate=0,
                                                          is_training=self.is_training,
                                                          name="BatchnormActivationDropout_0")[-1]
        self.policy_mean = tf.multiply(policy_values_mean, self.output_scale, name='output_mean')

        if self.is_local:
            # add a squared penalty on the squared pre-activation features of the action
            if self.action_penalty and self.action_penalty != 0:
                self.regularizations += \
                    [self.action_penalty * tf.reduce_mean(tf.square(pre_activation_policy_values_mean))]

        self.output = [self.policy_mean]

    def __str__(self):
        result = [
            'Dense (num outputs = {})'.format(self.num_actions[0])
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import Measurements
from rl_coach.spaces import SpacesDefinition
class MeasurementsPredictionHead(Head):
    """
    A head that predicts future measurements per action (as in Direct Future Prediction), using a
    dueling-style decomposition: an expectation stream shared by all actions plus a per-action
    difference ("advantage") stream.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'future_measurements_head'
        self.num_actions = len(self.spaces.action.actions)
        self.num_measurements = self.spaces.state['measurements'].shape[0]
        self.num_prediction_steps = agent_parameters.algorithm.num_predicted_steps_ahead
        # all measurements for all future prediction steps, flattened into one vector per action
        self.multi_step_measurements_size = self.num_measurements * self.num_prediction_steps
        self.return_type = Measurements

    def _build_module(self, input_layer):
        """
        Build the expectation/action streams and an MSE loss over the predicted measurements.
        NaN entries in the target are replaced by the network's own prediction so they contribute
        zero error (masking).
        """
        # This is almost exactly the same as Dueling Network but we predict the future measurements for each action
        # actions expectation tower (expectation stream) - E
        with tf.variable_scope("expectation_stream"):
            expectation_stream = self.dense_layer(256)(input_layer, activation=self.activation_function, name='fc1')
            expectation_stream = self.dense_layer(self.multi_step_measurements_size)(expectation_stream, name='output')
            expectation_stream = tf.expand_dims(expectation_stream, axis=1)

        # action fine differences tower (action stream) - A
        with tf.variable_scope("action_stream"):
            action_stream = self.dense_layer(256)(input_layer, activation=self.activation_function, name='fc1')
            action_stream = self.dense_layer(self.num_actions * self.multi_step_measurements_size)(action_stream,
                                                                                                   name='output')
            action_stream = tf.reshape(action_stream,
                                       (tf.shape(action_stream)[0], self.num_actions,
                                        self.multi_step_measurements_size))
            # center over the actions axis; 'axis' replaces the deprecated 'reduction_indices' alias
            action_stream = action_stream - tf.reduce_mean(action_stream, axis=1, keepdims=True)

        # merge to future measurements predictions
        self.output = tf.add(expectation_stream, action_stream, name='output')

        self.target = tf.placeholder(tf.float32, [None, self.num_actions, self.multi_step_measurements_size],
                                     name="targets")
        # mask NaN targets by substituting the current prediction (zero error for those entries)
        targets_nonan = tf.where(tf.is_nan(self.target), self.output, self.target)
        # mean over the batch (axis 0), summed over actions and measurements
        self.loss = tf.reduce_sum(tf.reduce_mean(tf.square(targets_nonan - self.output), axis=0))
        tf.losses.add_loss(self.loss_weight[0] * self.loss)

    def __str__(self):
        result = [
            "State Value Stream - V",
            "\tDense (num outputs = 256)",
            "\tDense (num outputs = {})".format(self.multi_step_measurements_size),
            "Action Advantage Stream - A",
            "\tDense (num outputs = 256)",
            "\tDense (num outputs = {})".format(self.num_actions * self.multi_step_measurements_size),
            "\tReshape (new size = {} x {})".format(self.num_actions, self.multi_step_measurements_size),
            "\tSubtract(A, Mean(A))".format(self.num_actions),
            "Add (V, A)"
        ]
        return '\n'.join(result)
import tensorflow as tf
import numpy as np
from rl_coach.architectures.tensorflow_components.heads import QHead
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
class RainbowQHead(QHead):
    """
    The distributional (categorical) Q head used by Rainbow, with a dueling decomposition: a
    state-value stream and an action-advantage stream are combined and softmaxed over a fixed
    support of num_atoms points (z_values); Q values are the expectations under the resulting
    per-action distributions.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.num_actions = len(self.spaces.action.actions)
        self.num_atoms = agent_parameters.algorithm.atoms
        self.name = 'rainbow_q_values_head'
        # the fixed support of the value distribution: num_atoms evenly spaced points in [v_min, v_max]
        self.z_values = tf.cast(tf.constant(np.linspace(self.ap.algorithm.v_min, self.ap.algorithm.v_max,
                                                        self.ap.algorithm.atoms), dtype=tf.float32), dtype=tf.float64)
        # the loss is defined inside _build_module, so no external loss type is registered
        self.loss_type = []

    def _build_module(self, input_layer):
        # state value tower - V: one set of num_atoms logits shared by all actions
        with tf.variable_scope("state_value"):
            state_value = self.dense_layer(512)(input_layer, activation=self.activation_function, name='fc1')
            state_value = self.dense_layer(self.num_atoms)(state_value, name='fc2')
            state_value = tf.expand_dims(state_value, axis=1)

        # action advantage tower - A: per-action logits, centered by subtracting the mean over actions
        with tf.variable_scope("action_advantage"):
            action_advantage = self.dense_layer(512)(input_layer, activation=self.activation_function, name='fc1')
            action_advantage = self.dense_layer(self.num_actions * self.num_atoms)(action_advantage, name='fc2')
            action_advantage = tf.reshape(action_advantage, (tf.shape(input_layer)[0], self.num_actions,
                                                             self.num_atoms))
            action_mean = tf.reduce_mean(action_advantage, axis=1, keepdims=True)
            action_advantage = action_advantage - action_mean

        # merge to state-action value function Q
        values_distribution = tf.add(state_value, action_advantage, name='output')

        # softmax on atoms dimension (the last axis)
        self.output = tf.nn.softmax(values_distribution)

        # calculate cross entropy loss against the target distributions fed by the training code
        self.distributions = tf.placeholder(tf.float32, shape=(None, self.num_actions, self.num_atoms),
                                            name="distributions")
        self.target = self.distributions
        # NOTE(review): this is a per-example loss tensor (no reduce over the batch) added to the losses
        # collection as-is - confirm the downstream reduction handles this as intended
        self.loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.target, logits=values_distribution)
        tf.losses.add_loss(self.loss)

        # Q values are the expectation of the support z_values under each action's distribution
        self.q_values = tf.tensordot(tf.cast(self.output, tf.float64), self.z_values, 1)

        # used in batch-rl to estimate a probablity distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        result = [
            "State Value Stream - V",
            "\tDense (num outputs = 512)",
            "\tDense (num outputs = {})".format(self.num_atoms),
            "Action Advantage Stream - A",
            "\tDense (num outputs = 512)",
            "\tDense (num outputs = {})".format(self.num_actions * self.num_atoms),
            "\tReshape (new size = {} x {})".format(self.num_actions, self.num_atoms),
            "\tSubtract(A, Mean(A))".format(self.num_actions),
            "Add (V, A)",
            "Softmax"
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition
class PPOVHead(Head):
    """
    The PPO value head: a single linear output trained with a clipped value loss - the maximum of the
    unclipped squared error and the squared error of a prediction clipped around the old policy's value.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'ppo_v_head'
        # epsilon used to clip the new value prediction around the old one
        self.clip_likelihood_ratio_using_epsilon = agent_parameters.algorithm.clip_likelihood_ratio_using_epsilon
        self.return_type = ActionProbabilities
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        # the value predicted for the state by the pre-update policy, used as the clipping center
        self.old_policy_value = tf.placeholder(tf.float32, [None], "old_policy_values")
        self.input = [self.old_policy_value]
        self.output = self.dense_layer(1)(input_layer, name='output',
                                          kernel_initializer=normalized_columns_initializer(1.0),
                                          bias_initializer=self.output_bias_initializer)
        self.target = self.total_return = tf.placeholder(tf.float32, [None], name="total_return")
        # clipped value loss: max of the unclipped and clipped squared errors
        # NOTE(review): self.output has shape (None, 1) while the target/old values have shape (None,),
        # so these subtractions broadcast to (None, None) - confirm this is the intended behavior
        value_loss_1 = tf.square(self.output - self.target)
        value_loss_2 = tf.square(self.old_policy_value +
                                 tf.clip_by_value(self.output - self.old_policy_value,
                                                  -self.clip_likelihood_ratio_using_epsilon,
                                                  self.clip_likelihood_ratio_using_epsilon) - self.target)
        self.vf_loss = tf.reduce_mean(tf.maximum(value_loss_1, value_loss_2))
        self.loss = self.vf_loss
        tf.losses.add_loss(self.loss)

    def __str__(self):
        result = [
            "Dense (num outputs = 1)"
        ]
        return '\n'.join(result)
import tensorflow as tf
import numpy as np
from rl_coach.architectures.tensorflow_components.heads import QHead
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
class QuantileRegressionQHead(QHead):
    """
    The QR-DQN head: for every action it outputs the locations of num_atoms return-distribution
    quantiles, trained with the quantile Huber (quantile regression) loss. Q values are the mean
    of the quantile locations.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer, output_bias_initializer=output_bias_initializer)
        self.name = 'quantile_regression_dqn_head'
        self.num_actions = len(self.spaces.action.actions)
        self.num_atoms = agent_parameters.algorithm.atoms  # we use atom / quantile interchangeably
        self.huber_loss_interval = agent_parameters.algorithm.huber_loss_interval  # k
        # equal probability mass 1/N per quantile; used to reduce quantile locations to Q values
        self.quantile_probabilities = tf.cast(
            tf.constant(np.ones(self.ap.algorithm.atoms) / float(self.ap.algorithm.atoms), dtype=tf.float32),
            dtype=tf.float64)
        # the loss is defined inside _build_module, so no external loss type is registered
        self.loss_type = []

    def _build_module(self, input_layer):
        # (batch_index, action) pairs, used with gather_nd to select the taken action's quantiles
        self.actions = tf.placeholder(tf.int32, [None, 2], name="actions")
        # the tau-hat midpoints used to weight the quantile Huber loss
        self.quantile_midpoints = tf.placeholder(tf.float32, [None, self.num_atoms], name="quantile_midpoints")
        self.input = [self.actions, self.quantile_midpoints]

        # the output of the head is the N unordered quantile locations {theta_1, ..., theta_N}
        quantiles_locations = self.dense_layer(self.num_actions * self.num_atoms)\
            (input_layer, name='output', bias_initializer=self.output_bias_initializer)
        quantiles_locations = tf.reshape(quantiles_locations, (tf.shape(quantiles_locations)[0], self.num_actions, self.num_atoms))
        self.output = quantiles_locations

        # the target quantile locations fed by the training code
        self.quantiles = tf.placeholder(tf.float32, shape=(None, self.num_atoms), name="quantiles")
        self.target = self.quantiles

        # only the quantiles of the taken action are taken into account
        quantiles_for_used_actions = tf.gather_nd(quantiles_locations, self.actions)

        # reorder the output quantiles and the target quantiles as a preparation step for calculating the loss
        # the output quantiles vector and the quantile midpoints are tiled as rows of a NxN matrix (N = num quantiles)
        # the target quantiles vector is tiled as column of a NxN matrix
        theta_i = tf.tile(tf.expand_dims(quantiles_for_used_actions, -1), [1, 1, self.num_atoms])
        T_theta_j = tf.tile(tf.expand_dims(self.target, -2), [1, self.num_atoms, 1])
        tau_i = tf.tile(tf.expand_dims(self.quantile_midpoints, -1), [1, 1, self.num_atoms])

        # Huber loss of T(theta_j) - theta_i: quadratic within [-k, k], linear outside
        error = T_theta_j - theta_i
        abs_error = tf.abs(error)
        quadratic = tf.minimum(abs_error, self.huber_loss_interval)
        huber_loss = self.huber_loss_interval * (abs_error - quadratic) + 0.5 * quadratic ** 2

        # Quantile Huber loss: weight the Huber loss asymmetrically by |tau - 1{error < 0}|
        quantile_huber_loss = tf.abs(tau_i - tf.cast(error < 0, dtype=tf.float32)) * huber_loss

        # Quantile regression loss (the probability for each quantile is 1/num_quantiles)
        quantile_regression_loss = tf.reduce_sum(quantile_huber_loss) / float(self.num_atoms)
        self.loss = quantile_regression_loss
        tf.losses.add_loss(self.loss)

        # Q values: mean of the quantile locations (each quantile carries mass 1/N)
        self.q_values = tf.tensordot(tf.cast(self.output, tf.float64), self.quantile_probabilities, 1)

        # used in batch-rl to estimate a probablity distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        result = [
            "Dense (num outputs = {})".format(self.num_actions * self.num_atoms),
            "Reshape (new size = {} x {})".format(self.num_actions, self.num_atoms)
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpace
class QHead(Head):
    """
    A vanilla Q head: one dense layer mapping the middleware embedding to a Q value per action,
    trained against a target with either an MSE or a Huber loss.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'q_values_head'
        self.return_type = QActionStateValue
        self.output_bias_initializer = output_bias_initializer

        action_space = self.spaces.action
        if isinstance(action_space, BoxActionSpace):
            # a continuous action space gets a single Q value output
            self.num_actions = 1
        elif isinstance(action_space, DiscreteActionSpace):
            self.num_actions = len(action_space.actions)
        else:
            raise ValueError(
                'QHead does not support action spaces of type: {class_name}'.format(
                    class_name=action_space.__class__.__name__,
                )
            )

        # optionally use a Huber loss instead of MSE for robustness to outlier targets
        use_huber = agent_parameters.network_wrappers[self.network_name].replace_mse_with_huber_loss
        self.loss_type = tf.losses.huber_loss if use_huber else tf.losses.mean_squared_error

    def _build_module(self, input_layer):
        # Standard Q Network: one linear output per action
        output_layer = self.dense_layer(self.num_actions)(
            input_layer, name='output', bias_initializer=self.output_bias_initializer)
        self.q_values = self.output = output_layer

        # used in batch-rl to estimate a probablity distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        return "Dense (num outputs = {})".format(self.num_actions)

    def add_softmax_with_temperature(self):
        """Return a softmax over the Q values scaled by the network wrapper's softmax temperature."""
        temperature = self.ap.network_wrappers[self.network_name].softmax_temperature
        return tf.nn.softmax(self.q_values / temperature, name="softmax")
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import VStateValue
from rl_coach.spaces import SpacesDefinition
class TD3VHead(Head):
    """
    The TD3 critic head: builds the two Q outputs (clipped double-Q learning) and exposes, in order,
    each critic's output, the element-wise minimum of both, and the mean of the first critic's output.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, initializer='xavier', output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'td3_v_values_head'
        self.return_type = VStateValue
        self.loss_type = []
        # 'xavier' (the scope default) or 'normalized_columns' for the output layer weights
        self.initializer = initializer
        self.loss = []
        self.output = []
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        # Standard V Network
        q_outputs = []
        # the shared regression target for both critics, fed by the training code
        self.target = tf.placeholder(tf.float32, shape=(None, 1), name="q_networks_min_placeholder")
        for i in range(input_layer.shape[0]):  # assuming that the actual size is 2, as there are two critic networks
            if self.initializer == 'normalized_columns':
                q_outputs.append(self.dense_layer(1)(input_layer[i], name='q_output_{}'.format(i + 1),
                                                     kernel_initializer=normalized_columns_initializer(1.0),
                                                     bias_initializer=self.output_bias_initializer),)
            elif self.initializer == 'xavier' or self.initializer is None:
                q_outputs.append(self.dense_layer(1)(input_layer[i], name='q_output_{}'.format(i + 1),
                                                     bias_initializer=self.output_bias_initializer))

            self.output.append(q_outputs[i])
            # each critic regresses towards the shared target with an MSE loss
            self.loss.append(tf.reduce_mean((self.target-q_outputs[i])**2))

        # output[2]: element-wise min over the two critics
        self.output.append(tf.reduce_min(q_outputs, axis=0))
        # output[3]: scalar mean of the first critic's output
        # (presumably consumed by the actor update - verify against the TD3 agent)
        self.output.append(tf.reduce_mean(self.output[0]))
        # both critics are trained jointly with the sum of their losses
        self.loss = sum(self.loss)
        tf.losses.add_loss(self.loss)

    def __str__(self):
        result = [
            "Q1 Action-Value Stream",
            "\tDense (num outputs = 1)",
            "Q2 Action-Value Stream",
            "\tDense (num outputs = 1)",
            "Min (Q1, Q2)"
        ]
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpace
from rl_coach.utils import force_list
class RegressionHead(Head):
    """
    A generic regression head (used e.g. for conditional imitation learning): a configurable stack
    of hidden layers followed by a linear output with one value per action, trained with an MSE
    (or Huber) loss.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',
                 dense_layer=Dense, scheme=None, output_bias_initializer=None):
        """
        :param scheme: the hidden layers to stack before the output layer;
                       defaults to two Dense(256) layers
        :param output_bias_initializer: initializer for the output layer's bias
        (remaining parameters are forwarded to Head)
        """
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'regression_head'
        # build the default scheme per instance - a mutable default argument ([Dense(256), Dense(256)])
        # would be created once at import time and shared between all instances of this head
        if scheme is None:
            scheme = [Dense(256), Dense(256)]
        self.scheme = scheme
        self.layers = []
        if isinstance(self.spaces.action, BoxActionSpace):
            self.num_actions = self.spaces.action.shape[0]
        elif isinstance(self.spaces.action, DiscreteActionSpace):
            self.num_actions = len(self.spaces.action.actions)
        else:
            # fail early and explicitly instead of with an AttributeError later (consistent with QHead)
            raise ValueError(
                'RegressionHead does not support action spaces of type: {class_name}'.format(
                    class_name=self.spaces.action.__class__.__name__,
                )
            )
        self.return_type = QActionStateValue
        if agent_parameters.network_wrappers[self.network_name].replace_mse_with_huber_loss:
            self.loss_type = tf.losses.huber_loss
        else:
            self.loss_type = tf.losses.mean_squared_error
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        """Stack the scheme's layers on top of the input and add the final linear output layer."""
        self.layers.append(input_layer)
        for idx, layer_params in enumerate(self.scheme):
            # a single scheme entry may expand into several tensors, hence force_list
            self.layers.extend(force_list(
                layer_params(input_layer=self.layers[-1], name='{}_{}'.format(layer_params.__class__.__name__, idx))
            ))
        self.layers.append(self.dense_layer(self.num_actions)(self.layers[-1], name='output',
                                                              bias_initializer=self.output_bias_initializer))
        self.output = self.layers[-1]

    def __str__(self):
        result = []
        for layer in self.layers:
            result.append(str(layer))
        return '\n'.join(result)
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import batchnorm_activation_dropout, Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import Embedding
from rl_coach.spaces import SpacesDefinition, BoxActionSpace
class WolpertingerActorHead(Head):
    """
    The Wolpertinger actor head: outputs a "proto action" embedding of width action_embedding_width,
    optionally scaled by the bounded range of the filtered action space.
    """
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='tanh',
                 batchnorm: bool=True, dense_layer=Dense, is_training=False):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer, is_training=is_training)
        self.name = 'wolpertinger_actor_head'
        self.return_type = Embedding
        self.action_embedding_width = agent_parameters.algorithm.action_embedding_width
        self.batchnorm = batchnorm

        # scale the output only when the action space wraps a bounded (Box) filtered action space
        self.output_scale = None
        filtered_space = getattr(self.spaces.action, 'filtered_action_space', None)
        if isinstance(filtered_space, BoxActionSpace):
            self.output_scale = filtered_space.max_abs_range

    def _build_module(self, input_layer):
        """Build the dense action-embedding output, with batchnorm/activation and optional scaling."""
        embedding_pre_activation = self.dense_layer(self.action_embedding_width)(
            input_layer, name='actor_action_embedding')
        # [-1] takes the final tensor of the batchnorm/activation/dropout chain
        self.proto_action = batchnorm_activation_dropout(input_layer=embedding_pre_activation,
                                                         batchnorm=self.batchnorm,
                                                         activation_function=self.activation_function,
                                                         dropout_rate=0,
                                                         is_training=self.is_training,
                                                         name="BatchnormActivationDropout_0")[-1]
        if self.output_scale is not None:
            # stretch the bounded activation to the filtered action space's range
            self.proto_action = tf.multiply(self.proto_action, self.output_scale, name='proto_action')

        self.output = [self.proto_action]

    def __str__(self):
        return 'Dense (num outputs = {})'.format(self.action_embedding_width)
import os
from abc import ABC, abstractmethod
import threading
import pickle
import redis
import numpy as np
from rl_coach.utils import get_latest_checkpoint
class SharedRunningStatsSubscribe(threading.Thread):
    """
    Background thread that subscribes to a redis pub-sub channel and applies every
    published batch of values to the local running statistics of this worker.
    """
    def __init__(self, shared_running_stats):
        """
        :param shared_running_stats: the SharedRunningStats instance to update; its pubsub
                                     backend supplies the redis address/port to connect to
        """
        super().__init__()
        self.shared_running_stats = shared_running_stats
        self.redis_address = self.shared_running_stats.pubsub.params.redis_address
        self.redis_port = self.shared_running_stats.pubsub.params.redis_port
        self.redis_connection = redis.Redis(self.redis_address, self.redis_port)
        self.pubsub = self.redis_connection.pubsub()
        # subscribe to the channel derived from the stats' name before the thread starts
        self.channel = self.shared_running_stats.channel
        self.pubsub.subscribe(self.channel)
    def run(self):
        """Consume published messages forever, pushing each unpickled payload locally."""
        for message in self.pubsub.listen():
            try:
                obj = pickle.loads(message['data'])
                self.shared_running_stats.push_val(obj)
            except Exception:
                # best-effort: skip messages whose payload is not a pickled batch
                # (presumably e.g. the initial subscribe confirmation — confirm)
                continue
class SharedRunningStats(ABC):
    """
    Abstract base for running mean/std statistics which can optionally be shared between
    distributed workers through a redis pub-sub channel.
    """
    def __init__(self, name="", pubsub_params=None):
        """
        :param name: a name used to derive the pub-sub channel name
        :param pubsub_params: optional memory-backend parameters; when given, pushed values
                              are published to redis and applied by a subscriber thread
        """
        self.name = name
        self.pubsub = None
        if pubsub_params:
            self.channel = "channel-srs-{}".format(self.name)
            # local import — presumably to avoid a circular import at module load; confirm
            from rl_coach.memories.backend.memory_impl import get_memory_backend
            self.pubsub = get_memory_backend(pubsub_params)
            # daemon thread that applies updates published by any worker (including this one)
            subscribe_thread = SharedRunningStatsSubscribe(self)
            subscribe_thread.daemon = True
            subscribe_thread.start()
    @abstractmethod
    def set_params(self, shape=[1], clip_values=None):
        """Initialize the statistics buffers for samples of the given shape."""
        pass
    def push(self, x):
        """
        Add a batch of values. With pub-sub enabled, the value is only published; the local
        update happens asynchronously when the subscriber thread receives the message.
        """
        if self.pubsub:
            self.pubsub.redis_connection.publish(self.channel, pickle.dumps(x))
            return
        self.push_val(x)
    @abstractmethod
    def push_val(self, x):
        """Apply a batch of values directly to the local statistics."""
        pass
    @property
    @abstractmethod
    def n(self):
        """Number of samples accumulated so far."""
        pass
    @property
    @abstractmethod
    def mean(self):
        """Running mean of all pushed samples."""
        pass
    @property
    @abstractmethod
    def var(self):
        """Running variance of all pushed samples."""
        pass
    @property
    @abstractmethod
    def std(self):
        """Running standard deviation of all pushed samples."""
        pass
    @property
    @abstractmethod
    def shape(self):
        """Shape of a single sample."""
        pass
    @abstractmethod
    def normalize(self, batch):
        """Return the batch normalized by the running statistics."""
        pass
    @abstractmethod
    def set_session(self, sess):
        """Attach a tensorflow session, for backends that need one."""
        pass
    @abstractmethod
    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: int):
        """Persist the statistics to a checkpoint file."""
        pass
    @abstractmethod
    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Restore the statistics from the latest matching checkpoint file."""
        pass
class NumpySharedRunningStats(SharedRunningStats):
    """
    Numpy-backed implementation of SharedRunningStats. Accumulates the sum and the sum of
    squares of all pushed samples and derives the running mean / std from them.
    """
    def __init__(self, name, epsilon=1e-2, pubsub_params=None):
        """
        :param name: name of the stats (used for the pub-sub channel and checkpoint files)
        :param epsilon: small positive constant used both as the initial sample count and as
                        a lower bound on the variance, avoiding division by zero
        :param pubsub_params: optional redis pub-sub parameters for sharing across workers
        """
        super().__init__(name=name, pubsub_params=pubsub_params)
        self._count = epsilon
        self.epsilon = epsilon
        self.checkpoint_file_extension = 'srs'
    def set_params(self, shape=(1,), clip_values=None):
        """
        Initialize the statistics buffers.
        Note: the default is an immutable tuple rather than the previous mutable list
        default, which was shared between calls.
        :param shape: the shape of a single sample
        :param clip_values: optional (min, max) pair applied by normalize()
        """
        self._shape = shape
        self._mean = np.zeros(shape)
        self._std = np.sqrt(self.epsilon) * np.ones(shape)
        self._sum = np.zeros(shape)
        self._sum_squares = self.epsilon * np.ones(shape)
        self.clip_values = clip_values
    def push_val(self, samples: np.ndarray):
        """Accumulate a batch of samples and refresh the derived mean / std."""
        assert len(samples.shape) >= 2  # we should always have a batch dimension
        assert samples.shape[1:] == self._mean.shape, 'RunningStats input shape mismatch'
        samples = samples.astype(np.float64)
        self._sum += samples.sum(axis=0)
        self._sum_squares += np.square(samples).sum(axis=0)
        self._count += np.shape(samples)[0]
        self._mean = self._sum / self._count
        # unbiased variance estimate, floored at epsilon for numerical stability
        self._std = np.sqrt(np.maximum(
            (self._sum_squares - self._count * np.square(self._mean)) / np.maximum(self._count - 1, 1),
            self.epsilon))
    @property
    def n(self):
        return self._count
    @property
    def mean(self):
        return self._mean
    @property
    def var(self):
        return self._std ** 2
    @property
    def std(self):
        return self._std
    @property
    def shape(self):
        return self._mean.shape
    def normalize(self, batch):
        """Whiten the batch by the running statistics and clip to self.clip_values."""
        batch = (batch - self.mean) / (self.std + 1e-15)
        return np.clip(batch, *self.clip_values)
    def set_session(self, sess):
        # no session for the numpy implementation
        pass
    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: int):
        """Pickle the accumulated statistics to <checkpoint_prefix>.srs in checkpoint_dir."""
        dict_to_save = {'_mean': self._mean,
                        '_std': self._std,
                        '_count': self._count,
                        '_sum': self._sum,
                        '_sum_squares': self._sum_squares}
        with open(os.path.join(checkpoint_dir, str(checkpoint_prefix) + '.' + self.checkpoint_file_extension), 'wb') as f:
            pickle.dump(dict_to_save, f, pickle.HIGHEST_PROTOCOL)
    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Load the most recent matching .srs checkpoint and restore the statistics from it."""
        latest_checkpoint_filename = get_latest_checkpoint(checkpoint_dir, checkpoint_prefix,
                                                           self.checkpoint_file_extension)
        if latest_checkpoint_filename == '':
            raise ValueError("Could not find NumpySharedRunningStats checkpoint file. ")
        with open(os.path.join(checkpoint_dir, str(latest_checkpoint_filename)), 'rb') as f:
            saved_dict = pickle.load(f)
        self.__dict__.update(saved_dict)
import os
import numpy as np
from rl_coach.core_types import RewardType
from rl_coach.filters.reward.reward_filter import RewardFilter
from rl_coach.spaces import RewardSpace
from rl_coach.utilities.shared_running_stats import NumpySharedRunningStats
class RewardNormalizationFilter(RewardFilter):
    """
    Normalizes rewards using a running mean and standard deviation of every reward seen so
    far. When multiple workers are used, the statistics backing the normalization are
    accumulated across all of them.
    """
    def __init__(self, clip_min: float=-5.0, clip_max: float=5.0):
        """
        :param clip_min: The minimum value to allow after normalizing the reward
        :param clip_max: The maximum value to allow after normalizing the reward
        """
        super().__init__()
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.running_rewards_stats = None

    def set_device(self, device, memory_backend_params=None, mode='numpy') -> None:
        """
        Select the statistics backend: 'tf' keeps them in tensorflow variables (requires a
        session), 'numpy' keeps them locally in numpy arrays.
        :param device: the device to use
        :return: None
        """
        if mode == 'tf':
            from rl_coach.architectures.tensorflow_components.shared_variables import TFSharedRunningStats
            self.running_rewards_stats = TFSharedRunningStats(device, name='rewards_stats', create_ops=False,
                                                              pubsub_params=memory_backend_params)
        elif mode == 'numpy':
            self.running_rewards_stats = NumpySharedRunningStats(name='rewards_stats',
                                                                 pubsub_params=memory_backend_params)

    def set_session(self, sess) -> None:
        """
        Forward the tensorflow session to the statistics backend (a no-op for numpy).
        :param sess: the session
        :return: None
        """
        self.running_rewards_stats.set_session(sess)

    def filter(self, reward: RewardType, update_internal_state: bool=True) -> RewardType:
        """Optionally fold the reward into the running stats, then whiten and clip it."""
        if update_internal_state:
            needs_batch_shape = not isinstance(reward, np.ndarray) or len(reward.shape) < 2
            if needs_batch_shape:
                reward = np.array([[reward]])
            self.running_rewards_stats.push(reward)
        return self.running_rewards_stats.normalize(reward).squeeze()

    def get_filtered_reward_space(self, input_reward_space: RewardSpace) -> RewardSpace:
        """Initialize the scalar stats buffers; the reward space itself is unchanged."""
        self.running_rewards_stats.set_params(shape=(1,), clip_values=(self.clip_min, self.clip_max))
        return input_reward_space

    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Delegate checkpointing to the statistics backend."""
        self.running_rewards_stats.save_state_to_checkpoint(checkpoint_dir, checkpoint_prefix)

    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Delegate restoring to the statistics backend."""
        self.running_rewards_stats.restore_state_from_checkpoint(checkpoint_dir, checkpoint_prefix)
import os
import numpy as np
import pickle
from rl_coach.core_types import RewardType
from rl_coach.filters.reward.reward_filter import RewardFilter
from rl_coach.spaces import RewardSpace
from rl_coach.utils import get_latest_checkpoint
class RewardEwmaNormalizationFilter(RewardFilter):
    """
    Normalizes rewards by subtracting an exponentially weighted moving average of the mean
    reward observed so far.
    """
    def __init__(self, alpha: float = 0.01):
        """
        :param alpha: the degree of weighting decrease, a constant smoothing factor between 0 and 1.
                      A higher alpha discounts older observations faster
        """
        super().__init__()
        self.alpha = alpha
        self.moving_average = 0
        self.initialized = False
        self.checkpoint_file_extension = 'ewma'
        self.supports_batching = True

    def filter(self, reward: RewardType, update_internal_state: bool=True) -> RewardType:
        """Update the EWMA with the batch mean (if requested) and return the centered reward."""
        reward = np.asarray(reward)
        if update_internal_state:
            batch_mean = np.mean(reward)
            if self.initialized:
                self.moving_average += self.alpha * (batch_mean - self.moving_average)
            else:
                # seed the average with the first batch instead of biasing toward zero
                self.moving_average = batch_mean
                self.initialized = True
        return reward - self.moving_average

    def get_filtered_reward_space(self, input_reward_space: RewardSpace) -> RewardSpace:
        """Centering does not change the reward space definition."""
        return input_reward_space

    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: int):
        """Pickle the moving average to <checkpoint_prefix>.ewma in checkpoint_dir."""
        checkpoint_name = '{}.{}'.format(checkpoint_prefix, self.checkpoint_file_extension)
        with open(os.path.join(checkpoint_dir, checkpoint_name), 'wb') as f:
            pickle.dump({'moving_average': self.moving_average}, f, pickle.HIGHEST_PROTOCOL)

    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Restore the moving average from the most recent matching .ewma checkpoint."""
        latest = get_latest_checkpoint(checkpoint_dir, checkpoint_prefix, self.checkpoint_file_extension)
        if latest == '':
            raise ValueError("Could not find RewardEwmaNormalizationFilter checkpoint file. ")
        with open(os.path.join(checkpoint_dir, str(latest)), 'rb') as f:
            self.__dict__.update(pickle.load(f))
from typing import Union
import numpy as np
from rl_coach.core_types import ActionType
from rl_coach.filters.action.action_filter import ActionFilter
from rl_coach.spaces import BoxActionSpace
class BoxMasking(ActionFilter):
    """
    Masks part of the action space to enforce the agent to work in a defined space. For example,
    if the original action space is between -1 and 1, then this filter can be used in order to constrain the agent
    actions to the range 0 and 1 instead. This essentially masks the range -1 and 0 from the agent.
    The resulting action space will be shifted and will always start from 0 and have the size of the unmasked area.
    """
    def __init__(self,
                 masked_target_space_low: Union[None, int, float, np.ndarray],
                 masked_target_space_high: Union[None, int, float, np.ndarray]):
        """
        :param masked_target_space_low: the lowest values that can be chosen in the target action space
        :param masked_target_space_high: the highest values that can be chosen in the target action space
        """
        self.masked_target_space_low = masked_target_space_low
        self.masked_target_space_high = masked_target_space_high
        # actions arrive zero-based and are shifted back into the masked region
        self.offset = masked_target_space_low
        super().__init__()

    def _build_input_action_space(self) -> 'BoxActionSpace':
        """Zero-based box spanning the size of the unmasked region of the output space."""
        return BoxActionSpace(self.output_action_space.shape,
                              low=0,
                              high=self.masked_target_space_high - self.masked_target_space_low)

    def set_masking(self, masked_target_space_low: Union[None, int, float, np.ndarray],
                    masked_target_space_high: Union[None, int, float, np.ndarray]):
        """
        Update the masked region. The input action space is rebuilt only when the output
        action space is already known; the previous implementation dereferenced
        self.output_action_space unconditionally and raised AttributeError when this was
        called before the filter was wired to an environment.
        """
        self.masked_target_space_low = masked_target_space_low
        self.masked_target_space_high = masked_target_space_high
        self.offset = masked_target_space_low
        if self.output_action_space:
            self.validate_output_action_space(self.output_action_space)
            self.input_action_space = self._build_input_action_space()

    def validate_output_action_space(self, output_action_space: BoxActionSpace):
        """Check that the output space is a box and that the mask lies inside its bounds."""
        if not isinstance(output_action_space, BoxActionSpace):
            raise ValueError("BoxActionSpace discretization only works with an output space of type BoxActionSpace. "
                             "The given output space is {}".format(output_action_space))
        if self.masked_target_space_low is None or self.masked_target_space_high is None:
            raise ValueError("The masking target space size was not set. Please call set_masking.")
        if not (np.all(output_action_space.low <= self.masked_target_space_low)
                and np.all(self.masked_target_space_low <= output_action_space.high)):
            raise ValueError("The low values for masking the action space ({}) are not within the range of the "
                             "target space (low = {}, high = {})"
                             .format(self.masked_target_space_low, output_action_space.low, output_action_space.high))
        if not (np.all(output_action_space.low <= self.masked_target_space_high)
                and np.all(self.masked_target_space_high <= output_action_space.high)):
            raise ValueError("The high values for masking the action space ({}) are not within the range of the "
                             "target space (low = {}, high = {})"
                             .format(self.masked_target_space_high, output_action_space.low, output_action_space.high))

    def get_unfiltered_action_space(self, output_action_space: BoxActionSpace) -> BoxActionSpace:
        """Remember the output space and expose the shifted, zero-based input space."""
        self.output_action_space = output_action_space
        self.input_action_space = self._build_input_action_space()
        return self.input_action_space

    def filter(self, action: ActionType) -> ActionType:
        # shift the zero-based agent action back into the masked region of the target space
        return action + self.offset
from rl_coach.core_types import ActionType
from rl_coach.filters.filter import Filter
from rl_coach.spaces import ActionSpace
class ActionFilter(Filter):
    """
    Base class for filters that translate between the agent's action space and the
    environment's action space.
    """
    def __init__(self, input_action_space: ActionSpace=None):
        self.input_action_space = input_action_space
        self.output_action_space = None
        super().__init__()

    def get_unfiltered_action_space(self, output_action_space: ActionSpace) -> ActionSpace:
        """
        Derive the (agent-facing) unfiltered action space from the environment's space.
        The default is the identity mapping.
        :param output_action_space: the output action space
        :return: the unfiltered action space
        """
        return output_action_space

    def validate_output_action_space(self, output_action_space: ActionSpace):
        """
        Validate that the given output action space is usable by this filter.
        The default accepts any space.
        :param output_action_space: the output action space to validate
        :return: None
        """
        pass

    def validate_output_action(self, action: ActionType):
        """
        Verify that the given action lies inside the expected output action space.
        :param action: an action to validate
        :return: None
        """
        if self.output_action_space.contains(action):
            return
        raise ValueError("The given action ({}) does not match the action space ({})"
                         .format(action, self.output_action_space))

    def filter(self, action: ActionType) -> ActionType:
        """
        Map an action from the agent's action space into the environment's action space.
        :param action: an action to transform
        :return: transformed action
        """
        raise NotImplementedError("")

    def reverse_filter(self, action: ActionType) -> ActionType:
        """
        Map an action from the environment's action space back into the agent's action space.
        :param action: an action to transform
        :return: transformed action
        """
        raise NotImplementedError("")
from typing import Union
import numpy as np
from rl_coach.core_types import ActionType
from rl_coach.filters.action.action_filter import ActionFilter
from rl_coach.spaces import BoxActionSpace
class LinearBoxToBoxMap(ActionFilter):
    """
    Linearly maps one box action space onto another. For example, if the environment accepts
    continuous actions in [0, 1] and the agent should act in [-1, 1], this filter maps -1 to 0,
    1 to 1, and every value in between proportionally.
    """
    def __init__(self,
                 input_space_low: Union[None, int, float, np.ndarray],
                 input_space_high: Union[None, int, float, np.ndarray]):
        """
        :param input_space_low: the low values of the desired action space
        :param input_space_high: the high values of the desired action space
        """
        self.input_space_low = input_space_low
        self.input_space_high = input_space_high
        self.rescale = None
        self.offset = None
        super().__init__()

    def validate_output_action_space(self, output_action_space: BoxActionSpace):
        """Only box output spaces can be linearly mapped."""
        if isinstance(output_action_space, BoxActionSpace):
            return
        raise ValueError("BoxActionSpace discretization only works with an output space of type BoxActionSpace. "
                         "The given output space is {}".format(output_action_space))

    def get_unfiltered_action_space(self, output_action_space: BoxActionSpace) -> BoxActionSpace:
        """Precompute the linear mapping coefficients and expose the agent-facing space."""
        self.input_action_space = BoxActionSpace(output_action_space.shape, self.input_space_low, self.input_space_high)
        output_range = output_action_space.high - output_action_space.low
        input_range = self.input_space_high - self.input_space_low
        self.rescale = output_range / input_range
        # NOTE(review): offset is computed for backward compatibility but filter() does not read it
        self.offset = output_action_space.low - self.input_space_low
        self.output_action_space = output_action_space
        return self.input_action_space

    def filter(self, action: ActionType) -> ActionType:
        # linear interpolation from the input range into the output range
        return self.output_action_space.low + (action - self.input_space_low) * self.rescale
from typing import List
from rl_coach.core_types import ActionType
from rl_coach.filters.action.action_filter import ActionFilter
from rl_coach.spaces import DiscreteActionSpace, ActionSpace
class PartialDiscreteActionSpaceMap(ActionFilter):
    """
    Maps a discrete action space onto a subset of the actions of a countable target space.
    For example, for a MultiSelect environment with 8 actions, the agent can be restricted to
    choose among 5 of them by index (0-4): the agent keeps using plain discrete actions, while
    the remaining 3 actions are effectively masked.
    """
    def __init__(self, target_actions: List[ActionType]=None, descriptions: List[str]=None):
        """
        :param target_actions: A partial list of actions from the target space to map to.
        :param descriptions: a list of descriptions of each of the actions
        """
        self.target_actions = target_actions
        self.descriptions = descriptions
        super().__init__()

    def validate_output_action_space(self, output_action_space: ActionSpace):
        """Check that the target actions exist and all belong to the output space."""
        if not self.target_actions:
            raise ValueError("The target actions were not set")
        for v in self.target_actions:
            if output_action_space.contains(v):
                continue
            raise ValueError("The values in the output actions ({}) do not match the output action "
                             "space definition ({})".format(v, output_action_space))

    def get_unfiltered_action_space(self, output_action_space: ActionSpace) -> DiscreteActionSpace:
        """Expose a discrete space with one index per selected target action."""
        self.output_action_space = output_action_space
        self.input_action_space = DiscreteActionSpace(len(self.target_actions), self.descriptions,
                                                      filtered_action_space=output_action_space)
        return self.input_action_space

    def filter(self, action: ActionType) -> ActionType:
        # the discrete index selects the corresponding target action
        return self.target_actions[action]

    def reverse_filter(self, action: ActionType) -> ActionType:
        # find the index of the (element-wise) matching target action
        matches = [(action == candidate).all() for candidate in self.target_actions]
        return matches.index(True)
from typing import Union, List
import numpy as np
from rl_coach.filters.action.box_discretization import BoxDiscretization
from rl_coach.filters.action.partial_discrete_action_space_map import PartialDiscreteActionSpaceMap
from rl_coach.spaces import AttentionActionSpace, BoxActionSpace, DiscreteActionSpace
class AttentionDiscretization(PartialDiscreteActionSpaceMap):
    """
    Discretizes an **AttentionActionSpace**. The attention action space defines the actions
    as choosing sub-boxes in a given box. For example, consider an image of size 100x100, where the action is choosing
    a crop window of size 20x20 to attend to in the image. AttentionDiscretization allows discretizing the possible crop
    windows to choose into a finite number of options, and map a discrete action space into those crop windows.
    Warning! this will currently only work for attention spaces with 2 dimensions.
    """
    def __init__(self, num_bins_per_dimension: Union[int, List[int]], force_int_bins=False):
        """
        :param num_bins_per_dimension: Number of discrete bins to use for each dimension of the action space
        :param force_int_bins: If set to True, all the bins will represent integer coordinates in space.
        """
        # we allow specifying either a single number for all dimensions, or a single number per dimension in the target
        # action space
        self.num_bins_per_dimension = num_bins_per_dimension
        self.force_int_bins = force_int_bins
        # TODO: this will currently only work for attention spaces with 2 dimensions. generalize it.
        super().__init__()
    def validate_output_action_space(self, output_action_space: AttentionActionSpace):
        # only attention spaces define the sub-box (crop window) semantics this filter needs
        if not isinstance(output_action_space, AttentionActionSpace):
            raise ValueError("AttentionActionSpace discretization only works with an output space of type AttentionActionSpace. "
                             "The given output space is {}".format(output_action_space))
    def get_unfiltered_action_space(self, output_action_space: AttentionActionSpace) -> DiscreteActionSpace:
        """Build one discrete action per grid cell, each mapped to a [low-corner, high-corner] box."""
        if isinstance(self.num_bins_per_dimension, int):
            # broadcast a single bin count to every dimension of the attention space
            self.num_bins_per_dimension = [self.num_bins_per_dimension] * output_action_space.shape[0]
        # create a discrete to linspace map to ease the extraction of attention actions
        # (n+1 evenly spaced points per dimension define the corners of n bins)
        discrete_to_box = BoxDiscretization([n+1 for n in self.num_bins_per_dimension],
                                            self.force_int_bins)
        discrete_to_box.get_unfiltered_action_space(BoxActionSpace(output_action_space.shape,
                                                                   output_action_space.low,
                                                                   output_action_space.high), )
        rows, cols = self.num_bins_per_dimension
        # discrete_to_box.target_actions is a row-major grid of (rows+1) x (cols+1) corner points.
        # start_ind enumerates the top-left corner of every cell (the last row and column of
        # corner points are skipped), and end = start + cols + 2 is the flattened index of the
        # same cell's bottom-right corner (one row down, one column to the right).
        start_ind = [i * (cols + 1) + j for i in range(rows + 1) if i < rows for j in range(cols + 1) if j < cols]
        end_ind = [i + cols + 2 for i in start_ind]
        # each action is a 2-point box: [low corner, high corner] of the chosen crop window
        self.target_actions = [np.array([discrete_to_box.target_actions[start],
                                         discrete_to_box.target_actions[end]])
                               for start, end in zip(start_ind, end_ind)]
        return super().get_unfiltered_action_space(output_action_space)
from itertools import product
from typing import Union, List
import numpy as np
from rl_coach.filters.action.partial_discrete_action_space_map import PartialDiscreteActionSpaceMap
from rl_coach.spaces import BoxActionSpace, DiscreteActionSpace
class BoxDiscretization(PartialDiscreteActionSpaceMap):
    """
    Discretizes a continuous action space into a discrete action space, allowing the usage of
    agents such as DQN for continuous environments such as MuJoCo. Given the number of bins to discretize into, the
    original continuous action space is uniformly separated into the given number of bins, each mapped to a discrete
    action index. Each discrete action is mapped to a single N dimensional action in the BoxActionSpace action space.
    For example, if the original actions space is between -1 and 1 and 5 bins were selected, the new action
    space will consist of 5 actions mapped to -1, -0.5, 0, 0.5 and 1.
    """
    def __init__(self, num_bins_per_dimension: Union[int, List[int]], force_int_bins=False):
        """
        :param num_bins_per_dimension: The number of bins to use for each dimension of the target action space.
                                       The bins will be spread out uniformly over this space
        :param force_int_bins: force the bins to represent only integer actions. for example, if the action space is in
                               the range 0-10 and there are 5 bins, then the bins will be placed at 0, 2, 5, 7, 10,
                               instead of 0, 2.5, 5, 7.5, 10.
        """
        # we allow specifying either a single number for all dimensions, or a single number per dimension in the target
        # action space
        self.num_bins_per_dimension = num_bins_per_dimension
        self.force_int_bins = force_int_bins
        super().__init__()

    def validate_output_action_space(self, output_action_space: BoxActionSpace):
        if not isinstance(output_action_space, BoxActionSpace):
            raise ValueError("BoxActionSpace discretization only works with an output space of type BoxActionSpace. "
                             "The given output space is {}".format(output_action_space))
        # a single int is broadcast to all dimensions in get_unfiltered_action_space and is
        # always valid, so only per-dimension bin lists are length-checked here. (previously
        # len() was called on the int unconditionally, raising TypeError for the documented
        # int form of num_bins_per_dimension.)
        if not isinstance(self.num_bins_per_dimension, int) and \
                len(self.num_bins_per_dimension) != output_action_space.shape:
            # TODO: this check is not sufficient. it does not deal with actions spaces with more than one axis
            raise ValueError("The length of the list of bins per dimension ({}) does not match the number of "
                             "dimensions in the action space ({})"
                             .format(len(self.num_bins_per_dimension), output_action_space))

    def get_unfiltered_action_space(self, output_action_space: BoxActionSpace) -> DiscreteActionSpace:
        """Build the cartesian product of per-dimension bins as the discrete target actions."""
        if isinstance(self.num_bins_per_dimension, int):
            # broadcast a single bin count to all dimensions; use an int dtype so that
            # np.linspace receives an integer number of samples (a float num is rejected)
            self.num_bins_per_dimension = np.ones(output_action_space.shape, dtype=int) * self.num_bins_per_dimension
        bins = []
        for i in range(len(output_action_space.low)):
            dim_bins = np.linspace(output_action_space.low[i], output_action_space.high[i],
                                   int(self.num_bins_per_dimension[i]))
            if self.force_int_bins:
                dim_bins = dim_bins.astype(int)
            bins.append(dim_bins)
        # every combination of one bin per dimension becomes one discrete action
        self.target_actions = [list(action) for action in list(product(*bins))]
        return super().get_unfiltered_action_space(output_action_space)
import copy
from collections import deque
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace, VectorObservationSpace
class LazyStack(object):
    """
    Lazily stacked sequence of arrays: behaves like np.stack(history, axis) but defers the
    actual stacking (and the memory copy it implies) until the object is converted to an
    ndarray via the __array__ protocol.
    """
    def __init__(self, history, axis=None):
        # shallow-copy the container so later mutation of the source sequence
        # (e.g. the filter's deque) does not change this snapshot
        self.history = copy.copy(history)
        self.axis = axis

    def __array__(self, dtype=None):
        """Materialize the stack; numpy calls this when an ndarray is actually needed."""
        stacked = np.stack(self.history, axis=self.axis)
        return stacked if dtype is None else stacked.astype(dtype)
class ObservationStackingFilter(ObservationFilter):
    """
    Stacks several observations on top of each other. For image observation this will
    create a 3D blob. The stacking is done in a lazy manner in order to reduce memory consumption. To achieve this,
    a LazyStack object is used in order to wrap the observations in the stack. For this reason, the
    ObservationStackingFilter **must** be the last filter in the inputs filters stack.
    This filter is stateful since it stores the previous step result and depends on it.
    The filter adds an additional dimension to the output observation.
    Warning!!! The filter replaces the observation with a LazyStack object, so no filters should be
    applied after this filter. applying more filters will cause the LazyStack object to be converted to a numpy array
    and increase the memory footprint.
    """
    def __init__(self, stack_size: int, stacking_axis: int=-1):
        """
        :param stack_size: the number of previous observations in the stack
        :param stacking_axis: the axis on which to stack the observation on
        """
        super().__init__()
        self.stack_size = stack_size
        self.stacking_axis = stacking_axis
        # becomes a deque holding the last stack_size observations on the first filter() call
        self.stack = []
        self.input_observation_space = None
        if stack_size <= 0:
            raise ValueError("The stack shape must be a positive number")
        if type(stack_size) != int:
            raise ValueError("The stack shape must be of int type")
    @property
    def next_filter(self) -> 'InputFilter':
        return self._next_filter
    @next_filter.setter
    def next_filter(self, val: 'InputFilter'):
        # chaining is forbidden: a downstream filter would force the LazyStack to materialize,
        # defeating the lazy-memory design described in the class docstring
        raise ValueError("ObservationStackingFilter can have no other filters after it since they break its "
                         "functionality")
    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        # the space must agree with any observations already buffered from previous steps
        if len(self.stack) > 0 and not input_observation_space.contains(self.stack[-1]):
            raise ValueError("The given input observation space is different than the observations already stored in"
                             "the filters memory")
        if input_observation_space.num_dimensions <= self.stacking_axis:
            raise ValueError("The stacking axis is larger than the number of dimensions in the observation space")
    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """Append the observation to the stack and return the (lazily) stacked result."""
        if len(self.stack) == 0:
            # first observation after a reset: pre-fill the whole stack with copies of it
            self.stack = deque([observation] * self.stack_size, maxlen=self.stack_size)
        else:
            if update_internal_state:
                self.stack.append(observation)
        # wrap instead of stacking to postpone the memory copy (see LazyStack)
        observation = LazyStack(self.stack, self.stacking_axis)
        if isinstance(self.input_observation_space, VectorObservationSpace):
            # when stacking vectors, we cannot avoid copying the memory as we're flattening it all
            observation = np.array(observation).flatten()
        return observation
    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """Grow the observation space by the stack dimension (or the vector length)."""
        if isinstance(input_observation_space, VectorObservationSpace):
            # stacked vectors are flattened, so the space stays 1D but stack_size times longer
            self.input_observation_space = input_observation_space = VectorObservationSpace(input_observation_space.shape * self.stack_size)
        else:
            # insert the stack dimension at the stacking axis (or append it for axis -1)
            if self.stacking_axis == -1:
                input_observation_space.shape = np.append(input_observation_space.shape, values=[self.stack_size], axis=0)
            else:
                input_observation_space.shape = np.insert(input_observation_space.shape, obj=self.stacking_axis,
                                                          values=[self.stack_size], axis=0)
        return input_observation_space
    def reset(self) -> None:
        # drop the buffered history; the next filter() call re-creates the deque
        self.stack = []
from skimage.transform import resize
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
class ObservationRescaleSizeByFactorFilter(ObservationFilter):
    """
    Rescales an image observation by a constant factor, e.g. halving both spatial
    dimensions with a factor of 0.5.
    """
    def __init__(self, rescale_factor: float):
        """
        :param rescale_factor: the factor by which the observation will be rescaled
        """
        super().__init__()
        self.rescale_factor = float(rescale_factor)
        # TODO: allow selecting the channels dim

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """Only 2D (grayscale) or 3D (RGB, channels-last) image observations are supported."""
        num_dims = input_observation_space.num_dimensions
        if num_dims < 2 or num_dims > 3:
            raise ValueError("The rescale filter only applies to image observations where the number of dimensions is"
                             "either 2 (grayscale) or 3 (RGB). The number of dimensions defined for the "
                             "output observation was {}".format(input_observation_space.num_dimensions))
        if num_dims == 3 and input_observation_space.shape[-1] != 3:
            raise ValueError("Observations with 3 dimensions must have 3 channels in the last axis (RGB)")

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """Resize the spatial dimensions by the factor, preserving the channel count."""
        observation = observation.astype('uint8')
        target_size = tuple(int(self.rescale_factor * dim) for dim in observation.shape[:2])
        if observation.ndim == 3:
            target_size += (3,)
        return resize(observation, target_size, anti_aliasing=False, preserve_range=True).astype('uint8')

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """Scale the spatial dimensions of the observation space by the factor."""
        scaled_dims = (input_observation_space.shape[:2] * self.rescale_factor).astype('int')
        input_observation_space.shape[:2] = scaled_dims
        return input_observation_space
import copy
from skimage.transform import resize
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace, PlanarMapsObservationSpace, ImageObservationSpace
class ObservationRescaleToSizeFilter(ObservationFilter):
    """
    Rescales an image observation to a given size. The target size does not
    necessarily keep the aspect ratio of the original observation.
    Warning: this requires the input observation to be of type uint8 due to scipy requirements!
    """
    def __init__(self, output_observation_space: PlanarMapsObservationSpace):
        """
        :param output_observation_space: the output observation space
        :raises ValueError: if the output space is not a PlanarMapsObservationSpace
        """
        super().__init__()
        self.output_observation_space = output_observation_space
        if not isinstance(output_observation_space, PlanarMapsObservationSpace):
            raise ValueError("The rescale filter only applies to observation spaces that inherit from "
                             "PlanarMapsObservationSpace. This includes observations which consist of a set of 2D "
                             "images or an RGB image. Instead the output observation space was defined as: {}"
                             .format(output_observation_space.__class__))
        # the spatial size of a single plane: the output shape with the channels axis removed
        self.planar_map_output_shape = copy.copy(self.output_observation_space.shape)
        self.planar_map_output_shape = np.delete(self.planar_map_output_shape,
                                                 self.output_observation_space.channels_axis)

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        Verify the input is a planar-maps space with the same channel count as the
        configured output space.

        :param input_observation_space: the observation space to validate
        :raises ValueError: if the space type or channel count does not match
        """
        if not isinstance(input_observation_space, PlanarMapsObservationSpace):
            raise ValueError("The rescale filter only applies to observation spaces that inherit from "
                             "PlanarMapsObservationSpace. This includes observations which consist of a set of 2D "
                             "images or an RGB image. Instead the input observation space was defined as: {}"
                             .format(input_observation_space.__class__))
        if input_observation_space.shape[input_observation_space.channels_axis] \
                != self.output_observation_space.shape[self.output_observation_space.channels_axis]:
            raise ValueError("The number of channels between the input and output observation spaces must match. "
                             "Instead the number of channels were: {}, {}"
                             .format(input_observation_space.shape[input_observation_space.channels_axis],
                                     self.output_observation_space.shape[self.output_observation_space.channels_axis]))

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Rescale the observation to the configured output size.

        :param observation: the observation to rescale (cast to uint8 before resizing)
        :param update_internal_state: unused by this filter (it is stateless)
        :return: the rescaled observation, as uint8
        """
        observation = observation.astype('uint8')

        # rescale
        if isinstance(self.output_observation_space, ImageObservationSpace):
            # RGB image: resize all axes in one call
            observation = resize(observation, tuple(self.output_observation_space.shape), anti_aliasing=False,
                                 preserve_range=True).astype('uint8')
        else:
            # a stack of 2D planes: resize each plane separately, then restack along the
            # channels axis
            # NOTE(review): unlike the image branch, this resize call does not pass
            # anti_aliasing=False - confirm whether the difference is intentional
            new_observation = []
            for i in range(self.output_observation_space.shape[self.output_observation_space.channels_axis]):
                new_observation.append(resize(observation.take(i, self.output_observation_space.channels_axis),
                                              tuple(self.planar_map_output_shape),
                                              preserve_range=True).astype('uint8'))
            new_observation = np.array(new_observation)
            observation = new_observation.swapaxes(0, self.output_observation_space.channels_axis)

        return observation

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Set the observation space shape to the configured output shape.

        :param input_observation_space: the observation space to filter (mutated in place)
        :return: the filtered observation space
        """
        input_observation_space.shape = self.output_observation_space.shape
        return input_observation_space
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace, PlanarMapsObservationSpace
class ObservationMoveAxisFilter(ObservationFilter):
    """
    Reorders the axes of the observation. This can be useful when the observation is an
    image, and we want to move the channel axis to be the last axis instead of the first axis.
    """
    def __init__(self, axis_origin: int = None, axis_target: int=None):
        """
        :param axis_origin: The axis to move
        :param axis_target: Where to move the selected axis to
        """
        super().__init__()
        self.axis_origin = axis_origin
        self.axis_target = axis_target

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        Verify that both axis indices (possibly negative) are valid for the input shape.

        :param input_observation_space: the observation space to validate
        :raises ValueError: if either axis index is out of range
        """
        shape = input_observation_space.shape
        if not -len(shape) <= self.axis_origin < len(shape) or not -len(shape) <= self.axis_target < len(shape):
            raise ValueError("The given axis does not exist in the context of the input observation shape. ")

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Move the origin axis of the observation to the target position.

        :param observation: the observation whose axes should be reordered
        :param update_internal_state: unused by this filter (it is stateless)
        :return: the observation with the axis moved
        """
        return np.moveaxis(observation, self.axis_origin, self.axis_target)

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Update the observation space shape (and channels axis, where relevant) to reflect
        the axis move.

        :param input_observation_space: the observation space to filter (mutated in place)
        :return: the filtered observation space
        """
        # rebuild the shape: remove the moved axis, then re-insert its size at the target
        axis_size = input_observation_space.shape[self.axis_origin]
        input_observation_space.shape = np.delete(input_observation_space.shape, self.axis_origin)
        if self.axis_target == -1:
            input_observation_space.shape = np.append(input_observation_space.shape, axis_size)
        elif self.axis_target < -1:
            # negative targets other than -1 are shifted by one after the deletion above
            input_observation_space.shape = np.insert(input_observation_space.shape, self.axis_target+1, axis_size)
        else:
            input_observation_space.shape = np.insert(input_observation_space.shape, self.axis_target, axis_size)

        # move the channels axis according to the axis change
        if isinstance(input_observation_space, PlanarMapsObservationSpace):
            if input_observation_space.channels_axis == self.axis_origin:
                # the channels axis itself was moved
                input_observation_space.channels_axis = self.axis_target
            elif input_observation_space.channels_axis == self.axis_target:
                input_observation_space.channels_axis = self.axis_origin
            elif self.axis_origin < input_observation_space.channels_axis < self.axis_target:
                # an earlier axis moved past the channels axis, sliding it one position back
                input_observation_space.channels_axis -= 1
            elif self.axis_target < input_observation_space.channels_axis < self.axis_origin:
                # a later axis moved before the channels axis, sliding it one position forward
                input_observation_space.channels_axis += 1

        return input_observation_space
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
class ObservationToUInt8Filter(ObservationFilter):
    """
    Converts a floating point observation into an unsigned int 8 bit observation. This is
    mostly useful for reducing memory consumption and is usually used for image observations. The filter will first
    spread the observation values over the range 0-255 and then discretize them into integer values.
    """
    def __init__(self, input_low: float, input_high: float):
        """
        :param input_low: The lowest value currently present in the observation
        :param input_high: The highest value currently present in the observation
        :raises ValueError: if input_high is not strictly greater than input_low
        """
        super().__init__()
        self.input_low = input_low
        self.input_high = input_high

        if input_high <= input_low:
            # fixed message: the original stated the opposite of the check being made
            raise ValueError("The input observation space high values cannot be less than or equal to the input "
                             "observation space low values")

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        Verify that the observation space's value range matches the range this filter was
        configured with.

        :param input_observation_space: the observation space to validate
        :raises ValueError: if any low/high value in the space differs from the
            configured input_low/input_high
        """
        # np.any: a single mismatching element is enough to invalidate the space.
        # (The previous np.all(... != ...) only raised when *every* element mismatched,
        # silently accepting partially wrong ranges.)
        if np.any(input_observation_space.low != self.input_low) or \
                np.any(input_observation_space.high != self.input_high):
            raise ValueError("The observation space values range don't match the configuration of the filter."
                             "The configuration is: low = {}, high = {}. The actual values are: low = {}, high = {}"
                             .format(self.input_low, self.input_high,
                                     input_observation_space.low, input_observation_space.high))

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Linearly map the observation from [input_low, input_high] to [0, 255] and cast it
        to uint8.

        :param observation: the observation to convert
        :param update_internal_state: unused by this filter (it is stateless)
        :return: the observation as uint8 values in [0, 255]
        """
        # scale to 0-1
        observation = (observation - self.input_low) / (self.input_high - self.input_low)

        # scale to 0-255
        observation *= 255
        observation = observation.astype('uint8')

        return observation

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Update the observation space value range to the uint8 range [0, 255].

        :param input_observation_space: the observation space to filter (mutated in place)
        :return: the filtered observation space
        """
        input_observation_space.low = 0
        input_observation_space.high = 255
        return input_observation_space
import copy
from enum import Enum
from typing import List
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace, VectorObservationSpace
class ObservationReductionBySubPartsNameFilter(ObservationFilter):
    """
    Allows keeping only parts of the observation, by specifying their
    name. This is useful when the environment has a measurements vector as observation which includes several different
    measurements, but you want the agent to only see some of the measurements and not all.
    For example, the CARLA environment extracts multiple measurements that can be used by the agent, such as
    speed and location. If we want to only use the speed, it can be done using this filter.
    This will currently work only for VectorObservationSpace observations
    """
    class ReductionMethod(Enum):
        # Keep - retain only the named parts; Discard - drop the named parts
        Keep = 0
        Discard = 1

    def __init__(self, part_names: List[str], reduction_method: ReductionMethod):
        """
        :param part_names: A list of part names to reduce
        :param reduction_method: A reduction method to use - keep or discard the given parts
        """
        super().__init__()
        self.part_names = part_names
        self.reduction_method = reduction_method
        # both are filled in by get_filtered_observation_space, which must be called
        # before filter()
        self.measurement_names = None
        self.indices_to_keep = None

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Keep only the previously computed measurement indices of the observation vector.

        :param observation: the observation vector to reduce
        :param update_internal_state: unused by this filter (it is stateless at this point)
        :return: the reduced observation
        :raises ValueError: if the observation is not a numpy array, or if
            get_filtered_observation_space was not called first
        """
        if not isinstance(observation, np.ndarray):
            raise ValueError("All the state values are expected to be numpy arrays")
        if self.indices_to_keep is None:
            raise ValueError("To use ObservationReductionBySubPartsNameFilter, the get_filtered_observation_space "
                             "function should be called before filtering an observation")
        observation = observation[..., self.indices_to_keep]
        return observation

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        Verify the input is a VectorObservationSpace.

        :param input_observation_space: the observation space to validate
        :raises ValueError: if the space is not a VectorObservationSpace
        """
        if not isinstance(input_observation_space, VectorObservationSpace):
            raise ValueError("The ObservationReductionBySubPartsNameFilter support only VectorObservationSpace "
                             "observations. The given observation space was: {}"
                             .format(input_observation_space.__class__))

    def get_filtered_observation_space(self, input_observation_space: VectorObservationSpace) -> ObservationSpace:
        """
        Compute the measurement indices to keep, and update the space's shape and
        measurement names accordingly.

        :param input_observation_space: the observation space to filter (mutated in place)
        :return: the filtered observation space
        :raises ValueError: if the reduction method is not one of Keep/Discard
        """
        self.measurement_names = copy.copy(input_observation_space.measurements_names)
        if self.reduction_method == self.ReductionMethod.Keep:
            # keep only the named parts
            input_observation_space.shape[-1] = len(self.part_names)
            self.indices_to_keep = [idx for idx, val in enumerate(self.measurement_names) if val in self.part_names]
            input_observation_space.measurements_names = copy.copy(self.part_names)
        elif self.reduction_method == self.ReductionMethod.Discard:
            # drop the named parts and keep everything else
            input_observation_space.shape[-1] -= len(self.part_names)
            self.indices_to_keep = [idx for idx, val in enumerate(self.measurement_names) if val not in self.part_names]
            input_observation_space.measurements_names = [val for val in input_observation_space.measurements_names if
                                                          val not in self.part_names]
        else:
            raise ValueError("The given reduction method is not supported")
        return input_observation_space
from typing import Union, Tuple
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
class ObservationCropFilter(ObservationFilter):
    """
    Crops the size of the observation to a given crop window. For example, in Atari, the
    observations are images with a shape of 210x160. Usually, we will want to crop the size of the observation to a
    square of 160x160 before rescaling them.
    """
    def __init__(self, crop_low: np.ndarray=None, crop_high: np.ndarray=None):
        """
        :param crop_low: a vector where each dimension describes the start index for cropping the observation in the
                         corresponding dimension. a negative value of -1 will be mapped to the max size
        :param crop_high: a vector where each dimension describes the end index for cropping the observation in the
                          corresponding dimension. a negative value of -1 will be mapped to the max size
        :raises ValueError: if both crop vectors are None, or if they are inconsistent
            with each other (mismatched shapes, non-int dtype, invalid values)
        """
        super().__init__()
        if crop_low is None and crop_high is None:
            raise ValueError("At least one of crop_low and crop_high should be set to a real value. ")
        # fill in the missing bound: start at 0 / end at the max size (-1)
        if crop_low is None:
            crop_low = np.array([0] * len(crop_high))
        if crop_high is None:
            crop_high = np.array([-1] * len(crop_low))
        self.crop_low = crop_low
        self.crop_high = crop_high

        for h, l in zip(crop_high, crop_low):
            # h == -1 means "up to the max size", so it is allowed to be below l
            if h < l and h != -1:
                raise ValueError("Some of the cropping low values are higher than cropping high values")
        if np.any(crop_high < -1) or np.any(crop_low < -1):
            # fixed message: -1 is explicitly allowed as a "max size" sentinel
            raise ValueError("Cropping values cannot be negative, except for -1 which maps to the max size")
        if crop_low.shape != crop_high.shape:
            raise ValueError("The low values and high values for cropping must have the same number of dimensions")
        if crop_low.dtype != int or crop_high.dtype != int:
            raise ValueError("The crop values should be int values, instead they are defined as: {} and {}"
                             .format(crop_low.dtype, crop_high.dtype))

    def _replace_negative_one_in_crop_size(self, crop_size: np.ndarray, observation_shape: Union[Tuple, np.ndarray]):
        """
        Return a copy of crop_size where every -1 entry is replaced by the corresponding
        observation dimension size.

        :param crop_size: the crop vector to process (not modified)
        :param observation_shape: the shape supplying the max size per dimension
        :return: the processed copy of crop_size
        """
        # replace -1 with the max size
        crop_size = crop_size.copy()
        for i in range(len(observation_shape)):
            if crop_size[i] == -1:
                crop_size[i] = observation_shape[i]
        return crop_size

    def validate_input_observation_space(self, input_observation_space: ObservationSpace):
        """
        Verify that the (resolved) crop window lies inside the observation space.

        :param input_observation_space: the observation space to validate
        :raises ValueError: if the crop window exceeds the observation space bounds
        """
        crop_high = self._replace_negative_one_in_crop_size(self.crop_high, input_observation_space.shape)
        crop_low = self._replace_negative_one_in_crop_size(self.crop_low, input_observation_space.shape)
        if np.any(crop_high > input_observation_space.shape) or \
                np.any(crop_low > input_observation_space.shape):
            raise ValueError("The cropping values are outside of the observation space")
        if not input_observation_space.is_valid_index(crop_low) or \
                not input_observation_space.is_valid_index(crop_high - 1):
            raise ValueError("The cropping indices are outside of the observation space")

    def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
        """
        Crop the observation to the configured window.

        :param observation: the observation to crop
        :param update_internal_state: unused by this filter (it is stateless)
        :return: the cropped observation (a view into the original array)
        """
        # replace -1 with the max size
        crop_high = self._replace_negative_one_in_crop_size(self.crop_high, observation.shape)
        crop_low = self._replace_negative_one_in_crop_size(self.crop_low, observation.shape)

        # crop. indexing with a *tuple* of slices - indexing an ndarray with a list of
        # slices is deprecated in NumPy and raises an error in recent versions
        indices = tuple(slice(i, j) for i, j in zip(crop_low, crop_high))
        observation = observation[indices]

        return observation

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Update the observation space shape to the size of the crop window.

        :param input_observation_space: the observation space to filter (mutated in place)
        :return: the filtered observation space
        """
        # replace -1 with the max size
        crop_high = self._replace_negative_one_in_crop_size(self.crop_high, input_observation_space.shape)
        crop_low = self._replace_negative_one_in_crop_size(self.crop_low, input_observation_space.shape)
        input_observation_space.shape = crop_high - crop_low
        return input_observation_space
import os
import pickle
from typing import List
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
from rl_coach.utilities.shared_running_stats import NumpySharedRunningStats, NumpySharedRunningStats
class ObservationNormalizationFilter(ObservationFilter):
    """
    Normalizes the observation values with a running mean and standard deviation of
    all the observations seen so far. The normalization is performed element-wise. Additionally, when working with
    multiple workers, the statistics used for the normalization operation are accumulated over all the workers.
    """
    def __init__(self, clip_min: float=-5.0, clip_max: float=5.0, name='observation_stats'):
        """
        :param clip_min: The minimum value to allow after normalizing the observation
        :param clip_max: The maximum value to allow after normalizing the observation
        :param name: name for the shared running-statistics variable (used to identify it
            across workers / in checkpoints)
        """
        super().__init__()
        self.clip_min = clip_min
        self.clip_max = clip_max
        # created lazily by set_device, since the backend ('tf' or 'numpy') is not known yet
        self.running_observation_stats = None
        self.name = name
        self.supports_batching = True
        self.observation_space = None

    def set_device(self, device, memory_backend_params=None, mode='numpy') -> None:
        """
        An optional function that allows the filter to get the device if it is required to use tensorflow ops
        :param device: the device to use
        :memory_backend_params: if not None, holds params for a memory backend for sharing data (e.g. Redis)
        :param mode: the arithmetic module to use {'tf' | 'numpy'}
        :return: None
        """
        if mode == 'tf':
            # imported here to avoid a hard tensorflow dependency in numpy mode
            from rl_coach.architectures.tensorflow_components.shared_variables import TFSharedRunningStats
            self.running_observation_stats = TFSharedRunningStats(device, name=self.name, create_ops=False,
                                                                  pubsub_params=memory_backend_params)
        elif mode == 'numpy':
            self.running_observation_stats = NumpySharedRunningStats(name=self.name,
                                                                     pubsub_params=memory_backend_params)

    def set_session(self, sess) -> None:
        """
        An optional function that allows the filter to get the session if it is required to use tensorflow ops
        :param sess: the session
        :return: None
        """
        self.running_observation_stats.set_session(sess)

    def filter(self, observations: List[ObservationType], update_internal_state: bool=True) -> ObservationType:
        """
        Normalize a batch of observations using the shared running statistics.

        :param observations: a batch of observations to normalize
        :param update_internal_state: if True, also fold this batch into the running
            mean/std statistics
        :return: the normalized (and clipped) observations
        """
        observations = np.array(observations)
        if update_internal_state:
            self.running_observation_stats.push(observations)
            # cache the statistics that were used, for inspection/debugging
            self.last_mean = self.running_observation_stats.mean
            self.last_stdev = self.running_observation_stats.std

        return self.running_observation_stats.normalize(observations)

    def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
        """
        Configure the running-statistics variable for the observation shape; the space
        itself is returned unchanged (normalization does not alter the shape).

        :param input_observation_space: the observation space to filter
        :return: the (unchanged) observation space
        """
        self.running_observation_stats.set_params(shape=input_observation_space.shape,
                                                  clip_values=(self.clip_min, self.clip_max))
        return input_observation_space

    def save_state_to_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Persist the running statistics so normalization survives a restore."""
        self.running_observation_stats.save_state_to_checkpoint(checkpoint_dir, checkpoint_prefix)

    def restore_state_from_checkpoint(self, checkpoint_dir: str, checkpoint_prefix: str):
        """Restore previously saved running statistics from a checkpoint."""
        self.running_observation_stats.restore_state_from_checkpoint(checkpoint_dir, checkpoint_prefix)
import time
import os
from rl_coach.checkpoint import CheckpointStateReader
from rl_coach.data_stores.data_store import SyncFiles
class CheckpointDataStore(object):
    """
    A DataStore which relies on the GraphManager check pointing methods to communicate policies.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # index of the last checkpoint loaded by this process; used to detect new policies
        self.checkpoint_num = 0

    def end_of_policies(self) -> bool:
        """
        Returns true if no new policies will be added to this DataStore. This typically happens
        because training has completed and is used to signal to the rollout workers to stop.
        """
        return os.path.exists(
            os.path.join(self.checkpoint_dir, SyncFiles.FINISHED.value)
        )

    def save_policy(self, graph_manager):
        """
        Save the current policy of graph_manager as a checkpoint in self.checkpoint_dir.

        :param graph_manager: the graph_manager whose policy should be saved
        """
        # TODO: it would be nice if restore_checkpoint accepted a checkpoint path as a
        # parameter. as it is, one cannot distinguish between checkpoints used for coordination
        # and checkpoints requested to a persistent disk for later use
        graph_manager.task_parameters.checkpoint_restore_path = self.checkpoint_dir
        graph_manager.save_checkpoint()

    def load_policy(self, graph_manager, require_new_policy=True, timeout=None):
        """
        Load a checkpoint into the specified graph_manager. The expectation here is that
        save_to_store() and load_from_store() will synchronize a checkpoint directory with a
        central repository such as NFS or S3.

        :param graph_manager: the graph_manager to load the policy into
        :param require_new_policy: if True, only load a policy if it hasn't been loaded in this
            process yet before.
        :param timeout: Will only try to load the policy once if timeout is None, otherwise will
            retry for timeout seconds
        """
        if self._new_policy_exists(require_new_policy, timeout):
            # TODO: it would be nice if restore_checkpoint accepted a checkpoint path as a
            # parameter. as it is, one cannot distinguish between checkpoints used for coordination
            # and checkpoints requested to a persistent disk for later use
            graph_manager.task_parameters.checkpoint_restore_path = self.checkpoint_dir
            graph_manager.restore_checkpoint()

    def _new_policy_exists(self, require_new_policy=True, timeout=None) -> bool:
        """
        Poll the store until a (new) policy checkpoint is available, training has finished,
        or the timeout elapses.

        :param require_new_policy: if True, only load a policy if it hasn't been loaded in this
            process yet before.
        :param timeout: Will only try to load the policy once if timeout is None, otherwise will
            retry for timeout seconds
        :return: True if a usable checkpoint exists, False if training has finished
        :raises ValueError: if the timeout elapses without any checkpoint being seen
        """
        checkpoint_state_reader = CheckpointStateReader(
            self.checkpoint_dir, checkpoint_state_optional=False
        )
        # "first" is a sentinel guaranteeing at least one iteration even when timeout is 0
        checkpoint = "first"
        if timeout is None:
            timeout = 0
        timeout_ends = time.time() + timeout
        while time.time() < timeout_ends or checkpoint == "first":
            if self.end_of_policies():
                return False
            # sync the checkpoint directory from the central repository before reading it
            self.load_from_store()
            checkpoint = checkpoint_state_reader.get_latest()
            if checkpoint is not None:
                if not require_new_policy or checkpoint.num > self.checkpoint_num:
                    self.checkpoint_num = checkpoint.num
                    return True
        raise ValueError(
            "Waited for {timeout} seconds, but no first policy was received.".format(
                timeout=timeout
            )
        )
import uuid
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.checkpoint_data_store import CheckpointDataStore
class NFSDataStoreParameters(DataStoreParameters):
    """
    Parameters for the NFS data store: where the NFS server lives (or should be deployed)
    and which Kubernetes resources back it.
    """
    def __init__(self, ds_params, deployed=False, server=None, path=None, checkpoint_dir: str=""):
        """
        :param ds_params: base DataStoreParameters to copy store/orchestrator settings from
        :param deployed: if True, an external NFS server already exists at ``server:path``;
            if False, the orchestrator is expected to deploy one
        :param server: address of the pre-deployed NFS server (only used when deployed=True)
        :param path: exported path on the pre-deployed NFS server (only used when deployed=True)
        :param checkpoint_dir: local directory where checkpoints are read/written
        """
        super().__init__(ds_params.store_type, ds_params.orchestrator_type, ds_params.orchestrator_params)
        self.namespace = "default"
        if "namespace" in ds_params.orchestrator_params:
            self.namespace = ds_params.orchestrator_params["namespace"]
        self.checkpoint_dir = checkpoint_dir
        # names of the Kubernetes resources; filled in by NFSDataStore during deployment
        self.name = None
        self.pvc_name = None
        self.pv_name = None
        self.svc_name = None
        self.server = None
        self.path = "/"
        self.deployed = deployed
        if deployed:
            self.server = server
            self.path = path
class NFSDataStore(CheckpointDataStore):
    """
    An implementation of data store which uses NFS for storing policy checkpoints when using Coach in distributed mode.
    The policy checkpoints are written by the trainer and read by the rollout worker.
    """
    def __init__(self, params: NFSDataStoreParameters):
        """
        :param params: The parameters required to use the NFS data store.
        """
        self.params = params

    def deploy(self) -> bool:
        """
        Deploy the NFS server in an orchestrator if/when required.
        """
        if self.params.orchestrator_type == "kubernetes":
            if not self.params.deployed:
                # no external NFS server was given - deploy one inside the cluster
                if not self.deploy_k8s_nfs():
                    return False
            if not self.create_k8s_nfs_resources():
                return False

        return True

    def get_info(self):
        """
        Return a Kubernetes volume source referencing this data store's PVC, so pods can
        mount the shared checkpoint directory.
        """
        from kubernetes import client as k8sclient

        return k8sclient.V1PersistentVolumeClaimVolumeSource(
            claim_name=self.params.pvc_name
        )

    def undeploy(self) -> bool:
        """
        Undeploy the NFS server and resources from an orchestrator.
        """
        if self.params.orchestrator_type == "kubernetes":
            if not self.params.deployed:
                # only tear down the server if we deployed it ourselves
                if not self.undeploy_k8s_nfs():
                    return False
            if not self.delete_k8s_nfs_resources():
                return False

        return True

    def save_to_store(self):
        """No-op: the NFS mount itself keeps all workers in sync."""
        pass

    def load_from_store(self):
        """No-op: the NFS mount itself keeps all workers in sync."""
        pass

    def deploy_k8s_nfs(self) -> bool:
        """
        Deploy the NFS server in the Kubernetes orchestrator.
        """
        from kubernetes import client as k8sclient

        name = "nfs-server-{}".format(uuid.uuid4())
        # the NFS server container, exporting /exports backed by a host path
        container = k8sclient.V1Container(
            name=name,
            image="k8s.gcr.io/volume-nfs:0.8",
            ports=[k8sclient.V1ContainerPort(
                    name="nfs",
                    container_port=2049,
                    protocol="TCP"
                ),
                k8sclient.V1ContainerPort(
                    name="rpcbind",
                    container_port=111
                ),
                k8sclient.V1ContainerPort(
                    name="mountd",
                    container_port=20048
                ),
            ],
            volume_mounts=[k8sclient.V1VolumeMount(
                name='nfs-host-path',
                mount_path='/exports'
            )],
            # NFS export requires elevated privileges inside the container
            security_context=k8sclient.V1SecurityContext(privileged=True)
        )
        template = k8sclient.V1PodTemplateSpec(
            metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
            spec=k8sclient.V1PodSpec(
                containers=[container],
                volumes=[k8sclient.V1Volume(
                    name="nfs-host-path",
                    host_path=k8sclient.V1HostPathVolumeSource(path='/tmp/nfsexports-{}'.format(uuid.uuid4()))
                )]
            )
        )
        deployment_spec = k8sclient.V1DeploymentSpec(
            replicas=1,
            template=template,
            selector=k8sclient.V1LabelSelector(
                match_labels={'app': name}
            )
        )

        deployment = k8sclient.V1Deployment(
            api_version='apps/v1',
            kind='Deployment',
            metadata=k8sclient.V1ObjectMeta(name=name, labels={'app': name}),
            spec=deployment_spec
        )

        k8s_apps_v1_api_client = k8sclient.AppsV1Api()
        try:
            k8s_apps_v1_api_client.create_namespaced_deployment(self.params.namespace, deployment)
            self.params.name = name
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while creating nfs-server", e)
            return False

        # expose the server through a ClusterIP service; its IP is what PVs will mount
        k8s_core_v1_api_client = k8sclient.CoreV1Api()
        svc_name = "nfs-service-{}".format(uuid.uuid4())
        service = k8sclient.V1Service(
            api_version='v1',
            kind='Service',
            metadata=k8sclient.V1ObjectMeta(
                name=svc_name
            ),
            spec=k8sclient.V1ServiceSpec(
                selector={'app': self.params.name},
                ports=[k8sclient.V1ServicePort(
                    protocol='TCP',
                    port=2049,
                    target_port=2049
                )]
            )
        )

        try:
            svc_response = k8s_core_v1_api_client.create_namespaced_service(self.params.namespace, service)
            self.params.svc_name = svc_name
            self.params.server = svc_response.spec.cluster_ip
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while creating a service for nfs-server", e)
            return False

        return True

    def create_k8s_nfs_resources(self) -> bool:
        """
        Create NFS resources such as PV and PVC in Kubernetes.
        """
        from kubernetes import client as k8sclient

        pv_name = "nfs-ckpt-pv-{}".format(uuid.uuid4())
        persistent_volume = k8sclient.V1PersistentVolume(
            api_version="v1",
            kind="PersistentVolume",
            metadata=k8sclient.V1ObjectMeta(
                name=pv_name,
                labels={'app': pv_name}
            ),
            spec=k8sclient.V1PersistentVolumeSpec(
                access_modes=["ReadWriteMany"],
                nfs=k8sclient.V1NFSVolumeSource(
                    path=self.params.path,
                    server=self.params.server
                ),
                capacity={'storage': '10Gi'},
                storage_class_name=""
            )
        )
        k8s_api_client = k8sclient.CoreV1Api()
        try:
            k8s_api_client.create_persistent_volume(persistent_volume)
            self.params.pv_name = pv_name
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while creating the NFS PV", e)
            return False

        # the PVC binds to our PV by label selector, not by storage class
        pvc_name = "nfs-ckpt-pvc-{}".format(uuid.uuid4())
        persistent_volume_claim = k8sclient.V1PersistentVolumeClaim(
            api_version="v1",
            kind="PersistentVolumeClaim",
            metadata=k8sclient.V1ObjectMeta(
                name=pvc_name
            ),
            spec=k8sclient.V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteMany"],
                resources=k8sclient.V1ResourceRequirements(
                    requests={'storage': '10Gi'}
                ),
                selector=k8sclient.V1LabelSelector(
                    match_labels={'app': self.params.pv_name}
                ),
                storage_class_name=""
            )
        )

        try:
            k8s_api_client.create_namespaced_persistent_volume_claim(self.params.namespace, persistent_volume_claim)
            self.params.pvc_name = pvc_name
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while creating the NFS PVC", e)
            return False

        return True

    def undeploy_k8s_nfs(self) -> bool:
        """
        Delete the NFS server deployment and its service from the Kubernetes cluster.
        """
        from kubernetes import client as k8sclient

        del_options = k8sclient.V1DeleteOptions()
        k8s_apps_v1_api_client = k8sclient.AppsV1Api()
        try:
            k8s_apps_v1_api_client.delete_namespaced_deployment(self.params.name, self.params.namespace, del_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting nfs-server", e)
            return False

        k8s_core_v1_api_client = k8sclient.CoreV1Api()
        try:
            k8s_core_v1_api_client.delete_namespaced_service(self.params.svc_name, self.params.namespace, del_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting the service for nfs-server", e)
            return False

        return True

    def delete_k8s_nfs_resources(self) -> bool:
        """
        Delete NFS resources such as PV and PVC from the Kubernetes orchestrator.
        """
        from kubernetes import client as k8sclient

        del_options = k8sclient.V1DeleteOptions()
        k8s_api_client = k8sclient.CoreV1Api()
        try:
            k8s_api_client.delete_persistent_volume(self.params.pv_name, del_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting NFS PV", e)
            return False

        try:
            k8s_api_client.delete_namespaced_persistent_volume_claim(self.params.pvc_name, self.params.namespace, del_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting NFS PVC", e)
            return False

        return True

    def setup_checkpoint_dir(self, crd=None):
        """
        Prepare the checkpoint directory with a pre-existing checkpoint, if given.

        :param crd: optional local checkpoint restore directory to seed the store with
        """
        if crd:
            # TODO: find a way to upload this to the deployed nfs store.
            pass
import copy
from typing import Union
from collections import OrderedDict
import numpy as np
from rl_coach.agents.agent import Agent
from rl_coach.agents.ddpg_agent import DDPGAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, TD3VHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
AgentParameters, EmbedderScheme
from rl_coach.core_types import ActionInfo, TrainingSteps, Transition
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import BoxActionSpace, GoalsSpace
class TD3CriticNetworkParameters(NetworkParameters):
    """
    Network parameters for the TD3 critic: observation and action input embedders, one
    fully-connected middleware stream per Q network, and a single V-style output head.
    """
    def __init__(self, num_q_networks):
        """
        :param num_q_networks: the number of parallel Q streams in the middleware
        """
        super().__init__()

        # network topology
        self.input_embedders_parameters = dict(
            observation=InputEmbedderParameters(),
            action=InputEmbedderParameters(scheme=EmbedderScheme.Shallow),
        )
        self.middleware_parameters = FCMiddlewareParameters(num_streams=num_q_networks)
        self.heads_parameters = [TD3VHeadParameters()]

        # optimizer settings
        self.optimizer_type = 'Adam'
        self.adam_optimizer_beta2 = 0.999
        self.optimizer_epsilon = 1e-8
        self.learning_rate = 0.001
        self.batch_size = 100
        self.shared_optimizer = True

        # training / synchronization settings
        self.async_training = False
        self.create_target_network = True
        self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class TD3ActorNetworkParameters(NetworkParameters):
    """
    Network parameters for the TD3 actor: an observation input embedder, a
    fully-connected middleware, and a DDPG-style actor head (without batchnorm).
    """
    def __init__(self):
        super().__init__()
        # network topology
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [DDPGActorHeadParameters(batchnorm=False)]
        # optimizer settings
        self.optimizer_type = 'Adam'
        self.adam_optimizer_beta2 = 0.999
        self.optimizer_epsilon = 1e-8
        self.batch_size = 100
        self.async_training = False
        self.learning_rate = 0.001
        self.create_target_network = True
        self.shared_optimizer = True
        self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class TD3AlgorithmParameters(AlgorithmParameters):
    """
    :param rate_for_copying_weights_to_target: (float)
        When copying the online network weights to the target network weights, a soft update will be used, which
        weights the new online network weights by rate_for_copying_weights_to_target
    :param use_target_network_for_evaluation: (bool)
        If set to True, the target network will be used for predicting the actions when choosing actions to act.
        Since the target network weights change more slowly, the predicted actions will be more consistent.
    :param action_penalty: (float)
        The amount by which to penalize the network on high action feature (pre-activation) values.
        This can prevent the actions features from saturating the TanH activation function, and therefore prevent the
        gradients from becoming very low.
    :param clip_critic_targets: (Tuple[float, float] or None)
        The range to clip the critic target to in order to prevent overestimation of the action values.
    :param use_non_zero_discount_for_terminal_states: (bool)
        If set to True, the discount factor will be used for terminal states to bootstrap the next predicted state
        values. If set to False, the terminal states reward will be taken as the target return for the network.
    :param act_for_full_episodes: (bool)
        If set to True, the agent will act for full episodes between training phases; TD3Agent.train then runs
        one training iteration per environment step taken in the last episode.
    :param update_policy_every_x_episode_steps: (int)
        The number of critic updates between two consecutive (delayed) policy updates. The target networks are
        synced at the same delayed rate via num_steps_between_copying_online_weights_to_target.
    :param policy_noise: (float)
        The standard deviation of the Gaussian noise added to the target policy's actions when computing the
        critic targets (target policy smoothing).
    :param noise_clipping: (float)
        The absolute value at which the target policy smoothing noise is clipped.
    :param num_q_networks: (int)
        The number of critic Q streams; the minimum over them is used as the bootstrap target.
    """
    def __init__(self):
        super().__init__()
        self.rate_for_copying_weights_to_target = 0.005
        self.use_target_network_for_evaluation = False
        self.action_penalty = 0
        self.clip_critic_targets = None  # expected to be a tuple of the form (min_clip_value, max_clip_value) or None
        self.use_non_zero_discount_for_terminal_states = False
        self.act_for_full_episodes = True
        self.update_policy_every_x_episode_steps = 2
        self.num_steps_between_copying_online_weights_to_target = TrainingSteps(self.update_policy_every_x_episode_steps)
        self.policy_noise = 0.2
        self.noise_clipping = 0.5
        self.num_q_networks = 2
class TD3AgentExplorationParameters(AdditiveNoiseParameters):
    """Additive-noise exploration for TD3, with the noise magnitude given in absolute
    action units rather than as a percentage of the action space size."""
    def __init__(self):
        super().__init__()
        self.noise_as_percentage_from_action_space = False
class TD3AgentParameters(AgentParameters):
    """Bundles the algorithm, exploration, memory and network parameters for a TD3 agent."""

    def __init__(self):
        algorithm = TD3AlgorithmParameters()
        # the critic must know how many Q streams to build, so it is constructed from the
        # same algorithm parameters object that is handed to the agent
        networks = OrderedDict()
        networks["actor"] = TD3ActorNetworkParameters()
        networks["critic"] = TD3CriticNetworkParameters(algorithm.num_q_networks)
        super().__init__(algorithm=algorithm,
                         exploration=TD3AgentExplorationParameters(),
                         memory=EpisodicExperienceReplayParameters(),
                         networks=networks)

    @property
    def path(self):
        return 'rl_coach.agents.td3_agent:TD3Agent'
# Twin Delayed DDPG - https://arxiv.org/pdf/1802.09477.pdf
class TD3Agent(DDPGAgent):
    """Twin Delayed DDPG (TD3) agent - https://arxiv.org/pdf/1802.09477.pdf

    Extends DDPG with three mechanisms visible in learn_from_batch:
    (1) a twin critic whose minimum Q is used for bootstrapping,
    (2) target policy smoothing - clipped Gaussian noise on the target actions,
    (3) delayed policy (actor) updates every update_policy_every_x_episode_steps critic updates.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals for logging / dashboards
        self.q_values = self.register_signal("Q")
        self.TD_targets_signal = self.register_signal("TD targets")
        self.action_signal = self.register_signal("actions")
    def learn_from_batch(self, batch):
        """Run one TD3 training iteration on the given batch.

        Trains the critic on the smoothed, clipped bootstrap targets, and (every
        update_policy_every_x_episode_steps iterations) updates the actor using the
        critic's action gradients.

        :param batch: a Batch of transitions to learn from
        :return: (total_loss, losses, unclipped_grads) from the critic training step
        """
        actor = self.networks['actor']
        critic = self.networks['critic']
        actor_keys = self.ap.network_wrappers['actor'].input_embedders_parameters.keys()
        critic_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()
        # TD error = r + discount*max(q_st_plus_1) - q_st
        # one forward pass yields the target policy's next actions and the online policy's
        # current actions (the latter is used below for the delayed actor update)
        next_actions, actions_mean = actor.parallel_prediction([
            (actor.target_network, batch.next_states(actor_keys)),
            (actor.online_network, batch.states(actor_keys))
        ])
        # target policy smoothing: add clipped Gaussian noise to the target actions,
        # then re-clip the result back into the valid action space
        noise = np.random.normal(0, self.ap.algorithm.policy_noise, next_actions.shape).clip(
            -self.ap.algorithm.noise_clipping, self.ap.algorithm.noise_clipping)
        next_actions = self.spaces.action.clip_action_to_space(next_actions + noise)
        critic_inputs = copy.copy(batch.next_states(critic_keys))
        critic_inputs['action'] = next_actions
        q_st_plus_1 = critic.target_network.predict(critic_inputs)[2]  # output #2 is the min (Q1, Q2)
        # calculate the bootstrapped TD targets while discounting terminal states according to
        # use_non_zero_discount_for_terminal_states
        if self.ap.algorithm.use_non_zero_discount_for_terminal_states:
            TD_targets = batch.rewards(expand_dims=True) + self.ap.algorithm.discount * q_st_plus_1
        else:
            TD_targets = batch.rewards(expand_dims=True) + \
                         (1.0 - batch.game_overs(expand_dims=True)) * self.ap.algorithm.discount * q_st_plus_1
        # clip the TD targets to prevent overestimation errors
        if self.ap.algorithm.clip_critic_targets:
            TD_targets = np.clip(TD_targets, *self.ap.algorithm.clip_critic_targets)
        self.TD_targets_signal.add_sample(TD_targets)
        # train the critic
        critic_inputs = copy.copy(batch.states(critic_keys))
        critic_inputs['action'] = batch.actions(len(batch.actions().shape) == 1)
        result = critic.train_and_sync_networks(critic_inputs, TD_targets)
        total_loss, losses, unclipped_grads = result[:3]
        # delayed policy update: only every update_policy_every_x_episode_steps critic updates
        if self.training_iteration % self.ap.algorithm.update_policy_every_x_episode_steps == 0:
            # get the gradients of output #3 (=mean of Q1 network) w.r.t the action
            critic_inputs = copy.copy(batch.states(critic_keys))
            critic_inputs['action'] = actions_mean
            action_gradients = critic.online_network.predict(critic_inputs,
                                                             outputs=critic.online_network.gradients_wrt_inputs[3]['action'])
            # apply the gradients from the critic to the actor (negated: gradient ascent on Q)
            initial_feed_dict = {actor.online_network.gradients_weights_ph[0]: -action_gradients}
            gradients = actor.online_network.predict(batch.states(actor_keys),
                                                     outputs=actor.online_network.weighted_gradients[0],
                                                     initial_feed_dict=initial_feed_dict)
            if actor.has_global:
                actor.apply_gradients_to_global_network(gradients)
                actor.update_online_network()
            else:
                actor.apply_gradients_to_online_network(gradients)
        return total_loss, losses, unclipped_grads
    def train(self):
        """Train for as many iterations as environment steps taken in the current episode,
        matching the original TD3 implementation's one-update-per-step schedule."""
        self.ap.algorithm.num_consecutive_training_steps = self.current_episode_steps_counter
        return Agent.train(self)
    def update_transition_before_adding_to_replay_buffer(self, transition: Transition) -> Transition:
        """
        Allows agents to update the transition just before adding it to the replay buffer.
        Can be useful for agents that want to tweak the reward, termination signal, etc.

        Here: episodes terminated only by the step limit are not treated as true terminal
        states, so the critic still bootstraps through them.
        NOTE(review): relies on the wrapped environment exposing `_max_episode_steps` -
        presumably a Gym TimeLimit wrapper; confirm for other environment types.

        :param transition: the transition to update
        :return: the updated transition
        """
        transition.game_over = False if self.current_episode_steps_counter ==\
                                        self.parent_level_manager.environment.env._max_episode_steps\
            else transition.game_over
        return transition
from typing import Union
import numpy as np
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import ACERPolicyHeadParameters, QHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, AgentParameters
from rl_coach.core_types import Batch
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import DiscreteActionSpace
from rl_coach.utils import eps, last_sample
class ACERAlgorithmParameters(AlgorithmParameters):
    """
    :param apply_gradients_every_x_episodes: (int)
        The number of episodes between applying the accumulated gradients to the network
        (presumably consumed by the PolicyOptimizationAgent base class - not visible here).
    :param num_steps_between_gradient_updates: (int)
        Every num_steps_between_gradient_updates transitions will be considered as a single batch and use for
        accumulating gradients. This is also the number of steps used for bootstrapping according to the n-step formulation.
    :param ratio_of_replay: (int)
        The number of off-policy training iterations in each ACER iteration.
    :param num_transitions_to_start_replay: (int)
        Number of environment steps until ACER starts to train off-policy from the experience replay.
        This emulates a heat-up phase where the agents learns only on-policy until there are enough transitions in
        the experience replay to start the off-policy training.
    :param rate_for_copying_weights_to_target: (float)
        The rate of the exponential moving average for the average policy which is used for the trust region optimization.
        The target network in this algorithm is used as the average policy.
    :param importance_weight_truncation: (float)
        The clipping constant for the importance weight truncation (not used in the Q-retrace calculation).
    :param use_trust_region_optimization: (bool)
        If set to True, the gradients of the network will be modified with a term dependant on the KL divergence between
        the average policy and the current one, to bound the change of the policy during the network update.
    :param max_KL_divergence: (float)
        The upper bound parameter for the trust region optimization, use_trust_region_optimization needs to be set true
        for this parameter to have an effect.
    :param beta_entropy: (float)
        An entropy regulaization term can be added to the loss function in order to control exploration. This term
        is weighted using the beta value defined by beta_entropy.
    """
    def __init__(self):
        super().__init__()
        self.apply_gradients_every_x_episodes = 5
        self.num_steps_between_gradient_updates = 5000
        self.ratio_of_replay = 4
        self.num_transitions_to_start_replay = 10000
        self.rate_for_copying_weights_to_target = 0.01
        self.importance_weight_truncation = 10.0
        self.use_trust_region_optimization = True
        self.max_KL_divergence = 1.0
        self.beta_entropy = 0
class ACERNetworkParameters(NetworkParameters):
    """Shared ACER network: one observation embedder feeding a Q head and a policy head."""

    def __init__(self):
        super().__init__()
        # topology
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        # two heads: Q values (half loss weight) and the ACER policy
        self.heads_parameters = [QHeadParameters(loss_weight=0.5), ACERPolicyHeadParameters(loss_weight=1.0)]
        # training settings; the target network serves as the average policy network
        self.optimizer_type = 'Adam'
        self.create_target_network = True
        self.async_training = True
        self.clip_gradients = 40.0
class ACERAgentParameters(AgentParameters):
    """Bundles the algorithm, exploration, memory and network parameters for an ACER agent."""

    def __init__(self):
        super().__init__(
            algorithm=ACERAlgorithmParameters(),
            exploration={DiscreteActionSpace: CategoricalParameters()},
            memory=EpisodicExperienceReplayParameters(),
            networks={"main": ACERNetworkParameters()},
        )

    @property
    def path(self):
        return 'rl_coach.agents.acer_agent:ACERAgent'
# Actor-Critic with Experience Replay - https://arxiv.org/abs/1611.01224
class ACERAgent(PolicyOptimizationAgent):
    """Actor-Critic with Experience Replay (ACER) agent - https://arxiv.org/abs/1611.01224

    Combines on-policy actor-critic updates with off-policy replay iterations that use
    truncated importance sampling and the Q-retrace estimator.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals definition
        self.q_loss = self.register_signal('Q Loss')
        self.policy_loss = self.register_signal('Policy Loss')
        self.probability_loss = self.register_signal('Probability Loss')
        self.bias_correction_loss = self.register_signal('Bias Correction Loss')
        self.unclipped_grads = self.register_signal('Grads (unclipped)')
        self.V_Values = self.register_signal('Values')
        self.kl_divergence = self.register_signal('KL Divergence')
    def _learn_from_batch(self, batch):
        """Run a single ACER update (used for both the on-policy and replayed batches).

        Computes Q-retrace targets backwards through the batch and trains the shared
        network's Q head and policy head.

        :param batch: a Batch of (consecutive) transitions to learn from
        :return: (total_loss, losses, unclipped_grads)
        """
        # extra tensors fetched from the policy head for logging
        fetches = [self.networks['main'].online_network.output_heads[1].probability_loss,
                   self.networks['main'].online_network.output_heads[1].bias_correction_loss,
                   self.networks['main'].online_network.output_heads[1].kl_divergence]
        # batch contains a list of transitions to learn from
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
        # get the values for the current states
        Q_values, policy_prob = self.networks['main'].online_network.predict(batch.states(network_keys))
        avg_policy_prob = self.networks['main'].target_network.predict(batch.states(network_keys))[1]
        # V(s) = sum_a pi(a|s) * Q(s, a)
        current_state_values = np.sum(policy_prob * Q_values, axis=1)
        actions = batch.actions()
        num_transitions = batch.size
        # NOTE(review): this aliases Q_values (no copy) - the in-place writes in the loop
        # below also mutate Q_values, which is later fed as 'output_1_3'; confirm intended.
        Q_head_targets = Q_values
        Q_i = Q_values[np.arange(num_transitions), actions]
        # importance weights of the current policy vs. the behavior policy that collected the batch
        mu = batch.info('all_action_probabilities')
        rho = policy_prob / (mu + eps)
        rho_i = rho[np.arange(batch.size), actions]
        rho_bar = np.minimum(1.0, rho_i)
        # bootstrap the retrace recursion: 0 for terminal ends, V(s_T) otherwise
        if batch.game_overs()[-1]:
            Qret = 0
        else:
            result = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))
            Qret = np.sum(result[0] * result[1], axis=1)[0]
        # Q-retrace: walk the batch backwards, correcting with truncated importance weights
        for i in reversed(range(num_transitions)):
            Qret = batch.rewards()[i] + self.ap.algorithm.discount * Qret
            Q_head_targets[i, actions[i]] = Qret
            Qret = rho_bar[i] * (Qret - Q_i[i]) + current_state_values[i]
        Q_retrace = Q_head_targets[np.arange(num_transitions), actions]
        # train
        result = self.networks['main'].train_and_sync_networks({**batch.states(network_keys),
                                                                'output_1_0': actions,
                                                                'output_1_1': rho,
                                                                'output_1_2': rho_i,
                                                                'output_1_3': Q_values,
                                                                'output_1_4': Q_retrace,
                                                                'output_1_5': avg_policy_prob},
                                                               [Q_head_targets, current_state_values],
                                                               additional_fetches=fetches)
        # soft-update the target network (the average policy)
        for network in self.networks.values():
            network.update_target_network(self.ap.algorithm.rate_for_copying_weights_to_target)
        # logging
        total_loss, losses, unclipped_grads, fetch_result = result[:4]
        self.q_loss.add_sample(losses[0])
        self.policy_loss.add_sample(losses[1])
        self.probability_loss.add_sample(fetch_result[0])
        self.bias_correction_loss.add_sample(fetch_result[1])
        self.unclipped_grads.add_sample(unclipped_grads)
        self.V_Values.add_sample(current_state_values)
        self.kl_divergence.add_sample(fetch_result[2])
        return total_loss, losses, unclipped_grads
    def learn_from_batch(self, batch):
        """One on-policy iteration on the given batch, followed (once the replay buffer is
        warm) by a Poisson-sampled number of off-policy iterations on replayed episodes.

        :param batch: the on-policy Batch to learn from
        :return: accumulated (total_loss, losses, unclipped_grads) over all iterations
        """
        # perform on-policy training iteration
        total_loss, losses, unclipped_grads = self._learn_from_batch(batch)
        if self.ap.algorithm.ratio_of_replay > 0 \
                and self.memory.num_transitions() > self.ap.algorithm.num_transitions_to_start_replay:
            n = np.random.poisson(self.ap.algorithm.ratio_of_replay)
            # perform n off-policy training iterations
            for _ in range(n):
                new_batch = Batch(self.call_memory('sample', (self.ap.algorithm.num_steps_between_gradient_updates, True)))
                result = self._learn_from_batch(new_batch)
                total_loss += result[0]
                losses += result[1]
                unclipped_grads += result[2]
        return total_loss, losses, unclipped_grads
    def get_prediction(self, states):
        tf_input_state = self.prepare_batch_for_inference(states, "main")
        # output #0 is the Q head (see the unpack in _learn_from_batch); keep only the policy probabilities
        return self.networks['main'].online_network.predict(tf_input_state)[1:]
from typing import Union
import numpy as np
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import NAFHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, AgentParameters, \
NetworkParameters
from rl_coach.core_types import ActionInfo, EnvironmentSteps
from rl_coach.exploration_policies.ou_process import OUProcessParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import BoxActionSpace
class NAFNetworkParameters(NetworkParameters):
    """Network parameters for NAF: observation embedder -> FC middleware -> NAF head."""

    def __init__(self):
        super().__init__()
        # topology
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [NAFHeadParameters()]
        # training settings
        self.optimizer_type = 'Adam'
        self.learning_rate = 0.001
        self.create_target_network = True
        self.async_training = True
class NAFAlgorithmParameters(AlgorithmParameters):
    """
    :param num_consecutive_training_steps: (int)
        The number of consecutive training iterations to run between acting phases
    :param num_steps_between_copying_online_weights_to_target: (StepMethod)
        The number of steps between copying the online network weights to the target network weights
    :param rate_for_copying_weights_to_target: (float)
        The soft-update rate used when copying the online network weights to the target network
    """
    def __init__(self):
        super().__init__()
        self.num_consecutive_training_steps = 5
        self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)
        self.rate_for_copying_weights_to_target = 0.001
class NAFAgentParameters(AgentParameters):
    """Bundles the algorithm, exploration, memory and network parameters for a NAF agent."""

    def __init__(self):
        super().__init__(
            algorithm=NAFAlgorithmParameters(),
            exploration=OUProcessParameters(),
            memory=EpisodicExperienceReplayParameters(),
            networks={"main": NAFNetworkParameters()},
        )

    @property
    def path(self):
        return 'rl_coach.agents.naf_agent:NAFAgent'
# Normalized Advantage Functions - https://arxiv.org/pdf/1603.00748.pdf
class NAFAgent(ValueOptimizationAgent):
    """Normalized Advantage Functions (NAF) agent - https://arxiv.org/pdf/1603.00748.pdf

    A Q-learning variant for continuous control: the NAF head decomposes
    Q(s, a) = V(s) + A(s, a), so the greedy action is the head's mu output.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals for logging the NAF head's internal quantities
        self.l_values = self.register_signal("L")
        self.a_values = self.register_signal("Advantage")
        self.mu_values = self.register_signal("Action")
        self.v_values = self.register_signal("V")
        self.TD_targets = self.register_signal("TD targets")
    def learn_from_batch(self, batch):
        """Train the network on 1-step TD targets bootstrapped from the target network's V output.

        :param batch: a Batch of transitions to learn from
        :return: (total_loss, losses, unclipped_grads)
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
        # TD error = r + discount*v_st_plus_1 - q_st
        v_st_plus_1 = self.networks['main'].target_network.predict(
            batch.next_states(network_keys),
            self.networks['main'].target_network.output_heads[0].V,
            squeeze_output=False,
        )
        # terminal states get no bootstrap (the (1 - game_over) factor zeroes the V term)
        TD_targets = np.expand_dims(batch.rewards(), -1) + \
                     (1.0 - np.expand_dims(batch.game_overs(), -1)) * self.ap.algorithm.discount * v_st_plus_1
        self.TD_targets.add_sample(TD_targets)
        # 'output_0_0' feeds the taken actions into the NAF head so Q(s, a) can be formed
        result = self.networks['main'].train_and_sync_networks({**batch.states(network_keys),
                                                                'output_0_0': batch.actions(len(batch.actions().shape) == 1)
                                                                }, TD_targets)
        total_loss, losses, unclipped_grads = result[:3]
        return total_loss, losses, unclipped_grads
    def choose_action(self, curr_state):
        """Pick an action by predicting mu(s) and passing it through the exploration policy.

        :param curr_state: the current environment state
        :return: an ActionInfo holding the chosen action and its Q value
        :raises ValueError: if the action space is not continuous (BoxActionSpace)
        """
        if type(self.spaces.action) != BoxActionSpace:
            raise ValueError('NAF works only for continuous control problems')
        # convert to batch so we can run it through the network
        tf_input_state = self.prepare_batch_for_inference(curr_state, 'main')
        naf_head = self.networks['main'].online_network.output_heads[0]
        action_values = self.networks['main'].online_network.predict(tf_input_state, outputs=naf_head.mu,
                                                                     squeeze_output=False)
        # get the actual action to use
        action = self.exploration_policy.get_action(action_values)
        # get the internal values for logging (second pass feeds mu back in as the action)
        outputs = [naf_head.mu, naf_head.Q, naf_head.L, naf_head.A, naf_head.V]
        result = self.networks['main'].online_network.predict(
            {**tf_input_state, 'output_0_0': action_values},
            outputs=outputs
        )
        mu, Q, L, A, V = result
        # store the q values statistics for logging
        self.q_values.add_sample(Q)
        self.l_values.add_sample(L)
        self.a_values.add_sample(A)
        self.mu_values.add_sample(mu)
        self.v_values.add_sample(V)
        action_info = ActionInfo(action=action, action_value=Q)
        return action_info
from typing import Union
from rl_coach.agents.imitation_agent import ImitationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import RegressionHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AgentParameters, MiddlewareScheme, NetworkParameters, AlgorithmParameters
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.non_episodic.balanced_experience_replay import BalancedExperienceReplayParameters
class CILAlgorithmParameters(AlgorithmParameters):
    """
    :param state_key_with_the_class_index: (str)
        The key of the state dictionary which corresponds to the value that will be used to control the class index.
    """
    def __init__(self):
        super().__init__()
        # default matches the CARLA conditional-imitation setup - TODO confirm for other environments
        self.state_key_with_the_class_index = 'high_level_command'
class CILNetworkParameters(NetworkParameters):
    """Network parameters for conditional imitation learning: a regression head over actions."""

    def __init__(self):
        super().__init__()
        # topology
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters(scheme=MiddlewareScheme.Medium)
        self.heads_parameters = [RegressionHeadParameters()]
        # training settings
        self.optimizer_type = 'Adam'
        self.batch_size = 32
        self.create_target_network = False
        self.replace_mse_with_huber_loss = False
class CILAgentParameters(AgentParameters):
    """Bundles the algorithm, exploration, memory and network parameters for a CIL agent."""

    def __init__(self):
        super().__init__(
            algorithm=CILAlgorithmParameters(),
            exploration=EGreedyParameters(),
            memory=BalancedExperienceReplayParameters(),
            networks={"main": CILNetworkParameters()},
        )

    @property
    def path(self):
        return 'rl_coach.agents.cil_agent:CILAgent'
# Conditional Imitation Learning Agent: https://arxiv.org/abs/1710.02410
class CILAgent(ImitationAgent):
    """Conditional Imitation Learning agent - https://arxiv.org/abs/1710.02410

    The network predicts one set of action values per command branch; a class index
    taken from the state (state_key_with_the_class_index) selects which branch to use.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # the branch index extracted from the most recent state; used by extract_action_values
        self.current_high_level_control = 0
    def choose_action(self, curr_state):
        # remember which branch the current state selects before delegating to the base class
        self.current_high_level_control = curr_state[self.ap.algorithm.state_key_with_the_class_index]
        return super().choose_action(curr_state)
    def extract_action_values(self, prediction):
        # select the output branch matching the current high level command
        return prediction[self.current_high_level_control].squeeze()
    def learn_from_batch(self, batch):
        """Train only the branch selected by each transition's class index: start from the
        network's own predictions as targets, then overwrite the selected branch with the
        demonstrated action, so the other branches receive zero gradient.

        :param batch: a Batch of demonstration transitions
        :return: (total_loss, losses, unclipped_grads)
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
        target_values = self.networks['main'].online_network.predict({**batch.states(network_keys)})
        branch_to_update = batch.states([self.ap.algorithm.state_key_with_the_class_index])[self.ap.algorithm.state_key_with_the_class_index]
        for idx, branch in enumerate(branch_to_update):
            target_values[branch][idx] = batch.actions()[idx]
        result = self.networks['main'].train_and_sync_networks({**batch.states(network_keys)}, target_values)
        total_loss, losses, unclipped_grads = result[:3]
        return total_loss, losses, unclipped_grads
import copy
from typing import Union
from collections import OrderedDict
import numpy as np
from rl_coach.agents.actor_critic_agent import ActorCriticAgent
from rl_coach.agents.agent import Agent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, DDPGVHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
AgentParameters, EmbedderScheme
from rl_coach.core_types import ActionInfo, EnvironmentSteps
from rl_coach.exploration_policies.ou_process import OUProcessParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import BoxActionSpace, GoalsSpace
class DDPGCriticNetworkParameters(NetworkParameters):
    """Network parameters for the DDPG critic (Q network over state-action pairs)."""

    def __init__(self, use_batchnorm=False):
        super().__init__()
        # topology: observation + action embedders -> FC middleware -> V head
        embedders = {
            'observation': InputEmbedderParameters(batchnorm=use_batchnorm),
            'action': InputEmbedderParameters(scheme=EmbedderScheme.Shallow),
        }
        self.input_embedders_parameters = embedders
        self.middleware_parameters = FCMiddlewareParameters()
        self.heads_parameters = [DDPGVHeadParameters()]
        # optimization settings
        self.optimizer_type = 'Adam'
        self.learning_rate = 0.001
        self.adam_optimizer_beta2 = 0.999
        self.optimizer_epsilon = 1e-8
        self.batch_size = 64
        self.shared_optimizer = True
        # training / synchronization behavior
        self.async_training = False
        self.create_target_network = True
        self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class DDPGActorNetworkParameters(NetworkParameters):
    """Network parameters for the DDPG actor (deterministic policy network)."""

    def __init__(self, use_batchnorm=False):
        super().__init__()
        # topology: observation embedder -> FC middleware -> actor head (batchnorm optional throughout)
        self.input_embedders_parameters = {'observation': InputEmbedderParameters(batchnorm=use_batchnorm)}
        self.middleware_parameters = FCMiddlewareParameters(batchnorm=use_batchnorm)
        self.heads_parameters = [DDPGActorHeadParameters(batchnorm=use_batchnorm)]
        # optimization settings (note: actor learning rate is 10x smaller than the critic's)
        self.optimizer_type = 'Adam'
        self.learning_rate = 0.0001
        self.adam_optimizer_beta2 = 0.999
        self.optimizer_epsilon = 1e-8
        self.batch_size = 64
        self.shared_optimizer = True
        # training / synchronization behavior
        self.async_training = False
        self.create_target_network = True
        self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class DDPGAlgorithmParameters(AlgorithmParameters):
    """
    :param num_steps_between_copying_online_weights_to_target: (StepMethod)
        The number of steps between copying the online network weights to the target network weights.
    :param rate_for_copying_weights_to_target: (float)
        When copying the online network weights to the target network weights, a soft update will be used, which
        weights the new online network weights by rate_for_copying_weights_to_target
    :param num_consecutive_playing_steps: (StepMethod)
        The number of consecutive steps to act between every two training iterations
    :param use_target_network_for_evaluation: (bool)
        If set to True, the target network will be used for predicting the actions when choosing actions to act.
        Since the target network weights change more slowly, the predicted actions will be more consistent.
    :param action_penalty: (float)
        The amount by which to penalize the network on high action feature (pre-activation) values.
        This can prevent the actions features from saturating the TanH activation function, and therefore prevent the
        gradients from becoming very low.
    :param clip_critic_targets: (Tuple[float, float] or None)
        The range to clip the critic target to in order to prevent overestimation of the action values.
    :param use_non_zero_discount_for_terminal_states: (bool)
        If set to True, the discount factor will be used for terminal states to bootstrap the next predicted state
        values. If set to False, the terminal states reward will be taken as the target return for the network.
    """
    def __init__(self):
        super().__init__()
        self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)
        self.rate_for_copying_weights_to_target = 0.001
        self.num_consecutive_playing_steps = EnvironmentSteps(1)
        self.use_target_network_for_evaluation = False
        self.action_penalty = 0
        self.clip_critic_targets = None  # expected to be a tuple of the form (min_clip_value, max_clip_value) or None
        self.use_non_zero_discount_for_terminal_states = False
class DDPGAgentParameters(AgentParameters):
    """Bundles the algorithm, exploration, memory and network parameters for a DDPG agent."""

    def __init__(self, use_batchnorm=False):
        actor = DDPGActorNetworkParameters(use_batchnorm=use_batchnorm)
        critic = DDPGCriticNetworkParameters(use_batchnorm=use_batchnorm)
        super().__init__(algorithm=DDPGAlgorithmParameters(),
                         exploration=OUProcessParameters(),
                         memory=EpisodicExperienceReplayParameters(),
                         networks=OrderedDict([("actor", actor), ("critic", critic)]))

    @property
    def path(self):
        return 'rl_coach.agents.ddpg_agent:DDPGAgent'
# Deep Deterministic Policy Gradients Network - https://arxiv.org/pdf/1509.02971.pdf
class DDPGAgent(ActorCriticAgent):
    """Deep Deterministic Policy Gradients agent - https://arxiv.org/pdf/1509.02971.pdf

    Off-policy actor-critic for continuous action spaces: the critic learns Q(s, a)
    from 1-step bootstrapped targets, and the actor is updated with the critic's
    gradients w.r.t. the action.
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        # signals for logging / dashboards
        self.q_values = self.register_signal("Q")
        self.TD_targets_signal = self.register_signal("TD targets")
        self.action_signal = self.register_signal("actions")
    def learn_from_batch(self, batch):
        """Run one DDPG training iteration: train the critic on bootstrapped TD targets,
        then update the actor along the critic's action gradients.

        :param batch: a Batch of transitions to learn from
        :return: (total_loss, losses, unclipped_grads) from the critic training step
        """
        actor = self.networks['actor']
        critic = self.networks['critic']
        actor_keys = self.ap.network_wrappers['actor'].input_embedders_parameters.keys()
        critic_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()
        # TD error = r + discount*max(q_st_plus_1) - q_st
        # one forward pass yields the target policy's next actions and the online policy's
        # current actions (the latter is used below for the actor update)
        next_actions, actions_mean = actor.parallel_prediction([
            (actor.target_network, batch.next_states(actor_keys)),
            (actor.online_network, batch.states(actor_keys))
        ])
        critic_inputs = copy.copy(batch.next_states(critic_keys))
        critic_inputs['action'] = next_actions
        q_st_plus_1 = critic.target_network.predict(critic_inputs)[0]
        # calculate the bootstrapped TD targets while discounting terminal states according to
        # use_non_zero_discount_for_terminal_states
        if self.ap.algorithm.use_non_zero_discount_for_terminal_states:
            TD_targets = batch.rewards(expand_dims=True) + self.ap.algorithm.discount * q_st_plus_1
        else:
            TD_targets = batch.rewards(expand_dims=True) + \
                         (1.0 - batch.game_overs(expand_dims=True)) * self.ap.algorithm.discount * q_st_plus_1
        # clip the TD targets to prevent overestimation errors
        if self.ap.algorithm.clip_critic_targets:
            TD_targets = np.clip(TD_targets, *self.ap.algorithm.clip_critic_targets)
        self.TD_targets_signal.add_sample(TD_targets)
        # get the gradients of the critic output with respect to the action
        critic_inputs = copy.copy(batch.states(critic_keys))
        critic_inputs['action'] = actions_mean
        action_gradients = critic.online_network.predict(critic_inputs,
                                                         outputs=critic.online_network.gradients_wrt_inputs[1]['action'])
        # train the critic
        critic_inputs = copy.copy(batch.states(critic_keys))
        critic_inputs['action'] = batch.actions(len(batch.actions().shape) == 1)
        # also need the inputs for when applying gradients so batchnorm's update of running mean and stddev will work
        result = critic.train_and_sync_networks(critic_inputs, TD_targets, use_inputs_for_apply_gradients=True)
        total_loss, losses, unclipped_grads = result[:3]
        # apply the gradients from the critic to the actor (negated: gradient ascent on Q)
        initial_feed_dict = {actor.online_network.gradients_weights_ph[0]: -action_gradients}
        gradients = actor.online_network.predict(batch.states(actor_keys),
                                                 outputs=actor.online_network.weighted_gradients[0],
                                                 initial_feed_dict=initial_feed_dict)
        # also need the inputs for when applying gradients so batchnorm's update of running mean and stddev will work
        if actor.has_global:
            actor.apply_gradients_to_global_network(gradients, additional_inputs=copy.copy(batch.states(critic_keys)))
            actor.update_online_network()
        else:
            actor.apply_gradients_to_online_network(gradients, additional_inputs=copy.copy(batch.states(critic_keys)))
        return total_loss, losses, unclipped_grads
    def train(self):
        # skip ActorCriticAgent.train and use the generic Agent training loop
        return Agent.train(self)
    def choose_action(self, curr_state):
        """Pick an action with the actor network and pass it through the exploration policy.

        :param curr_state: the current environment state
        :return: an ActionInfo holding the chosen action and its critic Q value
        :raises ValueError: if the action space is neither BoxActionSpace nor GoalsSpace
        """
        if not (isinstance(self.spaces.action, BoxActionSpace) or isinstance(self.spaces.action, GoalsSpace)):
            raise ValueError("DDPG works only for continuous control problems")
        # convert to batch so we can run it through the network
        tf_input_state = self.prepare_batch_for_inference(curr_state, 'actor')
        if self.ap.algorithm.use_target_network_for_evaluation:
            actor_network = self.networks['actor'].target_network
        else:
            actor_network = self.networks['actor'].online_network
        action_values = actor_network.predict(tf_input_state).squeeze()
        action = self.exploration_policy.get_action(action_values)
        self.action_signal.add_sample(action)
        # get q value
        tf_input_state = self.prepare_batch_for_inference(curr_state, 'critic')
        action_batch = np.expand_dims(action, 0)
        # scalar actions need an extra wrapping level to form a (1, 1) batch
        if type(action) != np.ndarray:
            action_batch = np.array([[action]])
        tf_input_state['action'] = action_batch
        q_value = self.networks['critic'].online_network.predict(tf_input_state)[0]
        self.q_values.add_sample(q_value)
        action_info = ActionInfo(action=action,
                                 action_value=q_value)
        return action_info
from typing import Union
import numpy as np
from rl_coach.agents.ddpg_agent import DDPGAgent, DDPGAgentParameters, DDPGAlgorithmParameters
from rl_coach.core_types import RunPhase
from rl_coach.spaces import SpacesDefinition
class HACDDPGAlgorithmParameters(DDPGAlgorithmParameters):
    """
    :param time_limit: (int)
        The number of steps the agent is allowed to act for while trying to achieve its goal.
        Also used (negated) as the penalty reward when a tested sub-goal is missed.
    :param sub_goal_testing_rate: (float)
        The percent of episodes that will be used for testing the sub goals generated by the upper level agents.
    """
    def __init__(self):
        super().__init__()
        self.time_limit = 40
        self.sub_goal_testing_rate = 0.5
class HACDDPGAgentParameters(DDPGAgentParameters):
    """DDPG agent parameters with the HAC algorithm parameters swapped in."""
    def __init__(self):
        super().__init__()
        # replace the DDPG algorithm parameters installed by the base class with the HAC variant.
        # NOTE(review): assumes the base __init__ only stores the algorithm object - confirm it
        # does not derive anything else from it before this replacement.
        self.algorithm = HACDDPGAlgorithmParameters()
    @property
    def path(self):
        return 'rl_coach.agents.hac_ddpg_agent:HACDDPGAgent'
# Hierarchical Actor Critic Generating Subgoals DDPG Agent - https://arxiv.org/pdf/1712.00948.pdf
class HACDDPGAgent(DDPGAgent):
    """Hierarchical Actor Critic (HAC) DDPG agent - https://arxiv.org/pdf/1712.00948.pdf

    A DDPG agent embedded in a goal-conditioned hierarchy: upper levels emit sub-goals
    as actions, lower levels are rewarded for reaching them, and a fraction of episodes
    is used for sub-goal testing (acting without exploration noise and penalizing
    missed sub-goals).
    """
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
        self.sub_goal_testing_rate = self.ap.algorithm.sub_goal_testing_rate
        self.graph_manager = None
    def choose_action(self, curr_state):
        # top level decides, for each of his generated sub-goals, for all the layers beneath him if this is a sub-goal
        # testing phase
        graph_manager = self.parent_level_manager.parent_graph_manager
        if self.ap.is_a_highest_level_agent:
            graph_manager.should_test_current_sub_goal = np.random.rand() < self.sub_goal_testing_rate
        # during sub-goal testing the exploration policy acts deterministically (TEST phase)
        if self.phase == RunPhase.TRAIN:
            if graph_manager.should_test_current_sub_goal:
                self.exploration_policy.change_phase(RunPhase.TEST)
            else:
                self.exploration_policy.change_phase(self.phase)
        action_info = super().choose_action(curr_state)
        return action_info
    def update_transition_before_adding_to_replay_buffer(self, transition):
        """Rewrite the transition's goal, reward and termination signal according to the
        HAC hierarchy before it is stored.

        :param transition: the transition to update
        :return: the updated transition
        """
        graph_manager = self.parent_level_manager.parent_graph_manager
        # deal with goals given from a higher level agent
        if not self.ap.is_a_highest_level_agent:
            transition.state['desired_goal'] = self.current_hrl_goal
            transition.next_state['desired_goal'] = self.current_hrl_goal
            # TODO: allow setting goals which are not part of the state. e.g. state-embedding using get_prediction
            self.distance_from_goal.add_sample(self.spaces.goal.distance_from_goal(
                self.current_hrl_goal, transition.next_state))
            # the reward is defined solely by goal achievement; reaching the goal ends the episode
            goal_reward, sub_goal_reached = self.spaces.goal.get_reward_for_goal_and_state(
                self.current_hrl_goal, transition.next_state)
            transition.reward = goal_reward
            transition.game_over = transition.game_over or sub_goal_reached
        # each level tests its own generated sub goals:
        # a missed sub-goal is penalized with -time_limit so the upper level learns to emit reachable goals
        if not self.ap.is_a_lowest_level_agent and graph_manager.should_test_current_sub_goal:
            #TODO-fixme
            # _, sub_goal_reached = self.parent_level_manager.environment.agents['agent_1'].spaces.goal.\
            #     get_reward_for_goal_and_state(transition.action, transition.next_state)
            _, sub_goal_reached = self.spaces.goal.get_reward_for_goal_and_state(
                transition.action, transition.next_state)
            sub_goal_is_missed = not sub_goal_reached
            if sub_goal_is_missed:
                transition.reward = -self.ap.algorithm.time_limit
        return transition
    def set_environment_parameters(self, spaces: SpacesDefinition):
        super().set_environment_parameters(spaces)
        if self.ap.is_a_highest_level_agent:
            # the rest of the levels already have an in_action_space set to be of type GoalsSpace, thus they will have
            # their GoalsSpace set to the in_action_space in agent.set_environment_parameters()
            self.spaces.goal = self.spaces.action
            self.spaces.goal.set_target_space(self.spaces.state[self.spaces.goal.goal_name])
        if not self.ap.is_a_highest_level_agent:
            self.spaces.reward.reward_success_threshold = self.spaces.goal.reward_type.goal_reaching_reward
import os
import pickle
from typing import Union, List
import numpy as np
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import DNDQHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, AgentParameters
from rl_coach.core_types import RunPhase, EnvironmentSteps, Episode, StateType
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters, MemoryGranularity
from rl_coach.schedules import ConstantSchedule
class NECNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters()
self.heads_parameters = [DNDQHeadParameters()]
self.optimizer_type = 'Adam'
self.should_get_softmax_probabilities = False
class NECAlgorithmParameters(AlgorithmParameters):
"""
:param dnd_size: (int)
Defines the number of transitions that will be stored in each one of the DNDs. Note that the total number
of transitions that will be stored is dnd_size x num_actions.
:param l2_norm_added_delta: (float)
A small value that will be added when calculating the weight of each of the DND entries. This follows the
:math:`\delta` patameter defined in the paper.
:param new_value_shift_coefficient: (float)
In the case where a ew embedding that was added to the DND was already present, the value that will be stored
in the DND is a mix between the existing value and the new value. The mix rate is defined by
new_value_shift_coefficient.
:param number_of_knn: (int)
The number of neighbors that will be retrieved for each DND query.
:param DND_key_error_threshold: (float)
When the DND is queried for a specific embedding, this threshold will be used to determine if the embedding
exists in the DND, since exact matches of embeddings are very rare.
:param propagate_updates_to_DND: (bool)
If set to True, when the gradients of the network will be calculated, the gradients will also be
backpropagated through the keys of the DND. The keys will then be updated as well, as if they were regular
network weights.
:param n_step: (int)
The bootstrap length that will be used when calculating the state values to store in the DND.
:param bootstrap_total_return_from_old_policy: (bool)
If set to True, the bootstrap that will be used to calculate each state-action value, is the network value
when the state was first seen, and not the latest, most up-to-date network value.
"""
def __init__(self):
super().__init__()
self.dnd_size = 500000
self.l2_norm_added_delta = 0.001
self.new_value_shift_coefficient = 0.1
self.number_of_knn = 50
self.DND_key_error_threshold = 0
self.num_consecutive_playing_steps = EnvironmentSteps(4)
self.propagate_updates_to_DND = False
self.n_step = 100
self.bootstrap_total_return_from_old_policy = True
class NECMemoryParameters(EpisodicExperienceReplayParameters):
def __init__(self):
super().__init__()
self.max_size = (MemoryGranularity.Transitions, 100000)
class NECAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=NECAlgorithmParameters(),
exploration=EGreedyParameters(),
memory=NECMemoryParameters(),
networks={"main": NECNetworkParameters()})
self.exploration.epsilon_schedule = ConstantSchedule(0.1)
self.exploration.evaluation_epsilon = 0.01
@property
def path(self):
return 'rl_coach.agents.nec_agent:NECAgent'
# Neural Episodic Control - https://arxiv.org/pdf/1703.01988.pdf
class NECAgent(ValueOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.current_episode_state_embeddings = []
self.training_started = False
self.current_episode_buffer = \
Episode(discount=self.ap.algorithm.discount,
n_step=self.ap.algorithm.n_step,
bootstrap_total_return_from_old_policy=self.ap.algorithm.bootstrap_total_return_from_old_policy)
def learn_from_batch(self, batch):
if not self.networks['main'].online_network.output_heads[0].DND.has_enough_entries(self.ap.algorithm.number_of_knn):
return 0, [], 0
else:
if not self.training_started:
self.training_started = True
screen.log_title("Finished collecting initial entries in DND. Starting to train network...")
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
TD_targets = self.networks['main'].online_network.predict(batch.states(network_keys))
bootstrapped_return_from_old_policy = batch.n_step_discounted_rewards()
# only update the action that we have actually done in this transition
for i in range(batch.size):
TD_targets[i, batch.actions()[i]] = bootstrapped_return_from_old_policy[i]
# set the gradients to fetch for the DND update
fetches = []
head = self.networks['main'].online_network.output_heads[0]
if self.ap.algorithm.propagate_updates_to_DND:
fetches = [head.dnd_embeddings_grad, head.dnd_values_grad, head.dnd_indices]
# train the neural network
result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets, fetches)
total_loss, losses, unclipped_grads = result[:3]
# update the DND keys and values using the extracted gradients
if self.ap.algorithm.propagate_updates_to_DND:
embedding_gradients = np.swapaxes(result[-1][0], 0, 1)
value_gradients = np.swapaxes(result[-1][1], 0, 1)
indices = np.swapaxes(result[-1][2], 0, 1)
head.DND.update_keys_and_values(batch.actions(), embedding_gradients, value_gradients, indices)
return total_loss, losses, unclipped_grads
def act(self):
if self.phase == RunPhase.HEATUP:
# get embedding in heatup (otherwise we get it through get_prediction)
embedding = self.networks['main'].online_network.predict(
self.prepare_batch_for_inference(self.curr_state, 'main'),
outputs=self.networks['main'].online_network.state_embedding)
self.current_episode_state_embeddings.append(embedding.squeeze())
return super().act()
def get_all_q_values_for_states(self, states: StateType, additional_outputs: List = None):
# we need to store the state embeddings regardless if the action is random or not
return self.get_prediction_and_update_embeddings(states)
def get_all_q_values_for_states_and_softmax_probabilities(self, states: StateType):
# get the actions q values and the state embedding
embedding, actions_q_values, softmax_probabilities = self.networks['main'].online_network.predict(
self.prepare_batch_for_inference(states, 'main'),
outputs=[self.networks['main'].online_network.state_embedding,
self.networks['main'].online_network.output_heads[0].output,
self.networks['main'].online_network.output_heads[0].softmax]
)
if self.phase != RunPhase.TEST:
# store the state embedding for inserting it to the DND later
self.current_episode_state_embeddings.append(embedding.squeeze())
actions_q_values = actions_q_values[0][0]
return actions_q_values, softmax_probabilities
def get_prediction_and_update_embeddings(self, states):
# get the actions q values and the state embedding
embedding, actions_q_values = self.networks['main'].online_network.predict(
self.prepare_batch_for_inference(states, 'main'),
outputs=[self.networks['main'].online_network.state_embedding,
self.networks['main'].online_network.output_heads[0].output]
)
if self.phase != RunPhase.TEST:
# store the state embedding for inserting it to the DND later
self.current_episode_state_embeddings.append(embedding[0].squeeze())
actions_q_values = actions_q_values[0][0]
return actions_q_values
def reset_internal_state(self):
super().reset_internal_state()
self.current_episode_state_embeddings = []
self.current_episode_buffer = \
Episode(discount=self.ap.algorithm.discount,
n_step=self.ap.algorithm.n_step,
bootstrap_total_return_from_old_policy=self.ap.algorithm.bootstrap_total_return_from_old_policy)
def handle_episode_ended(self):
super().handle_episode_ended()
# get the last full episode that we have collected
episode = self.call_memory('get_last_complete_episode')
if episode is not None and self.phase != RunPhase.TEST:
assert len(self.current_episode_state_embeddings) == episode.length()
discounted_rewards = episode.get_transitions_attribute('n_step_discounted_rewards')
actions = episode.get_transitions_attribute('action')
self.networks['main'].online_network.output_heads[0].DND.add(self.current_episode_state_embeddings,
actions, discounted_rewards)
def save_checkpoint(self, checkpoint_prefix):
super().save_checkpoint(checkpoint_prefix)
with open(os.path.join(self.ap.task_parameters.checkpoint_save_dir, str(checkpoint_prefix) + '.dnd'), 'wb') as f:
pickle.dump(self.networks['main'].online_network.output_heads[0].DND, f, pickle.HIGHEST_PROTOCOL) | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/nec_agent.py | 0.903955 | 0.383786 | nec_agent.py | pypi |
from typing import Union
import numpy as np
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent, PolicyGradientRescaler
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import PolicyHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
AgentParameters
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace
class PolicyGradientNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters()
self.heads_parameters = [PolicyHeadParameters()]
self.async_training = True
class PolicyGradientAlgorithmParameters(AlgorithmParameters):
"""
:param policy_gradient_rescaler: (PolicyGradientRescaler)
The rescaler type to use for the policy gradient loss. For policy gradients, we calculate log probability of
the action and then multiply it by the policy gradient rescaler. The most basic rescaler is the discounter
return, but there are other rescalers that are intended for reducing the variance of the updates.
:param apply_gradients_every_x_episodes: (int)
The number of episodes between applying the accumulated gradients to the network. After every
num_steps_between_gradient_updates steps, the agent will calculate the gradients for the collected data,
it will then accumulate it in internal accumulators, and will only apply them to the network once in every
apply_gradients_every_x_episodes episodes.
:param beta_entropy: (float)
A factor which defines the amount of entropy regularization to apply to the network. The entropy of the actions
will be added to the loss and scaled by the given beta factor.
:param num_steps_between_gradient_updates: (int)
The number of steps between calculating gradients for the collected data. In the A3C paper, this parameter is
called t_max. Since this algorithm is on-policy, only the steps collected between each two gradient calculations
are used in the batch.
"""
def __init__(self):
super().__init__()
self.policy_gradient_rescaler = PolicyGradientRescaler.FUTURE_RETURN_NORMALIZED_BY_TIMESTEP
self.apply_gradients_every_x_episodes = 5
self.beta_entropy = 0
self.num_steps_between_gradient_updates = 20000 # this is called t_max in all the papers
class PolicyGradientsAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=PolicyGradientAlgorithmParameters(),
exploration={DiscreteActionSpace: CategoricalParameters(),
BoxActionSpace: AdditiveNoiseParameters()},
memory=SingleEpisodeBufferParameters(),
networks={"main": PolicyGradientNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.policy_gradients_agent:PolicyGradientsAgent'
class PolicyGradientsAgent(PolicyOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.returns_mean = self.register_signal('Returns Mean')
self.returns_variance = self.register_signal('Returns Variance')
self.last_gradient_update_step_idx = 0
def learn_from_batch(self, batch):
# batch contains a list of episodes to learn from
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
total_returns = batch.n_step_discounted_rewards()
for i in reversed(range(batch.size)):
if self.policy_gradient_rescaler == PolicyGradientRescaler.TOTAL_RETURN:
total_returns[i] = total_returns[0]
elif self.policy_gradient_rescaler == PolicyGradientRescaler.FUTURE_RETURN:
# just take the total return as it is
pass
elif self.policy_gradient_rescaler == PolicyGradientRescaler.FUTURE_RETURN_NORMALIZED_BY_EPISODE:
# we can get a single transition episode while playing Doom Basic, causing the std to be 0
if self.std_discounted_return != 0:
total_returns[i] = (total_returns[i] - self.mean_discounted_return) / self.std_discounted_return
else:
total_returns[i] = 0
elif self.policy_gradient_rescaler == PolicyGradientRescaler.FUTURE_RETURN_NORMALIZED_BY_TIMESTEP:
total_returns[i] -= self.mean_return_over_multiple_episodes[i]
else:
screen.warning("WARNING: The requested policy gradient rescaler is not available")
targets = total_returns
actions = batch.actions()
if type(self.spaces.action) != DiscreteActionSpace and len(actions.shape) < 2:
actions = np.expand_dims(actions, -1)
self.returns_mean.add_sample(np.mean(total_returns))
self.returns_variance.add_sample(np.std(total_returns))
result = self.networks['main'].online_network.accumulate_gradients(
{**batch.states(network_keys), 'output_0_0': actions}, targets
)
total_loss, losses, unclipped_grads = result[:3]
return total_loss, losses, unclipped_grads | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/policy_gradients_agent.py | 0.950365 | 0.443058 | policy_gradients_agent.py | pypi |
from typing import Union
import numpy as np
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import QHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, AgentParameters, NetworkParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters
from rl_coach.utils import last_sample
class NStepQNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters()
self.heads_parameters = [QHeadParameters()]
self.optimizer_type = 'Adam'
self.async_training = True
self.shared_optimizer = True
self.create_target_network = True
class NStepQAlgorithmParameters(AlgorithmParameters):
"""
:param num_steps_between_copying_online_weights_to_target: (StepMethod)
The number of steps between copying the online network weights to the target network weights.
:param apply_gradients_every_x_episodes: (int)
The number of episodes between applying the accumulated gradients to the network. After every
num_steps_between_gradient_updates steps, the agent will calculate the gradients for the collected data,
it will then accumulate it in internal accumulators, and will only apply them to the network once in every
apply_gradients_every_x_episodes episodes.
:param num_steps_between_gradient_updates: (int)
The number of steps between calculating gradients for the collected data. In the A3C paper, this parameter is
called t_max. Since this algorithm is on-policy, only the steps collected between each two gradient calculations
are used in the batch.
:param targets_horizon: (str)
Should be either 'N-Step' or '1-Step', and defines the length for which to bootstrap the network values over.
Essentially, 1-Step follows the regular 1 step bootstrapping Q learning update. For more information,
please refer to the original paper (https://arxiv.org/abs/1602.01783)
"""
def __init__(self):
super().__init__()
self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(10000)
self.apply_gradients_every_x_episodes = 1
self.num_steps_between_gradient_updates = 5 # this is called t_max in all the papers
self.targets_horizon = 'N-Step'
class NStepQAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=NStepQAlgorithmParameters(),
exploration=EGreedyParameters(),
memory=SingleEpisodeBufferParameters(),
networks={"main": NStepQNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.n_step_q_agent:NStepQAgent'
# N Step Q Learning Agent - https://arxiv.org/abs/1602.01783
class NStepQAgent(ValueOptimizationAgent, PolicyOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.last_gradient_update_step_idx = 0
self.q_values = self.register_signal('Q Values')
self.value_loss = self.register_signal('Value Loss')
def learn_from_batch(self, batch):
# batch contains a list of episodes to learn from
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# get the values for the current states
state_value_head_targets = self.networks['main'].online_network.predict(batch.states(network_keys))
# the targets for the state value estimator
if self.ap.algorithm.targets_horizon == '1-Step':
# 1-Step Q learning
q_st_plus_1 = self.networks['main'].target_network.predict(batch.next_states(network_keys))
for i in reversed(range(batch.size)):
state_value_head_targets[i][batch.actions()[i]] = \
batch.rewards()[i] \
+ (1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount * np.max(q_st_plus_1[i], 0)
elif self.ap.algorithm.targets_horizon == 'N-Step':
# N-Step Q learning
if batch.game_overs()[-1]:
R = 0
else:
R = np.max(self.networks['main'].target_network.predict(last_sample(batch.next_states(network_keys))))
for i in reversed(range(batch.size)):
R = batch.rewards()[i] + self.ap.algorithm.discount * R
state_value_head_targets[i][batch.actions()[i]] = R
else:
assert True, 'The available values for targets_horizon are: 1-Step, N-Step'
# add Q value samples for logging
self.q_values.add_sample(state_value_head_targets)
# train
result = self.networks['main'].online_network.accumulate_gradients(batch.states(network_keys), [state_value_head_targets])
# logging
total_loss, losses, unclipped_grads = result[:3]
self.value_loss.add_sample(losses[0])
return total_loss, losses, unclipped_grads
def train(self):
# update the target network of every network that has a target network
if any([network.has_target for network in self.networks.values()]) \
and self._should_update_online_weights_to_target():
for network in self.networks.values():
network.update_target_network(self.ap.algorithm.rate_for_copying_weights_to_target)
self.agent_logger.create_signal_value('Update Target Network', 1)
else:
self.agent_logger.create_signal_value('Update Target Network', 0, overwrite=False)
return PolicyOptimizationAgent.train(self) | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/n_step_q_agent.py | 0.924947 | 0.416678 | n_step_q_agent.py | pypi |
from typing import Union, List, Dict
import numpy as np
from rl_coach.core_types import EnvResponse, ActionInfo, RunPhase, PredictionType, ActionType, Transition
from rl_coach.saver import SaverCollection
class AgentInterface(object):
def __init__(self):
self._phase = RunPhase.HEATUP
self._parent = None
self.spaces = None
@property
def parent(self):
"""
Get the parent class of the agent
:return: the current phase
"""
return self._parent
@parent.setter
def parent(self, val):
"""
Change the parent class of the agent
:param val: the new parent
:return: None
"""
self._parent = val
@property
def phase(self) -> RunPhase:
"""
Get the phase of the agent
:return: the current phase
"""
return self._phase
@phase.setter
def phase(self, val: RunPhase):
"""
Change the phase of the agent
:param val: the new phase
:return: None
"""
self._phase = val
def reset_internal_state(self) -> None:
"""
Reset the episode parameters for the agent
:return: None
"""
raise NotImplementedError("")
def train(self) -> Union[float, List]:
"""
Train the agents network
:return: The loss of the training
"""
raise NotImplementedError("")
def act(self) -> ActionInfo:
"""
Get a decision of the next action to take.
The action is dependent on the current state which the agent holds from resetting the environment or from
the observe function.
:return: A tuple containing the actual action and additional info on the action
"""
raise NotImplementedError("")
def observe(self, env_response: EnvResponse) -> bool:
"""
Gets a response from the environment.
Processes this information for later use. For example, create a transition and store it in memory.
The action info (a class containing any info the agent wants to store regarding its action decision process) is
stored by the agent itself when deciding on the action.
:param env_response: a EnvResponse containing the response from the environment
:return: a done signal which is based on the agent knowledge. This can be different from the done signal from
the environment. For example, an agent can decide to finish the episode each time it gets some
intrinsic reward
"""
raise NotImplementedError("")
def save_checkpoint(self, checkpoint_prefix: str) -> None:
"""
Save the model of the agent to the disk. This can contain the network parameters, the memory of the agent, etc.
:param checkpoint_prefix: The prefix of the checkpoint file to save
:return: None
"""
raise NotImplementedError("")
def get_predictions(self, states: Dict, prediction_type: PredictionType) -> np.ndarray:
"""
Get a prediction from the agent with regard to the requested prediction_type. If the agent cannot predict this
type of prediction_type, or if there is more than possible way to do so, raise a ValueException.
:param states:
:param prediction_type:
:return: the agent's prediction
"""
raise NotImplementedError("")
def set_incoming_directive(self, action: ActionType) -> None:
"""
Pass a higher level command (directive) to the agent.
For example, a higher level agent can set the goal of the agent.
:param action: the directive to pass to the agent
:return: None
"""
raise NotImplementedError("")
def collect_savers(self, parent_path_suffix: str) -> SaverCollection:
"""
Collect all of agent savers
:param parent_path_suffix: path suffix of the parent of the agent
(could be name of level manager or composite agent)
:return: collection of all agent savers
"""
raise NotImplementedError("")
def handle_episode_ended(self) -> None:
"""
Make any changes needed when each episode is ended.
This includes incrementing counters, updating full episode dependent values, updating logs, etc.
This function is called right after each episode is ended.
:return: None
"""
raise NotImplementedError("")
def run_off_policy_evaluation(self) -> None:
"""
Run off-policy evaluation estimators to evaluate the trained policy performance against a dataset.
Should only be implemented for off-policy RL algorithms.
:return: None
"""
raise NotImplementedError("") | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/agent_interface.py | 0.932538 | 0.649064 | agent_interface.py | pypi |
from typing import Union
import numpy as np
from rl_coach.agents.dqn_agent import DQNAgentParameters, DQNAlgorithmParameters
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
class PALAlgorithmParameters(DQNAlgorithmParameters):
"""
:param pal_alpha: (float)
A factor that weights the amount by which the advantage learning update will be taken into account.
:param persistent_advantage_learning: (bool)
If set to True, the persistent mode of advantage learning will be used, which encourages the agent to take
the same actions one after the other instead of changing actions.
:param monte_carlo_mixing_rate: (float)
The amount of monte carlo values to mix into the targets of the network. The monte carlo values are just the
total discounted returns, and they can help reduce the time it takes for the network to update to the newly
seen values, since it is not based on bootstrapping the current network values.
"""
def __init__(self):
super().__init__()
self.pal_alpha = 0.9
self.persistent_advantage_learning = False
self.monte_carlo_mixing_rate = 0.1
class PALAgentParameters(DQNAgentParameters):
def __init__(self):
super().__init__()
self.algorithm = PALAlgorithmParameters()
self.memory = EpisodicExperienceReplayParameters()
@property
def path(self):
return 'rl_coach.agents.pal_agent:PALAgent'
# Persistent Advantage Learning - https://arxiv.org/pdf/1512.04860.pdf
class PALAgent(ValueOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.alpha = agent_parameters.algorithm.pal_alpha
self.persistent = agent_parameters.algorithm.persistent_advantage_learning
self.monte_carlo_mixing_rate = agent_parameters.algorithm.monte_carlo_mixing_rate
def learn_from_batch(self, batch):
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# next state values
q_st_plus_1_target, q_st_plus_1_online = self.networks['main'].parallel_prediction([
(self.networks['main'].target_network, batch.next_states(network_keys)),
(self.networks['main'].online_network, batch.next_states(network_keys))
])
selected_actions = np.argmax(q_st_plus_1_online, 1)
v_st_plus_1_target = np.max(q_st_plus_1_target, 1)
# current state values
q_st_target, q_st_online = self.networks['main'].parallel_prediction([
(self.networks['main'].target_network, batch.states(network_keys)),
(self.networks['main'].online_network, batch.states(network_keys))
])
v_st_target = np.max(q_st_target, 1)
# calculate TD error
TD_targets = np.copy(q_st_online)
total_returns = batch.n_step_discounted_rewards()
for i in range(batch.size):
TD_targets[i, batch.actions()[i]] = batch.rewards()[i] + \
(1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount * \
q_st_plus_1_target[i][selected_actions[i]]
advantage_learning_update = v_st_target[i] - q_st_target[i, batch.actions()[i]]
next_advantage_learning_update = v_st_plus_1_target[i] - q_st_plus_1_target[i, selected_actions[i]]
# Persistent Advantage Learning or Regular Advantage Learning
if self.persistent:
TD_targets[i, batch.actions()[i]] -= self.alpha * min(advantage_learning_update, next_advantage_learning_update)
else:
TD_targets[i, batch.actions()[i]] -= self.alpha * advantage_learning_update
# mixing monte carlo updates
monte_carlo_target = total_returns[i]
TD_targets[i, batch.actions()[i]] = (1 - self.monte_carlo_mixing_rate) * TD_targets[i, batch.actions()[i]] \
+ self.monte_carlo_mixing_rate * monte_carlo_target
result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets)
total_loss, losses, unclipped_grads = result[:3]
return total_loss, losses, unclipped_grads | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/pal_agent.py | 0.909963 | 0.464719 | pal_agent.py | pypi |
from typing import Union
import numpy as np
from rl_coach.agents.dqn_agent import DQNNetworkParameters, DQNAgentParameters
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.exploration_policies.bootstrapped import BootstrappedParameters
class BootstrappedDQNNetworkParameters(DQNNetworkParameters):
def __init__(self):
super().__init__()
self.heads_parameters[0].num_output_head_copies = 10
self.heads_parameters[0].rescale_gradient_from_head_by_factor = 1.0/self.heads_parameters[0].num_output_head_copies
class BootstrappedDQNAgentParameters(DQNAgentParameters):
def __init__(self):
super().__init__()
self.exploration = BootstrappedParameters()
self.network_wrappers = {"main": BootstrappedDQNNetworkParameters()}
@property
def path(self):
return 'rl_coach.agents.bootstrapped_dqn_agent:BootstrappedDQNAgent'
# Bootstrapped DQN - https://arxiv.org/pdf/1602.04621.pdf
class BootstrappedDQNAgent(ValueOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
def reset_internal_state(self):
super().reset_internal_state()
self.exploration_policy.select_head()
def learn_from_batch(self, batch):
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
next_states_online_values = self.networks['main'].online_network.predict(batch.next_states(network_keys))
result = self.networks['main'].parallel_prediction([
(self.networks['main'].target_network, batch.next_states(network_keys)),
(self.networks['main'].online_network, batch.states(network_keys))
])
q_st_plus_1 = result[:self.ap.exploration.architecture_num_q_heads]
TD_targets = result[self.ap.exploration.architecture_num_q_heads:]
# add Q value samples for logging
# initialize with the current prediction so that we will
# only update the action that we have actually done in this transition
for i in range(batch.size):
mask = batch[i].info['mask']
for head_idx in range(self.ap.exploration.architecture_num_q_heads):
self.q_values.add_sample(TD_targets[head_idx])
if mask[head_idx] == 1:
selected_action = np.argmax(next_states_online_values[head_idx][i], 0)
TD_targets[head_idx][i, batch.actions()[i]] = \
batch.rewards()[i] + (1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount \
* q_st_plus_1[head_idx][i][selected_action]
result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets)
total_loss, losses, unclipped_grads = result[:3]
return total_loss, losses, unclipped_grads
def observe(self, env_response):
mask = np.random.binomial(1, self.ap.exploration.bootstrapped_data_sharing_probability,
self.ap.exploration.architecture_num_q_heads)
env_response.info['mask'] = mask
return super().observe(env_response) | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/bootstrapped_dqn_agent.py | 0.895651 | 0.238972 | bootstrapped_dqn_agent.py | pypi |
from copy import copy
from typing import Union
import numpy as np
from rl_coach.agents.dqn_agent import DQNAgentParameters, DQNNetworkParameters, DQNAlgorithmParameters
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.head_parameters import QuantileRegressionQHeadParameters
from rl_coach.core_types import StateType
from rl_coach.schedules import LinearSchedule
class QuantileRegressionDQNNetworkParameters(DQNNetworkParameters):
def __init__(self):
super().__init__()
self.heads_parameters = [QuantileRegressionQHeadParameters()]
self.learning_rate = 0.00005
self.optimizer_epsilon = 0.01 / 32
class QuantileRegressionDQNAlgorithmParameters(DQNAlgorithmParameters):
"""
:param atoms: (int)
the number of atoms to predict for each action
:param huber_loss_interval: (float)
One of the huber loss parameters, and is referred to as :math:`\kapa` in the paper.
It describes the interval [-k, k] in which the huber loss acts as a MSE loss.
"""
def __init__(self):
super().__init__()
self.atoms = 200
self.huber_loss_interval = 1 # called k in the paper
class QuantileRegressionDQNAgentParameters(DQNAgentParameters):
def __init__(self):
super().__init__()
self.algorithm = QuantileRegressionDQNAlgorithmParameters()
self.network_wrappers = {"main": QuantileRegressionDQNNetworkParameters()}
self.exploration.epsilon_schedule = LinearSchedule(1, 0.01, 1000000)
self.exploration.evaluation_epsilon = 0.001
@property
def path(self):
return 'rl_coach.agents.qr_dqn_agent:QuantileRegressionDQNAgent'
# Quantile Regression Deep Q Network - https://arxiv.org/pdf/1710.10044v1.pdf
class QuantileRegressionDQNAgent(ValueOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.quantile_probabilities = np.ones(self.ap.algorithm.atoms) / float(self.ap.algorithm.atoms)
def get_q_values(self, quantile_values):
return np.dot(quantile_values, self.quantile_probabilities)
# prediction's format is (batch,actions,atoms)
def get_all_q_values_for_states(self, states: StateType):
if self.exploration_policy.requires_action_values():
quantile_values = self.get_prediction(states)
actions_q_values = self.get_q_values(quantile_values)
else:
actions_q_values = None
return actions_q_values
# prediction's format is (batch,actions,atoms)
def get_all_q_values_for_states_and_softmax_probabilities(self, states: StateType):
actions_q_values, softmax_probabilities = None, None
if self.exploration_policy.requires_action_values():
outputs = copy(self.networks['main'].online_network.outputs)
outputs.append(self.networks['main'].online_network.output_heads[0].softmax)
quantile_values, softmax_probabilities = self.get_prediction(states, outputs)
actions_q_values = self.get_q_values(quantile_values)
return actions_q_values, softmax_probabilities
def learn_from_batch(self, batch):
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# get the quantiles of the next states and current states
next_state_quantiles, current_quantiles = self.networks['main'].parallel_prediction([
(self.networks['main'].target_network, batch.next_states(network_keys)),
(self.networks['main'].online_network, batch.states(network_keys))
])
# add Q value samples for logging
self.q_values.add_sample(self.get_q_values(current_quantiles))
# get the optimal actions to take for the next states
target_actions = np.argmax(self.get_q_values(next_state_quantiles), axis=1)
# calculate the Bellman update
batch_idx = list(range(batch.size))
TD_targets = batch.rewards(True) + (1.0 - batch.game_overs(True)) * self.ap.algorithm.discount \
* next_state_quantiles[batch_idx, target_actions]
# get the locations of the selected actions within the batch for indexing purposes
actions_locations = [[b, a] for b, a in zip(batch_idx, batch.actions())]
# calculate the cumulative quantile probabilities and reorder them to fit the sorted quantiles order
cumulative_probabilities = np.array(range(self.ap.algorithm.atoms + 1)) / float(self.ap.algorithm.atoms) # tau_i
quantile_midpoints = 0.5*(cumulative_probabilities[1:] + cumulative_probabilities[:-1]) # tau^hat_i
quantile_midpoints = np.tile(quantile_midpoints, (batch.size, 1))
sorted_quantiles = np.argsort(current_quantiles[batch_idx, batch.actions()])
for idx in range(batch.size):
quantile_midpoints[idx, :] = quantile_midpoints[idx, sorted_quantiles[idx]]
# train
result = self.networks['main'].train_and_sync_networks({
**batch.states(network_keys),
'output_0_0': actions_locations,
'output_0_1': quantile_midpoints,
}, TD_targets)
total_loss, losses, unclipped_grads = result[:3]
return total_loss, losses, unclipped_grads | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/qr_dqn_agent.py | 0.932829 | 0.532911 | qr_dqn_agent.py | pypi |
import copy
from collections import OrderedDict
from typing import Union
import numpy as np
from rl_coach.agents.actor_critic_agent import ActorCriticAgent
from rl_coach.agents.policy_optimization_agent import PolicyGradientRescaler
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import PPOHeadParameters, VHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, \
AgentParameters, DistributedTaskParameters
from rl_coach.core_types import EnvironmentSteps, Batch
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace
from rl_coach.utils import force_list
class PPOCriticNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters(activation_function='tanh')}
self.middleware_parameters = FCMiddlewareParameters(activation_function='tanh')
self.heads_parameters = [VHeadParameters()]
self.async_training = True
self.l2_regularization = 0
self.create_target_network = True
self.batch_size = 128
class PPOActorNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters(activation_function='tanh')}
self.middleware_parameters = FCMiddlewareParameters(activation_function='tanh')
self.heads_parameters = [PPOHeadParameters()]
self.optimizer_type = 'Adam'
self.async_training = True
self.l2_regularization = 0
self.create_target_network = True
self.batch_size = 128
class PPOAlgorithmParameters(AlgorithmParameters):
"""
:param policy_gradient_rescaler: (PolicyGradientRescaler)
This represents how the critic will be used to update the actor. The critic value function is typically used
to rescale the gradients calculated by the actor. There are several ways for doing this, such as using the
advantage of the action, or the generalized advantage estimation (GAE) value.
:param gae_lambda: (float)
The :math:`\lambda` value is used within the GAE function in order to weight different bootstrap length
estimations. Typical values are in the range 0.9-1, and define an exponential decay over the different
n-step estimations.
:param target_kl_divergence: (float)
The target kl divergence between the current policy distribution and the new policy. PPO uses a heuristic to
bring the KL divergence to this value, by adding a penalty if the kl divergence is higher.
:param initial_kl_coefficient: (float)
The initial weight that will be given to the KL divergence between the current and the new policy in the
regularization factor.
:param high_kl_penalty_coefficient: (float)
The penalty that will be given for KL divergence values which are highes than what was defined as the target.
:param clip_likelihood_ratio_using_epsilon: (float)
If not None, the likelihood ratio between the current and new policy in the PPO loss function will be
clipped to the range [1-clip_likelihood_ratio_using_epsilon, 1+clip_likelihood_ratio_using_epsilon].
This is typically used in the Clipped PPO version of PPO, and should be set to None in regular PPO
implementations.
:param value_targets_mix_fraction: (float)
The targets for the value network are an exponential weighted moving average which uses this mix fraction to
define how much of the new targets will be taken into account when calculating the loss.
This value should be set to the range (0,1], where 1 means that only the new targets will be taken into account.
:param estimate_state_value_using_gae: (bool)
If set to True, the state value will be estimated using the GAE technique.
:param use_kl_regularization: (bool)
If set to True, the loss function will be regularized using the KL diveregence between the current and new
policy, to bound the change of the policy during the network update.
:param beta_entropy: (float)
An entropy regulaization term can be added to the loss function in order to control exploration. This term
is weighted using the :math:`\beta` value defined by beta_entropy.
"""
def __init__(self):
super().__init__()
self.policy_gradient_rescaler = PolicyGradientRescaler.GAE
self.gae_lambda = 0.96
self.target_kl_divergence = 0.01
self.initial_kl_coefficient = 1.0
self.high_kl_penalty_coefficient = 1000
self.clip_likelihood_ratio_using_epsilon = None
self.value_targets_mix_fraction = 0.1
self.estimate_state_value_using_gae = True
self.use_kl_regularization = True
self.beta_entropy = 0.01
self.num_consecutive_playing_steps = EnvironmentSteps(5000)
self.act_for_full_episodes = True
class PPOAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=PPOAlgorithmParameters(),
exploration={DiscreteActionSpace: CategoricalParameters(),
BoxActionSpace: AdditiveNoiseParameters()},
memory=EpisodicExperienceReplayParameters(),
networks={"critic": PPOCriticNetworkParameters(), "actor": PPOActorNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.ppo_agent:PPOAgent'
# Proximal Policy Optimization - https://arxiv.org/pdf/1707.06347.pdf
class PPOAgent(ActorCriticAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
# signals definition
self.value_loss = self.register_signal('Value Loss')
self.policy_loss = self.register_signal('Policy Loss')
self.kl_divergence = self.register_signal('KL Divergence')
self.total_kl_divergence_during_training_process = 0.0
self.unclipped_grads = self.register_signal('Grads (unclipped)')
def fill_advantages(self, batch):
batch = Batch(batch)
network_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()
# * Found not to have any impact *
# current_states_with_timestep = self.concat_state_and_timestep(batch)
current_state_values = self.networks['critic'].online_network.predict(batch.states(network_keys)).squeeze()
total_returns = batch.n_step_discounted_rewards()
# calculate advantages
advantages = []
if self.policy_gradient_rescaler == PolicyGradientRescaler.A_VALUE:
advantages = total_returns - current_state_values
elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE:
# get bootstraps
episode_start_idx = 0
advantages = np.array([])
# current_state_values[batch.game_overs()] = 0
for idx, game_over in enumerate(batch.game_overs()):
if game_over:
# get advantages for the rollout
value_bootstrapping = np.zeros((1,))
rollout_state_values = np.append(current_state_values[episode_start_idx:idx+1], value_bootstrapping)
rollout_advantages, _ = \
self.get_general_advantage_estimation_values(batch.rewards()[episode_start_idx:idx+1],
rollout_state_values)
episode_start_idx = idx + 1
advantages = np.append(advantages, rollout_advantages)
else:
screen.warning("WARNING: The requested policy gradient rescaler is not available")
# standardize
advantages = (advantages - np.mean(advantages)) / np.std(advantages)
# TODO: this will be problematic with a shared memory
for transition, advantage in zip(self.memory.transitions, advantages):
transition.info['advantage'] = advantage
self.action_advantages.add_sample(advantages)
def train_value_network(self, dataset, epochs):
loss = []
batch = Batch(dataset)
network_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()
# * Found not to have any impact *
# add a timestep to the observation
# current_states_with_timestep = self.concat_state_and_timestep(dataset)
mix_fraction = self.ap.algorithm.value_targets_mix_fraction
total_returns = batch.n_step_discounted_rewards(True)
for j in range(epochs):
curr_batch_size = batch.size
if self.networks['critic'].online_network.optimizer_type != 'LBFGS':
curr_batch_size = self.ap.network_wrappers['critic'].batch_size
for i in range(batch.size // curr_batch_size):
# split to batches for first order optimization techniques
current_states_batch = {
k: v[i * curr_batch_size:(i + 1) * curr_batch_size]
for k, v in batch.states(network_keys).items()
}
total_return_batch = total_returns[i * curr_batch_size:(i + 1) * curr_batch_size]
old_policy_values = force_list(self.networks['critic'].target_network.predict(
current_states_batch).squeeze())
if self.networks['critic'].online_network.optimizer_type != 'LBFGS':
targets = total_return_batch
else:
current_values = self.networks['critic'].online_network.predict(current_states_batch)
targets = current_values * (1 - mix_fraction) + total_return_batch * mix_fraction
inputs = copy.copy(current_states_batch)
for input_index, input in enumerate(old_policy_values):
name = 'output_0_{}'.format(input_index)
if name in self.networks['critic'].online_network.inputs:
inputs[name] = input
value_loss = self.networks['critic'].online_network.accumulate_gradients(inputs, targets)
self.networks['critic'].apply_gradients_to_online_network()
if isinstance(self.ap.task_parameters, DistributedTaskParameters):
self.networks['critic'].apply_gradients_to_global_network()
self.networks['critic'].online_network.reset_accumulated_gradients()
loss.append([value_loss[0]])
loss = np.mean(loss, 0)
return loss
def concat_state_and_timestep(self, dataset):
current_states_with_timestep = [np.append(transition.state['observation'], transition.info['timestep'])
for transition in dataset]
current_states_with_timestep = np.expand_dims(current_states_with_timestep, -1)
return current_states_with_timestep
def train_policy_network(self, dataset, epochs):
loss = []
for j in range(epochs):
loss = {
'total_loss': [],
'policy_losses': [],
'unclipped_grads': [],
'fetch_result': []
}
#shuffle(dataset)
for i in range(len(dataset) // self.ap.network_wrappers['actor'].batch_size):
batch = Batch(dataset[i * self.ap.network_wrappers['actor'].batch_size:
(i + 1) * self.ap.network_wrappers['actor'].batch_size])
network_keys = self.ap.network_wrappers['actor'].input_embedders_parameters.keys()
advantages = batch.info('advantage')
actions = batch.actions()
if not isinstance(self.spaces.action, DiscreteActionSpace) and len(actions.shape) == 1:
actions = np.expand_dims(actions, -1)
# get old policy probabilities and distribution
old_policy = force_list(self.networks['actor'].target_network.predict(batch.states(network_keys)))
# calculate gradients and apply on both the local policy network and on the global policy network
fetches = [self.networks['actor'].online_network.output_heads[0].kl_divergence,
self.networks['actor'].online_network.output_heads[0].entropy]
inputs = copy.copy(batch.states(network_keys))
inputs['output_0_0'] = actions
# old_policy_distribution needs to be represented as a list, because in the event of discrete controls,
# it has just a mean. otherwise, it has both a mean and standard deviation
for input_index, input in enumerate(old_policy):
inputs['output_0_{}'.format(input_index + 1)] = input
total_loss, policy_losses, unclipped_grads, fetch_result =\
self.networks['actor'].online_network.accumulate_gradients(
inputs, [advantages], additional_fetches=fetches)
self.networks['actor'].apply_gradients_to_online_network()
if isinstance(self.ap.task_parameters, DistributedTaskParameters):
self.networks['actor'].apply_gradients_to_global_network()
self.networks['actor'].online_network.reset_accumulated_gradients()
loss['total_loss'].append(total_loss)
loss['policy_losses'].append(policy_losses)
loss['unclipped_grads'].append(unclipped_grads)
loss['fetch_result'].append(fetch_result)
self.unclipped_grads.add_sample(unclipped_grads)
for key in loss.keys():
loss[key] = np.mean(loss[key], 0)
if self.ap.network_wrappers['critic'].learning_rate_decay_rate != 0:
curr_learning_rate = self.networks['critic'].online_network.get_variable_value(self.ap.learning_rate)
self.curr_learning_rate.add_sample(curr_learning_rate)
else:
curr_learning_rate = self.ap.network_wrappers['critic'].learning_rate
# log training parameters
screen.log_dict(
OrderedDict([
("Surrogate loss", loss['policy_losses'][0]),
("KL divergence", loss['fetch_result'][0]),
("Entropy", loss['fetch_result'][1]),
("training epoch", j),
("learning_rate", curr_learning_rate)
]),
prefix="Policy training"
)
self.total_kl_divergence_during_training_process = loss['fetch_result'][0]
self.entropy.add_sample(loss['fetch_result'][1])
self.kl_divergence.add_sample(loss['fetch_result'][0])
return loss['total_loss']
def update_kl_coefficient(self):
# John Schulman takes the mean kl divergence only over the last epoch which is strange but we will follow
# his implementation for now because we know it works well
screen.log_title("KL = {}".format(self.total_kl_divergence_during_training_process))
# update kl coefficient
kl_target = self.ap.algorithm.target_kl_divergence
kl_coefficient = self.networks['actor'].online_network.get_variable_value(
self.networks['actor'].online_network.output_heads[0].kl_coefficient)
new_kl_coefficient = kl_coefficient
if self.total_kl_divergence_during_training_process > 1.3 * kl_target:
# kl too high => increase regularization
new_kl_coefficient *= 1.5
elif self.total_kl_divergence_during_training_process < 0.7 * kl_target:
# kl too low => decrease regularization
new_kl_coefficient /= 1.5
# update the kl coefficient variable
if kl_coefficient != new_kl_coefficient:
self.networks['actor'].online_network.set_variable_value(
self.networks['actor'].online_network.output_heads[0].assign_kl_coefficient,
new_kl_coefficient,
self.networks['actor'].online_network.output_heads[0].kl_coefficient_ph)
screen.log_title("KL penalty coefficient change = {} -> {}".format(kl_coefficient, new_kl_coefficient))
def post_training_commands(self):
if self.ap.algorithm.use_kl_regularization:
self.update_kl_coefficient()
# clean memory
self.call_memory('clean')
def train(self):
loss = 0
if self._should_train():
for network in self.networks.values():
network.set_is_training(True)
for training_step in range(self.ap.algorithm.num_consecutive_training_steps):
self.networks['actor'].sync()
self.networks['critic'].sync()
dataset = self.memory.transitions
self.fill_advantages(dataset)
# take only the requested number of steps
dataset = dataset[:self.ap.algorithm.num_consecutive_playing_steps.num_steps]
value_loss = self.train_value_network(dataset, 1)
policy_loss = self.train_policy_network(dataset, 10)
self.value_loss.add_sample(value_loss)
self.policy_loss.add_sample(policy_loss)
for network in self.networks.values():
network.set_is_training(False)
self.post_training_commands()
self.training_iteration += 1
self.update_log() # should be done in order to update the data that has been accumulated * while not playing *
return np.append(value_loss, policy_loss)
def get_prediction(self, states):
tf_input_state = self.prepare_batch_for_inference(states, "actor")
return self.networks['actor'].online_network.predict(tf_input_state) | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/ppo_agent.py | 0.9415 | 0.441914 | ppo_agent.py | pypi |
from typing import Union
import numpy as np
import scipy.signal
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent, PolicyGradientRescaler
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import PolicyHeadParameters, VHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, \
AgentParameters
from rl_coach.exploration_policies.categorical import CategoricalParameters
from rl_coach.exploration_policies.continuous_entropy import ContinuousEntropyParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace
from rl_coach.utils import last_sample
class ActorCriticAlgorithmParameters(AlgorithmParameters):
"""
:param policy_gradient_rescaler: (PolicyGradientRescaler)
The value that will be used to rescale the policy gradient
:param apply_gradients_every_x_episodes: (int)
The number of episodes to wait before applying the accumulated gradients to the network.
The training iterations only accumulate gradients without actually applying them.
:param beta_entropy: (float)
The weight that will be given to the entropy regularization which is used in order to improve exploration.
:param num_steps_between_gradient_updates: (int)
Every num_steps_between_gradient_updates transitions will be considered as a single batch and use for
accumulating gradients. This is also the number of steps used for bootstrapping according to the n-step formulation.
:param gae_lambda: (float)
If the policy gradient rescaler was defined as PolicyGradientRescaler.GAE, the generalized advantage estimation
scheme will be used, in which case the lambda value controls the decay for the different n-step lengths.
:param estimate_state_value_using_gae: (bool)
If set to True, the state value targets for the V head will be estimated using the GAE scheme.
"""
def __init__(self):
super().__init__()
self.policy_gradient_rescaler = PolicyGradientRescaler.A_VALUE
self.apply_gradients_every_x_episodes = 5
self.beta_entropy = 0
self.num_steps_between_gradient_updates = 5000 # this is called t_max in all the papers
self.gae_lambda = 0.96
self.estimate_state_value_using_gae = False
class ActorCriticNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters()
self.heads_parameters = [VHeadParameters(loss_weight=0.5), PolicyHeadParameters(loss_weight=1.0)]
self.optimizer_type = 'Adam'
self.clip_gradients = 40.0
self.async_training = True
class ActorCriticAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=ActorCriticAlgorithmParameters(),
exploration={DiscreteActionSpace: CategoricalParameters(),
BoxActionSpace: ContinuousEntropyParameters()},
memory=SingleEpisodeBufferParameters(),
networks={"main": ActorCriticNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.actor_critic_agent:ActorCriticAgent'
# Actor Critic - https://arxiv.org/abs/1602.01783
class ActorCriticAgent(PolicyOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.last_gradient_update_step_idx = 0
self.action_advantages = self.register_signal('Advantages')
self.state_values = self.register_signal('Values')
self.value_loss = self.register_signal('Value Loss')
self.policy_loss = self.register_signal('Policy Loss')
# Discounting function used to calculate discounted returns.
def discount(self, x, gamma):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
def get_general_advantage_estimation_values(self, rewards, values):
# values contain n+1 elements (t ... t+n+1), rewards contain n elements (t ... t + n)
bootstrap_extended_rewards = np.array(rewards.tolist() + [values[-1]])
# Approximation based calculation of GAE (mathematically correct only when Tmax = inf,
# although in practice works even in much smaller Tmax values, e.g. 20)
deltas = rewards + self.ap.algorithm.discount * values[1:] - values[:-1]
gae = self.discount(deltas, self.ap.algorithm.discount * self.ap.algorithm.gae_lambda)
if self.ap.algorithm.estimate_state_value_using_gae:
discounted_returns = np.expand_dims(gae + values[:-1], -1)
else:
discounted_returns = np.expand_dims(np.array(self.discount(bootstrap_extended_rewards,
self.ap.algorithm.discount)), 1)[:-1]
return gae, discounted_returns
def learn_from_batch(self, batch):
# batch contains a list of episodes to learn from
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# get the values for the current states
result = self.networks['main'].online_network.predict(batch.states(network_keys))
current_state_values = result[0]
self.state_values.add_sample(current_state_values)
# the targets for the state value estimator
num_transitions = batch.size
state_value_head_targets = np.zeros((num_transitions, 1))
# estimate the advantage function
action_advantages = np.zeros((num_transitions, 1))
if self.policy_gradient_rescaler == PolicyGradientRescaler.A_VALUE:
if batch.game_overs()[-1]:
R = 0
else:
R = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]
for i in reversed(range(num_transitions)):
R = batch.rewards()[i] + self.ap.algorithm.discount * R
state_value_head_targets[i] = R
action_advantages[i] = R - current_state_values[i]
elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE:
# get bootstraps
bootstrapped_value = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]
values = np.append(current_state_values, bootstrapped_value)
if batch.game_overs()[-1]:
values[-1] = 0
# get general discounted returns table
gae_values, state_value_head_targets = self.get_general_advantage_estimation_values(batch.rewards(), values)
action_advantages = np.vstack(gae_values)
else:
screen.warning("WARNING: The requested policy gradient rescaler is not available")
action_advantages = action_advantages.squeeze(axis=-1)
actions = batch.actions()
if not isinstance(self.spaces.action, DiscreteActionSpace) and len(actions.shape) < 2:
actions = np.expand_dims(actions, -1)
# train
result = self.networks['main'].online_network.accumulate_gradients({**batch.states(network_keys),
'output_1_0': actions},
[state_value_head_targets, action_advantages])
# logging
total_loss, losses, unclipped_grads = result[:3]
self.action_advantages.add_sample(action_advantages)
self.unclipped_grads.add_sample(unclipped_grads)
self.value_loss.add_sample(losses[0])
self.policy_loss.add_sample(losses[1])
return total_loss, losses, unclipped_grads
def get_prediction(self, states):
tf_input_state = self.prepare_batch_for_inference(states, "main")
return self.networks['main'].online_network.predict(tf_input_state)[1:] # index 0 is the state value | /rl-coach-1.0.1.tar.gz/rl-coach-1.0.1/rl_coach/agents/actor_critic_agent.py | 0.935568 | 0.480052 | actor_critic_agent.py | pypi |
import redis
import pickle
import uuid
import time
from rl_coach.memories.backend.memory import MemoryBackend, MemoryBackendParameters
from rl_coach.core_types import Transition, Episode, EnvironmentSteps, EnvironmentEpisodes
class RedisPubSubMemoryBackendParameters(MemoryBackendParameters):
def __init__(self, redis_address: str="", redis_port: int=6379, channel: str="channel-{}".format(uuid.uuid4()),
orchestrator_params: dict=None, run_type='trainer', orchestrator_type: str = "kubernetes", deployed: str = False):
self.redis_address = redis_address
self.redis_port = redis_port
self.channel = channel
if not orchestrator_params:
orchestrator_params = {}
self.orchestrator_params = orchestrator_params
self.run_type = run_type
self.store_type = "redispubsub"
self.orchestrator_type = orchestrator_type
self.deployed = deployed
class RedisPubSubBackend(MemoryBackend):
"""
A memory backend which transfers the experiences from the rollout to the training worker using Redis Pub/Sub in
Coach when distributed mode is used.
"""
def __init__(self, params: RedisPubSubMemoryBackendParameters):
"""
:param params: The Redis parameters to be used with this Redis Pub/Sub instance.
"""
self.params = params
self.redis_connection = redis.Redis(self.params.redis_address, self.params.redis_port)
self.redis_server_name = 'redis-server-{}'.format(uuid.uuid4())
self.redis_service_name = 'redis-service-{}'.format(uuid.uuid4())
def store(self, obj):
"""
:param obj: The object to store in memory. The object is either a Tranisition or Episode type.
"""
self.redis_connection.publish(self.params.channel, pickle.dumps(obj))
def deploy(self):
"""
Deploy the Redis Pub/Sub service in an orchestrator.
"""
if not self.params.deployed:
if self.params.orchestrator_type == 'kubernetes':
self.deploy_kubernetes()
# Wait till subscribe to the channel is possible or else it will cause delays in the trainer.
time.sleep(10)
def deploy_kubernetes(self):
"""
Deploy the Redis Pub/Sub service in Kubernetes orchestrator.
"""
if 'namespace' not in self.params.orchestrator_params:
self.params.orchestrator_params['namespace'] = "default"
from kubernetes import client, config
container = client.V1Container(
name=self.redis_server_name,
image='redis:4-alpine',
resources=client.V1ResourceRequirements(
limits={
"cpu": "8",
"memory": "4Gi"
# "nvidia.com/gpu": "0",
}
),
)
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={'app': self.redis_server_name}),
spec=client.V1PodSpec(
containers=[container]
)
)
deployment_spec = client.V1DeploymentSpec(
replicas=1,
template=template,
selector=client.V1LabelSelector(
match_labels={'app': self.redis_server_name}
)
)
deployment = client.V1Deployment(
api_version='apps/v1',
kind='Deployment',
metadata=client.V1ObjectMeta(name=self.redis_server_name, labels={'app': self.redis_server_name}),
spec=deployment_spec
)
config.load_kube_config()
api_client = client.AppsV1Api()
try:
print(self.params.orchestrator_params)
api_client.create_namespaced_deployment(self.params.orchestrator_params['namespace'], deployment)
except client.rest.ApiException as e:
print("Got exception: %s\n while creating redis-server", e)
return False
core_v1_api = client.CoreV1Api()
service = client.V1Service(
api_version='v1',
kind='Service',
metadata=client.V1ObjectMeta(
name=self.redis_service_name
),
spec=client.V1ServiceSpec(
selector={'app': self.redis_server_name},
ports=[client.V1ServicePort(
protocol='TCP',
port=6379,
target_port=6379
)]
)
)
try:
core_v1_api.create_namespaced_service(self.params.orchestrator_params['namespace'], service)
self.params.redis_address = '{}.{}.svc'.format(
self.redis_service_name, self.params.orchestrator_params['namespace']
)
self.params.redis_port = 6379
return True
except client.rest.ApiException as e:
print("Got exception: %s\n while creating a service for redis-server", e)
return False
def undeploy(self):
"""
Undeploy the Redis Pub/Sub service in an orchestrator.
"""
from kubernetes import client
if self.params.deployed:
return
from kubernetes import client
api_client = client.AppsV1Api()
delete_options = client.V1DeleteOptions()
try:
api_client.delete_namespaced_deployment(self.redis_server_name, self.params.orchestrator_params['namespace'], delete_options)
except client.rest.ApiException as e:
print("Got exception: %s\n while deleting redis-server", e)
api_client = client.CoreV1Api()
try:
api_client.delete_namespaced_service(self.redis_service_name, self.params.orchestrator_params['namespace'], delete_options)
except client.rest.ApiException as e:
print("Got exception: %s\n while deleting redis-server", e)
def sample(self, size):
    """
    Sampling is not supported by the Redis Pub/Sub backend; transitions are
    consumed through fetch() / subscribe() instead.

    :param size: the requested batch size (unused)
    """
    pass
def fetch(self, num_consecutive_playing_steps=None):
    """
    Consume transitions published on this backend's Redis channel.

    :param num_consecutive_playing_steps: The number steps to fetch.
    """
    subscriber = RedisSub(
        redis_address=self.params.redis_address,
        redis_port=self.params.redis_port,
        channel=self.params.channel,
    )
    return subscriber.run(num_consecutive_playing_steps)
def subscribe(self, agent):
    """
    Create and return a Redis subscriber bound to this backend's channel.

    :param agent: The agent in use.
    """
    return RedisSub(
        redis_address=self.params.redis_address,
        redis_port=self.params.redis_port,
        channel=self.params.channel,
    )
def get_endpoint(self):
    """Return the Redis endpoint as a dict with 'redis_address' and 'redis_port' keys."""
    endpoint = dict(redis_address=self.params.redis_address,
                    redis_port=self.params.redis_port)
    return endpoint
class RedisSub(object):
    """
    Subscriber side of the Redis Pub/Sub memory backend: connects to a Redis server,
    subscribes to a channel, and re-yields the pickled Transition/Episode objects
    published on it.
    """
    def __init__(self, redis_address: str = "localhost", redis_port: int=6379, channel: str = "PubsubChannel"):
        """
        :param redis_address: hostname of the Redis server
        :param redis_port: port of the Redis server
        :param channel: the Pub/Sub channel to subscribe to
        """
        super().__init__()
        self.redis_connection = redis.Redis(redis_address, redis_port)
        self.pubsub = self.redis_connection.pubsub()
        self.subscriber = None
        self.channel = channel
        # NOTE(review): redis-py's pubsub.subscribe() returns None, so self.subscriber
        # stays None -- confirm whether this attribute is actually used anywhere.
        self.subscriber = self.pubsub.subscribe(self.channel)
    def run(self, num_consecutive_playing_steps):
        """
        Generator that yields transitions received on the channel until the requested
        number of environment steps (EnvironmentSteps) or episodes (EnvironmentEpisodes)
        has been reached.

        :param num_consecutive_playing_steps: The number steps to fetch.
        """
        transitions = 0
        episodes = 0
        steps = 0
        for message in self.pubsub.listen():
            if message and 'data' in message:
                try:
                    # SECURITY NOTE: pickle.loads on data received over the network is unsafe
                    # if the channel can be written to by untrusted parties.
                    obj = pickle.loads(message['data'])
                    if type(obj) == Transition:
                        transitions += 1
                        if obj.game_over:
                            episodes += 1
                        yield obj
                    elif type(obj) == Episode:
                        episodes += 1
                        transitions += len(obj.transitions)
                        yield from obj.transitions
                except Exception:
                    # non-pickled control messages (e.g. the subscribe confirmation whose
                    # 'data' is an int) land here and are skipped
                    continue
            # translate progress counters into the caller's requested step granularity
            if type(num_consecutive_playing_steps) == EnvironmentSteps:
                steps = transitions
            if type(num_consecutive_playing_steps) == EnvironmentEpisodes:
                steps = episodes
            if steps >= num_consecutive_playing_steps.num_steps:
                break
import os
import pickle
import numpy as np
try:
import annoy
from annoy import AnnoyIndex
except ImportError:
from rl_coach.logger import failed_imports
failed_imports.append("annoy")
class AnnoyDictionary(object):
    """
    A fixed-capacity key -> value store with approximate nearest-neighbor lookup over
    the keys, backed by an Annoy index. Keys are embeddings, values are scalars, and
    each entry may carry an arbitrary additional_data object. When the dictionary is
    full, the least-recently-used entry is evicted. New entries are buffered and
    flushed into the Annoy index in batches, since Annoy indices must be rebuilt to
    accept new items.
    """
    def __init__(self, dict_size, key_width, new_value_shift_coefficient=0.1, batch_size=100, key_error_threshold=0.01,
                 num_neighbors=50, override_existing_keys=True, rebuild_on_every_update=False):
        """
        :param dict_size: maximum number of entries held
        :param key_width: dimensionality of the key embeddings
        :param new_value_shift_coefficient: step size used when updating the value of an existing key
        :param batch_size: initial number of buffered insertions required before an index rebuild
        :param key_error_threshold: keys within this distance are considered the same key
        :param num_neighbors: tree count passed to Annoy's index.build()
        :param override_existing_keys: if True, inserting a near-duplicate key updates it in place
        :param rebuild_on_every_update: if True, rebuild the Annoy index after every add() call
        """
        self.rebuild_on_every_update = rebuild_on_every_update
        self.max_size = dict_size
        self.curr_size = 0
        self.new_value_shift_coefficient = new_value_shift_coefficient
        self.num_neighbors = num_neighbors
        self.override_existing_keys = override_existing_keys
        self.index = AnnoyIndex(key_width, metric='euclidean')
        self.index.set_seed(1)
        self.embeddings = np.zeros((dict_size, key_width))
        self.values = np.zeros(dict_size)
        self.additional_data = [None] * dict_size
        self.lru_timestamps = np.zeros(dict_size)
        self.current_timestamp = 0.0
        # keys that are in this distance will be considered as the same key
        self.key_error_threshold = key_error_threshold
        self.initial_update_size = batch_size
        self.min_update_size = self.initial_update_size
        self.key_dimension = key_width
        self.value_dimension = 1
        self._reset_buffer()
        self.built_capacity = 0
    def add(self, keys, values, additional_data=None, force_rebuild_tree=False):
        """
        Insert a batch of (key, value, additional_data) entries.

        Near-duplicate keys (within key_error_threshold) are updated in place when
        override_existing_keys is True; all other entries are buffered and flushed
        into the Annoy index once enough have accumulated (or when forced).

        :param keys: array of shape (batch, key_width)
        :param values: array of per-entry values, one row per key
        :param additional_data: optional list of per-entry objects
        :param force_rebuild_tree: rebuild the Annoy index even if the buffer is small
        """
        if not additional_data:
            additional_data = [None] * len(keys)
        # Adds new embeddings and values to the dictionary
        indices = []
        indices_to_remove = []
        for i in range(keys.shape[0]):
            index = self._lookup_key_index(keys[i])
            if index and self.override_existing_keys:
                # update existing value
                # index has the nested shape returned by _get_k_nearest_neighbors_indices ([[i]])
                self.values[index] += self.new_value_shift_coefficient * (values[i] - self.values[index])
                self.additional_data[index[0][0]] = additional_data[i]
                self.lru_timestamps[index] = self.current_timestamp
                indices_to_remove.append(i)
            else:
                # add new
                if self.curr_size >= self.max_size:
                    # find the LRU entry
                    index = np.argmin(self.lru_timestamps)
                else:
                    index = self.curr_size
                    self.curr_size += 1
                self.lru_timestamps[index] = self.current_timestamp
                indices.append(index)
        # drop the entries that were handled as in-place updates
        # (reverse order keeps the remaining indices valid while deleting)
        for i in reversed(indices_to_remove):
            keys = np.delete(keys, i, 0)
            values = np.delete(values, i, 0)
            del additional_data[i]
        self.buffered_keys = np.vstack((self.buffered_keys, keys))
        self.buffered_values = np.vstack((self.buffered_values, values))
        self.buffered_indices = self.buffered_indices + indices
        self.buffered_additional_data = self.buffered_additional_data + additional_data
        # rebuild lazily: wait until the buffer reaches ~2% of the current size
        if len(self.buffered_indices) >= self.min_update_size:
            self.min_update_size = max(self.initial_update_size, int(self.curr_size * 0.02))
            self._rebuild_index()
        elif force_rebuild_tree or self.rebuild_on_every_update:
            self._rebuild_index()
        self.current_timestamp += 1
    # Returns the stored embeddings and values of the closest embeddings
    def query(self, keys, k):
        """
        Return the k nearest stored entries for each query key, refreshing their LRU
        timestamps. Returns placeholder values while the index is not yet usable.

        :param keys: iterable of query embeddings
        :param k: number of neighbors to return per key
        :return: (embeddings, values, indices, additional_data), one entry per query key
        """
        if not self.has_enough_entries(k):
            # this will only happen when the DND is not yet populated with enough entries, which is only during heatup
            # these values won't be used and therefore they are meaningless
            return [0.0], [0.0], [0], [None]
        _, indices = self._get_k_nearest_neighbors_indices(keys, k)
        embeddings = []
        values = []
        additional_data = []
        for ind in indices:
            self.lru_timestamps[ind] = self.current_timestamp
            embeddings.append(self.embeddings[ind])
            values.append(self.values[ind])
            curr_additional_data = []
            for sub_ind in ind:
                curr_additional_data.append(self.additional_data[sub_ind])
            additional_data.append(curr_additional_data)
        self.current_timestamp += 1
        return embeddings, values, indices, additional_data
    def has_enough_entries(self, k):
        """Return True when more than k entries exist and have been built into the index."""
        return self.curr_size > k and (self.built_capacity > k)
    def sample_embeddings(self, num_embeddings):
        """Return num_embeddings stored embeddings sampled uniformly (with replacement)."""
        return self.embeddings[np.random.choice(self.curr_size, num_embeddings)]
    def _get_k_nearest_neighbors_indices(self, keys, k):
        # query the Annoy index for each key; returns parallel lists of distances and indices
        distances = []
        indices = []
        for key in keys:
            index, distance = self.index.get_nns_by_vector(key, k, include_distances=True)
            distances.append(distance)
            indices.append(index)
        return distances, indices
    def _rebuild_index(self):
        # flush the buffered entries into the backing arrays and rebuild the Annoy index
        # (Annoy indices are immutable once built, hence the unbuild/build cycle)
        self.index.unbuild()
        self.embeddings[self.buffered_indices] = self.buffered_keys
        self.values[self.buffered_indices] = np.squeeze(self.buffered_values)
        for i, data in zip(self.buffered_indices, self.buffered_additional_data):
            self.additional_data[i] = data
        for idx, key in zip(self.buffered_indices, self.buffered_keys):
            self.index.add_item(idx, key)
        self._reset_buffer()
        self.index.build(self.num_neighbors)
        self.built_capacity = self.curr_size
    def _reset_buffer(self):
        # empty the staging buffers used between index rebuilds
        self.buffered_keys = np.zeros((0, self.key_dimension))
        self.buffered_values = np.zeros((0, self.value_dimension))
        self.buffered_indices = []
        self.buffered_additional_data = []
    def _lookup_key_index(self, key):
        # return the (nested) index of an existing key within key_error_threshold, else None
        distance, index = self._get_k_nearest_neighbors_indices([key], 1)
        if distance != [[]] and distance[0][0] <= self.key_error_threshold:
            return index
        return None
class QDND(object):
    """
    A Q-value Differentiable Neural Dictionary (as in Neural Episodic Control):
    one approximate-nearest-neighbor dictionary (AnnoyDictionary) per action,
    mapping state embeddings to Q values.
    """
    def __init__(self, dict_size, key_width, num_actions, new_value_shift_coefficient=0.1, key_error_threshold=0.01,
                 learning_rate=0.01, num_neighbors=50, return_additional_data=False, override_existing_keys=False,
                 rebuild_on_every_update=False):
        """
        :param dict_size: maximum number of entries per action dictionary
        :param key_width: dimensionality of the embeddings used as keys
        :param num_actions: number of actions (one dictionary is kept per action)
        :param new_value_shift_coefficient: step size for updating values of existing keys
        :param key_error_threshold: keys within this distance are considered identical
        :param learning_rate: step size used when applying gradients to stored keys/values
        :param num_neighbors: neighbor count used by the underlying Annoy dictionaries
        :param return_additional_data: if True, query() also returns per-entry additional data
        :param override_existing_keys: if True, re-inserting a near-duplicate key updates it in place
        :param rebuild_on_every_update: if True, the Annoy index is rebuilt after every add
        """
        self.dict_size = dict_size
        self.key_width = key_width
        self.num_actions = num_actions
        self.new_value_shift_coefficient = new_value_shift_coefficient
        self.key_error_threshold = key_error_threshold
        self.learning_rate = learning_rate
        self.num_neighbors = num_neighbors
        self.return_additional_data = return_additional_data
        self.override_existing_keys = override_existing_keys
        # kept so that clean() can recreate the dictionaries with identical configuration
        self.rebuild_on_every_update = rebuild_on_every_update
        self.dicts = []
        # create a dict for each action
        for a in range(num_actions):
            new_dict = AnnoyDictionary(dict_size, key_width, new_value_shift_coefficient,
                                       key_error_threshold=key_error_threshold, num_neighbors=num_neighbors,
                                       override_existing_keys=override_existing_keys,
                                       rebuild_on_every_update=rebuild_on_every_update)
            self.dicts.append(new_dict)
    def add(self, embeddings, actions, values, additional_data=None):
        """
        Route a batch of (embedding, action, value) entries into the per-action dictionaries.

        :param embeddings: batch of state embeddings (keys)
        :param actions: batch of action indices, aligned with embeddings
        :param values: batch of Q values, aligned with embeddings
        :param additional_data: optional per-entry data objects
        :return: True
        """
        # add a new set of embeddings and values to each of the underlining dictionaries
        embeddings = np.array(embeddings)
        actions = np.array(actions)
        values = np.array(values)
        for a in range(self.num_actions):
            idx = np.where(actions == a)
            curr_action_embeddings = embeddings[idx]
            curr_action_values = np.expand_dims(values[idx], -1)
            if additional_data:
                curr_additional_data = []
                for i in idx[0]:
                    curr_additional_data.append(additional_data[i])
            else:
                curr_additional_data = None
            self.dicts[a].add(curr_action_embeddings, curr_action_values, curr_additional_data)
        return True
    def query(self, embeddings, action, k):
        """
        Query the dictionary of the given action for the k nearest neighbors of each embedding.

        :param embeddings: batch of query embeddings
        :param action: the action whose dictionary is queried
        :param k: number of neighbors to return per embedding
        :return: (embeddings, values, indices) and, if return_additional_data is set,
                 also the per-entry additional data
        """
        # query for nearest neighbors to the given embeddings
        dnd_embeddings = []
        dnd_values = []
        dnd_indices = []
        dnd_additional_data = []
        for i in range(len(embeddings)):
            embedding, value, indices, additional_data = self.dicts[action].query([embeddings[i]], k)
            dnd_embeddings.append(embedding[0])
            dnd_values.append(value[0])
            dnd_indices.append(indices[0])
            dnd_additional_data.append(additional_data[0])
        if self.return_additional_data:
            return dnd_embeddings, dnd_values, dnd_indices, dnd_additional_data
        else:
            return dnd_embeddings, dnd_values, dnd_indices
    def has_enough_entries(self, k):
        """Return True only if every per-action dictionary holds more than k entries."""
        # check if each of the action dictionaries has at least k entries
        for a in range(self.num_actions):
            if not self.dicts[a].has_enough_entries(k):
                return False
        return True
    def update_keys_and_values(self, actions, key_gradients, value_gradients, indices):
        """
        Apply gradient descent steps directly to the stored keys (embeddings) and values.

        :param actions: per-batch action indices selecting which dictionary to update
        :param key_gradients: per-batch gradients w.r.t. the stored keys
        :param value_gradients: per-batch gradients w.r.t. the stored values
        :param indices: per-batch indices of the entries to update
        """
        # Update DND keys and values
        for batch_action, batch_keys, batch_values, batch_indices in zip(actions, key_gradients, value_gradients, indices):
            # Update keys (embeddings) and values in DND
            for i, index in enumerate(batch_indices):
                self.dicts[batch_action].embeddings[index, :] -= self.learning_rate * batch_keys[i, :]
                self.dicts[batch_action].values[index] -= self.learning_rate * batch_values[i]
    def sample_embeddings(self, num_embeddings):
        """
        Sample num_embeddings stored embeddings, distributed evenly across the actions
        (any remainder is drawn from a single random action's dictionary).
        """
        num_actions = len(self.dicts)
        embeddings = []
        num_embeddings_per_action = int(num_embeddings / num_actions)
        for action in range(num_actions):
            embeddings.append(self.dicts[action].sample_embeddings(num_embeddings_per_action))
        embeddings = np.vstack(embeddings)
        # the numbers did not divide nicely, let's just randomly sample some more embeddings
        if num_embeddings_per_action * num_actions < num_embeddings:
            action = np.random.randint(0, num_actions)
            extra_embeddings = self.dicts[action].sample_embeddings(num_embeddings -
                                                                   num_embeddings_per_action * num_actions)
            embeddings = np.vstack([embeddings, extra_embeddings])
        return embeddings
    def clean(self):
        """Discard all entries by recreating the per-action dictionaries."""
        # BUGFIX: recreate the dictionaries with the SAME configuration used in __init__.
        # Previously override_existing_keys and rebuild_on_every_update were dropped here,
        # silently switching back to AnnoyDictionary's defaults after a clean.
        self.dicts = []
        for a in range(self.num_actions):
            new_dict = AnnoyDictionary(self.dict_size, self.key_width, self.new_value_shift_coefficient,
                                       key_error_threshold=self.key_error_threshold, num_neighbors=self.num_neighbors,
                                       override_existing_keys=self.override_existing_keys,
                                       rebuild_on_every_update=self.rebuild_on_every_update)
            self.dicts.append(new_dict)
def load_dnd(model_dir):
    """
    Load the latest DND checkpoint (a '.srs' pickle file named '<id>_...') from
    model_dir and rebuild the Annoy indices (which are not picklable) for each
    per-action dictionary.

    :param model_dir: directory containing the checkpoint files
    :return: the restored QDND object
    :raises FileNotFoundError: if no '.srs' checkpoint file exists in model_dir
    """
    latest_checkpoint_id = -1
    latest_checkpoint = ''
    # find the checkpoint file with the highest numeric id prefix
    for fname in os.listdir(model_dir):
        path = os.path.join(model_dir, fname)
        if os.path.isdir(path) or fname.split('.')[-1] != 'srs':
            continue
        checkpoint_id = int(fname.split('_')[0])
        if checkpoint_id > latest_checkpoint_id:
            latest_checkpoint = fname
            latest_checkpoint_id = checkpoint_id
    if latest_checkpoint_id == -1:
        # BUGFIX: previously this fell through and failed with a confusing open('') error
        raise FileNotFoundError("No DND checkpoint (*.srs) found in {}".format(model_dir))
    with open(os.path.join(model_dir, latest_checkpoint), 'rb') as f:
        DND = pickle.load(f)
    # the Annoy index cannot be pickled, so rebuild it from the stored embeddings.
    # GENERALIZATION: use the persisted key_dimension / num_neighbors of each dictionary
    # instead of the previously hard-coded 512 / 50, so non-default DNDs load correctly.
    for a in range(DND.num_actions):
        DND.dicts[a].index = AnnoyIndex(DND.dicts[a].key_dimension, metric='euclidean')
        DND.dicts[a].index.set_seed(1)
        for idx, key in zip(range(DND.dicts[a].curr_size), DND.dicts[a].embeddings[:DND.dicts[a].curr_size]):
            DND.dicts[a].index.add_item(idx, key)
        DND.dicts[a].index.build(DND.dicts[a].num_neighbors)
        DND.dicts[a].built_capacity = DND.dicts[a].curr_size
    return DND
import operator
import random
from enum import Enum
from typing import List, Tuple, Any, Union
import numpy as np
from rl_coach.core_types import Transition
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters, ExperienceReplay
from rl_coach.schedules import Schedule, ConstantSchedule
class BalancedExperienceReplayParameters(ExperienceReplayParameters):
    """Parameters for the class-balanced experience replay buffer."""
    def __init__(self):
        super().__init__()
        # balanced sampling draws from each class without replacement, so duplicates
        # within a batch are disallowed by default
        self.allow_duplicates_in_batch_sampling = False
        self.max_size = (MemoryGranularity.Transitions, 1000000)
        # key under which each transition's state stores its class index;
        # num_classes must be overridden with the actual class count before use
        self.state_key_with_the_class_index = 'class'
        self.num_classes = 0
    @property
    def path(self):
        """Import path of the memory class these parameters instantiate."""
        return 'rl_coach.memories.non_episodic.balanced_experience_replay:BalancedExperienceReplay'
"""
A replay buffer which allows sampling batches which are balanced in terms of the classes that are sampled
"""
class BalancedExperienceReplay(ExperienceReplay):
    """
    A replay buffer which keeps one transition list per class and samples an equal
    number of transitions from every class, so that sampled batches are balanced in
    terms of the classes they contain. The class index of each transition is read
    from its state dictionary under a configurable key.
    """
    def __init__(self, max_size: Tuple[MemoryGranularity, int], allow_duplicates_in_batch_sampling: bool=True,
                 num_classes: int=0, state_key_with_the_class_index: Any='class'):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        :param allow_duplicates_in_batch_sampling: allow having the same transition multiple times in a batch
        :param num_classes: the number of classes in the replayed data
        :param state_key_with_the_class_index: the class index is assumed to be a value in the state dictionary.
                                               this parameter determines the key to retrieve the class index value
        :raises ValueError: if num_classes < 2
        """
        super().__init__(max_size, allow_duplicates_in_batch_sampling)
        self.current_class_to_sample_from = 0
        self.num_classes = num_classes
        self.state_key_with_the_class_index = state_key_with_the_class_index
        # one list of transitions per class; transitions_order tracks the class index of
        # every stored transition in insertion order, so the globally oldest transition
        # can be evicted when the buffer is full
        self.transitions = [[] for _ in range(self.num_classes)]
        self.transitions_order = []
        if self.num_classes < 2:
            raise ValueError("The number of classes for a balanced replay buffer should be at least 2. "
                             "The number of classes that were defined are: {}".format(self.num_classes))
    def store(self, transition: Transition, lock: bool=True) -> None:
        """
        Store a new transition in the memory.
        :param transition: a transition to store
        :param lock: if true, will lock the readers writers lock. this can cause a deadlock if an inheriting class
                     locks and then calls store with lock = True
        :return: None
        :raises ValueError: if the transition's state holds no class index, or it is out of range
        """
        # Calling super.store() so that in case a memory backend is used, the memory backend can store this transition.
        super().store(transition)
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()
        self._num_transitions += 1
        if self.state_key_with_the_class_index not in transition.state:
            raise ValueError("The class index was not present in the state of the transition under the given key ({})"
                             .format(self.state_key_with_the_class_index))
        class_idx = transition.state[self.state_key_with_the_class_index]
        if class_idx >= self.num_classes:
            raise ValueError("The given class index is outside the defined number of classes for the replay buffer. "
                             "The given class was: {} and the number of classes defined is: {}"
                             .format(class_idx, self.num_classes))
        self.transitions[class_idx].append(transition)
        self.transitions_order.append(class_idx)
        self._enforce_max_length()
        if lock:
            self.reader_writer_lock.release_writing_and_reading()
    def sample(self, size: int) -> List[Transition]:
        """
        Sample a class-balanced batch of transitions from the replay buffer.
        :param size: the size of the batch to sample; must be a multiple of num_classes
        :return: a batch (list) of selected transitions from the replay buffer
        :raises ValueError: if size is not a multiple of num_classes, or (when duplicates
                            are disallowed) some class does not yet hold enough transitions
        """
        self.reader_writer_lock.lock_writing()
        if size % self.num_classes != 0:
            raise ValueError("Sampling batches from a balanced replay buffer should be done only using batch sizes "
                             "which are a multiple of the number of classes. The number of classes defined is: {} "
                             "and the batch size requested is: {}".format(self.num_classes, size))
        batch_size_from_each_class = size // self.num_classes
        if self.allow_duplicates_in_batch_sampling:
            transitions_idx = [np.random.randint(len(class_transitions), size=batch_size_from_each_class)
                               for class_transitions in self.transitions]
        else:
            for class_idx, class_transitions in enumerate(self.transitions):
                # BUGFIX: compare the per-class transition count (not the total count across
                # all classes) against the per-class batch size, matching the error message
                # below and preventing np.random.choice from failing on a short class
                if len(class_transitions) < batch_size_from_each_class:
                    raise ValueError("The replay buffer cannot be sampled since there are not enough transitions yet. "
                                     "There are currently {} transitions for class {}"
                                     .format(len(class_transitions), class_idx))
            transitions_idx = [np.random.choice(len(class_transitions), size=batch_size_from_each_class, replace=False)
                               for class_transitions in self.transitions]
        batch = []
        for class_idx, class_transitions_idx in enumerate(transitions_idx):
            batch += [self.transitions[class_idx][i] for i in class_transitions_idx]
        self.reader_writer_lock.release_writing()
        return batch
    def remove_transition(self, transition_index: int, lock: bool=True) -> None:
        """Not supported: transitions are stored per class, not by global index."""
        raise ValueError("It is not possible to remove specific transitions with a balanced replay buffer")
    def get_transition(self, transition_index: int, lock: bool=True) -> Union[None, Transition]:
        """Not supported: transitions are stored per class, not by global index."""
        raise ValueError("It is not possible to access specific transitions with a balanced replay buffer")
    def _enforce_max_length(self) -> None:
        """
        Make sure that the size of the replay buffer does not pass the maximum size allowed.
        If it passes the max size, the oldest transition in the replay buffer will be removed.
        This function does not use locks since it is only called internally
        :return: None
        """
        granularity, size = self.max_size
        if granularity == MemoryGranularity.Transitions:
            while size != 0 and self.num_transitions() > size:
                self._num_transitions -= 1
                # transitions_order[0] names the class that holds the globally oldest transition
                del self.transitions[self.transitions_order[0]][0]
                del self.transitions_order[0]
        else:
            raise ValueError("The granularity of the replay buffer can only be set in terms of transitions")
    def clean(self, lock: bool=True) -> None:
        """
        Clean the memory by removing all the transitions
        :param lock: if true, take the readers/writers lock around the reset
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()
        self.transitions = [[] for _ in range(self.num_classes)]
        self.transitions_order = []
        self._num_transitions = 0
        if lock:
            self.reader_writer_lock.release_writing_and_reading()
import operator
import random
from enum import Enum
from typing import List, Tuple, Any
import numpy as np
from rl_coach.core_types import Transition
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters, ExperienceReplay
from rl_coach.schedules import Schedule, ConstantSchedule
class PrioritizedExperienceReplayParameters(ExperienceReplayParameters):
    """Parameters for the proportional prioritized experience replay buffer."""
    def __init__(self):
        super().__init__()
        # PER hyper-parameters: priority exponent (alpha), importance-sampling
        # exponent schedule (beta) and the small constant added to every priority
        self.epsilon = 1e-6
        self.beta = ConstantSchedule(0.4)
        self.alpha = 0.6
        self.max_size = (MemoryGranularity.Transitions, 1000000)
    @property
    def path(self):
        """Import path of the memory class these parameters instantiate."""
        return 'rl_coach.memories.non_episodic.prioritized_experience_replay:PrioritizedExperienceReplay'
class SegmentTree(object):
    """
    A tree which can be used as a min/max heap or a sum tree.
    Add or update item value - O(log N)
    Sampling an item - O(log N)

    The tree is stored as a flat array of length 2*size - 1: internal nodes occupy
    indices [0, size-1) and the leaves occupy [size-1, 2*size-1).
    """
    class Operation(Enum):
        # each operation carries its combining operator and the identity value used
        # for leaves that have not been written yet
        MAX = {"operator": max, "initial_value": -float("inf")}
        MIN = {"operator": min, "initial_value": float("inf")}
        SUM = {"operator": operator.add, "initial_value": 0}
    def __init__(self, size: int, operation: Operation):
        """
        :param size: number of leaves; must be a positive power of 2
        :param operation: the aggregation applied up the tree (SUM / MIN / MAX)
        :raises ValueError: if size is not a positive power of 2
        """
        self.next_leaf_idx_to_write = 0
        self.size = size
        if not (size > 0 and size & (size - 1) == 0):
            raise ValueError("A segment tree size must be a positive power of 2. The given size is {}".format(self.size))
        self.operation = operation
        self.tree = np.ones(2 * size - 1) * self.operation.value['initial_value']
        self.data = [None] * size
    def _propagate(self, node_idx: int) -> None:
        """
        Propagate an update of a node's value to its parent node
        :param node_idx: the index of the node that was updated
        :return: None
        """
        parent = (node_idx - 1) // 2
        self.tree[parent] = self.operation.value['operator'](self.tree[parent * 2 + 1], self.tree[parent * 2 + 2])
        if parent != 0:
            self._propagate(parent)
    def _retrieve(self, root_node_idx: int, val: float) -> int:
        """
        Retrieve the first node that has a value larger than val and is a child of the node at index idx
        :param root_node_idx: the index of the root node to search from
        :param val: the value to query for
        :return: the index of the resulting node
        """
        left = 2 * root_node_idx + 1
        right = left + 1
        if left >= len(self.tree):
            # reached a leaf
            return root_node_idx
        if val <= self.tree[left]:
            return self._retrieve(left, val)
        else:
            return self._retrieve(right, val - self.tree[left])
    def total_value(self) -> float:
        """
        Return the total value of the tree according to the tree operation. For SUM for example, this will return
        the total sum of the tree. for MIN, this will return the minimal value
        :return: the total value of the tree
        """
        return self.tree[0]
    def add(self, val: float, data: Any) -> None:
        """
        Add a new value to the tree with data assigned to it. Leaves are written in a
        circular fashion, so once the tree is full the oldest leaf is overwritten.
        :param val: the new value to add to the tree
        :param data: the data that should be assigned to this value
        :return: None
        """
        self.data[self.next_leaf_idx_to_write] = data
        self.update(self.next_leaf_idx_to_write, val)
        self.next_leaf_idx_to_write += 1
        if self.next_leaf_idx_to_write >= self.size:
            self.next_leaf_idx_to_write = 0
    def update(self, leaf_idx: int, new_val: float) -> None:
        """
        Update the value of the leaf at index leaf_idx and re-aggregate its ancestors.
        :param leaf_idx: the index of the leaf to update (0 <= leaf_idx < size)
        :param new_val: the new value of the leaf
        :return: None
        :raises ValueError: if leaf_idx is out of range
        """
        # BUGFIX: validate the leaf index directly. The previous check
        # (0 <= node_idx < len(self.tree)) accepted negative leaf indices, which would
        # silently overwrite an internal node; the message also said "left index".
        if not 0 <= leaf_idx < self.size:
            raise ValueError("The given leaf index ({}) can not be found in the tree. The available leaves are: 0-{}"
                             .format(leaf_idx, self.size - 1))
        node_idx = leaf_idx + self.size - 1
        self.tree[node_idx] = new_val
        self._propagate(node_idx)
    def get_element_by_partial_sum(self, val: float) -> Tuple[int, float, Any]:
        """
        Given a value between 0 and the tree sum, return the object which this value is in it's range.
        For example, if we have 3 leaves: 10, 20, 30, and val=35, this will return the 3rd leaf, by accumulating
        leaves by their order until getting to 35. This allows sampling leaves according to their proportional
        probability.
        :param val: a value within the range 0 and the tree sum
        :return: the index of the resulting leaf in the tree, the (unnormalized) value stored
                 at that leaf, and the object itself
        """
        node_idx = self._retrieve(0, val)
        leaf_idx = node_idx - self.size + 1
        data_value = self.tree[node_idx]
        data = self.data[leaf_idx]
        return leaf_idx, data_value, data
    def __str__(self):
        # render the tree level by level, one line per level
        result = ""
        start = 0
        size = 1
        while size <= self.size:
            result += "{}\n".format(self.tree[start:(start + size)])
            start += size
            size *= 2
        return result
class PrioritizedExperienceReplay(ExperienceReplay):
    """
    This is the proportional sampling variant of the prioritized experience replay as described
    in https://arxiv.org/pdf/1511.05952.pdf.

    Three segment trees over the same leaves are kept in sync: a SUM tree of p^alpha
    (for proportional sampling), a MIN tree of p^alpha (for the max importance-sampling
    weight) and a MAX tree of raw priorities (to assign new transitions the current
    maximal priority).
    """
    def __init__(self, max_size: Tuple[MemoryGranularity, int], alpha: float=0.6, beta: Schedule=None,
                 epsilon: float=1e-6, allow_duplicates_in_batch_sampling: bool=True):
        """
        :param max_size: the maximum number of transitions or episodes to hold in the memory
        :param alpha: the alpha prioritization coefficient
        :param beta: the beta parameter used for importance sampling; defaults to ConstantSchedule(0.4)
        :param epsilon: a small value added to the priority of each transition
        :param allow_duplicates_in_batch_sampling: allow having the same transition multiple times in a batch
        """
        if max_size[0] != MemoryGranularity.Transitions:
            raise ValueError("Prioritized Experience Replay currently only support setting the memory size in "
                             "transitions granularity.")
        # round the requested size up to a power of 2, as required by the segment trees
        self.power_of_2_size = 1
        while self.power_of_2_size < max_size[1]:
            self.power_of_2_size *= 2
        super().__init__((MemoryGranularity.Transitions, self.power_of_2_size), allow_duplicates_in_batch_sampling)
        self.sum_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.SUM)
        self.min_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.MIN)
        self.max_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.MAX)
        self.alpha = alpha
        # BUGFIX: the default beta used to be the mutable default argument
        # ConstantSchedule(0.4), shared across ALL instances -- and self.beta.step()
        # mutates the schedule. Create a fresh schedule per instance instead.
        self.beta = beta if beta is not None else ConstantSchedule(0.4)
        self.epsilon = epsilon
        self.maximal_priority = 1.0
    def _update_priority(self, leaf_idx: int, error: float) -> None:
        """
        Update the priority of a given transition, using its index in the tree and its error
        :param leaf_idx: the index of the transition leaf in the tree
        :param error: the new error value
        :return: None
        :raises ValueError: if the error is negative
        """
        if error < 0:
            raise ValueError("The priorities must be non-negative values")
        priority = (error + self.epsilon)
        # sum/min trees store p^alpha (sampling distribution); max tree stores the raw priority
        self.sum_tree.update(leaf_idx, priority ** self.alpha)
        self.min_tree.update(leaf_idx, priority ** self.alpha)
        self.max_tree.update(leaf_idx, priority)
        self.maximal_priority = self.max_tree.total_value()
    def update_priorities(self, indices: List[int], error_values: List[float]) -> None:
        """
        Update the priorities of a batch of transitions using their indices and their new TD error terms
        :param indices: the indices of the transitions to update
        :param error_values: the new error values
        :return: None
        :raises ValueError: if the two lists differ in length
        """
        self.reader_writer_lock.lock_writing_and_reading()
        if len(indices) != len(error_values):
            raise ValueError("The number of indexes requested for update don't match the number of error values given")
        for transition_idx, error in zip(indices, error_values):
            self._update_priority(transition_idx, error)
        self.reader_writer_lock.release_writing_and_reading()
    def sample(self, size: int) -> List[Transition]:
        """
        Sample a batch of transitions form the replay buffer. If the requested size is larger than the number
        of samples available in the replay buffer then the batch will return empty.
        Each returned transition gets its tree index and normalized importance-sampling
        weight written into transition.info ('idx' and 'weight').
        :param size: the size of the batch to sample
        :return: a batch (list) of selected transitions from the replay buffer
        :raises ValueError: if fewer than size transitions are stored
        """
        self.reader_writer_lock.lock_writing()
        if self.num_transitions() >= size:
            # split the tree leaves to equal segments and sample one transition from each segment
            batch = []
            segment_size = self.sum_tree.total_value() / size
            # get the maximum weight in the memory
            min_probability = self.min_tree.total_value() / self.sum_tree.total_value()  # min P(j) = min p^a / sum(p^a)
            max_weight = (min_probability * self.num_transitions()) ** -self.beta.current_value  # max wi
            # sample a batch
            for i in range(size):
                segment_start = segment_size * i
                segment_end = segment_size * (i + 1)
                # sample leaf and calculate its weight
                val = random.uniform(segment_start, segment_end)
                leaf_idx, priority, transition = self.sum_tree.get_element_by_partial_sum(val)
                priority /= self.sum_tree.total_value()  # P(j) = p^a / sum(p^a)
                weight = (self.num_transitions() * priority) ** -self.beta.current_value  # (N * P(j)) ^ -beta
                normalized_weight = weight / max_weight  # wj = ((N * P(j)) ^ -beta) / max wi
                transition.info['idx'] = leaf_idx
                transition.info['weight'] = normalized_weight
                batch.append(transition)
            # anneal beta once per sampled batch
            self.beta.step()
        else:
            raise ValueError("The replay buffer cannot be sampled since there are not enough transitions yet. "
                             "There are currently {} transitions".format(self.num_transitions()))
        self.reader_writer_lock.release_writing()
        return batch
    def store(self, transition: Transition, lock=True) -> None:
        """
        Store a new transition in the memory, assigning it the current maximal priority
        so it is sampled at least once before its priority is updated.
        :param transition: a transition to store
        :param lock: if true, take the readers/writers lock around the tree updates
        :return: None
        """
        # Calling super.store() so that in case a memory backend is used, the memory backend can store this transition.
        # NOTE(review): super().store() is also invoked below with lock=False; if the parent
        # class appends the transition to its buffer on both calls, it is stored twice --
        # confirm whether this first call is only meant to forward to the memory backend.
        super().store(transition)
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()
        transition_priority = self.maximal_priority
        self.sum_tree.add(transition_priority ** self.alpha, transition)
        self.min_tree.add(transition_priority ** self.alpha, transition)
        self.max_tree.add(transition_priority, transition)
        super().store(transition, False)
        if lock:
            self.reader_writer_lock.release_writing_and_reading()
    def clean(self, lock=True) -> None:
        """
        Clean the memory by removing all the episodes and resetting the priority trees.
        :param lock: if true, take the readers/writers lock around the reset
        :return: None
        """
        if lock:
            self.reader_writer_lock.lock_writing_and_reading()
        super().clean(lock=False)
        self.sum_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.SUM)
        self.min_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.MIN)
        self.max_tree = SegmentTree(self.power_of_2_size, SegmentTree.Operation.MAX)
        if lock:
            self.reader_writer_lock.release_writing_and_reading()
import math
import matplotlib.pyplot as plt
import numpy as np
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Udacity Data Science Nano degree class project
Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
self.mean = np.mean(np.array(self.data))
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample == True:
self.stdev = np.std(np.array(self.data), ddof=1)
else:
self.stdev = np.std(np.array(self.data))
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
True: if data read properly
False: if non-numeric data encountered
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
line = file.readline()
data_list = []
while line:
try:
data_list.append(int(line))
except ValueError:
print("This line is not numeric: ", line)
return False
line = file.readline()
file.close()
print("Loaded data are:", data_list)
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
return True
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces=50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
# TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval * i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2, sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# create a new Gaussian object
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
    """Magic method to output the characteristics of the Gaussian instance.

    Returns:
        string: characteristics of the Gaussian
    """
    return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """
    Udacity Data Science degree class exercise

    Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats to be extracted from the data file
        p (float) representing the probability of an event occurring
        n (int) number of trials
    """

    def __init__(self, prob=.5, size=20):
        # p and n must be set before calling the mean/stdev helpers below.
        self.n = size
        self.p = prob
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self):
        """Calculate the mean from p and n.

        Returns:
            float: mean of the data set
        """
        self.mean = self.p * self.n
        return self.mean

    def calculate_stdev(self):
        """Calculate the standard deviation from p and n.

        Returns:
            float: standard deviation of the data set
        """
        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
        return self.stdev

    def replace_stats_with_data(self):
        """Recalculate p and n from the loaded data set (assumed 0/1 values).

        Returns:
            float: the p value
            float: the n value
        """
        self.n = len(self.data)
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()
        return self.p, self.n

    def plot_bar(self):
        """Output a bar chart of the outcome counts using matplotlib.

        Returns:
            None
        """
        # Expected counts of failures ('0') and successes ('1').
        plt.bar(x=['0', '1'], height=[(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')

    def pdf(self, k):
        """Probability mass of exactly ``k`` successes in ``n`` trials.

        Args:
            k (int): number of successes to evaluate the probability at

        Returns:
            float: probability density function output
        """
        # Binomial coefficient n-choose-k times the success/failure probabilities.
        a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
        b = (self.p ** k) * (1 - self.p) ** (self.n - k)
        return a * b

    def plot_bar_pdf(self):
        """Plot the pdf of the binomial distribution for every outcome 0..n.

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        x = []
        y = []

        # calculate the x values to visualize
        for i in range(self.n + 1):
            x.append(i)
            y.append(self.pdf(i))

        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()

        return x, y

    def __add__(self, other):
        """Add together two Binomial distributions with equal p.

        Args:
            other (Binomial): Binomial instance

        Raises:
            AssertionError: if the two distributions do not share the same p.

        Returns:
            Binomial: Binomial distribution
        """
        # The bare assert replaces the original no-op try/except that
        # immediately re-raised; callers still see an AssertionError.
        assert self.p == other.p, 'p values are not equal'

        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()
        return result

    def __repr__(self):
        """Output the characteristics of the Binomial instance.

        Returns:
            string: characteristics of the Binomial
        """
        return "mean {}, standard deviation {}, p {}, n {}". \
            format(self.mean, self.stdev, self.p, self.n)
import argparse
import os
import os.path as osp
import uuid
from typing import Any, Callable, Dict, List, Optional
from omegaconf import OmegaConf
from rl_utils.launcher.run_exp import get_random_id, sub_in_args, sub_in_vars
# Directory (relative to the working directory) where the launcher stores the
# shell script for each run; used to reconstruct a run's training command.
RUN_DIR = "data/log/runs/"
def change_arg_vals(cmd_parts: List[str], new_arg_values: Dict[str, Any]) -> List[str]:
    """Replace argument values in a tokenized command, in place.

    If the argument value does not exist, it will be added.

    :param cmd_parts: Tokenized command (alternating flags and values).
    :param new_arg_values: Maps a flag to its new value. If the value is a
        function, it will take as input the current argument value (or "" when
        the flag is newly added) and return the new argument value.
    :returns: The (mutated) ``cmd_parts`` list.
    """
    did_find = {k: False for k in new_arg_values.keys()}
    for i in range(len(cmd_parts) - 1):
        if cmd_parts[i] in new_arg_values:
            replace_val = new_arg_values[cmd_parts[i]]
            if callable(replace_val):
                cmd_parts[i + 1] = replace_val(cmd_parts[i + 1])
            else:
                cmd_parts[i + 1] = replace_val
            did_find[cmd_parts[i]] = True

    # Append any requested flags that were not already in the command.
    # (The original loop variable here shadowed the `did_find` dict.)
    all_not_found_k = [k for k, was_found in did_find.items() if not was_found]
    for not_found_k in all_not_found_k:
        new_val = new_arg_values[not_found_k]
        if callable(new_val):
            new_val = new_val("")
        cmd_parts.extend([not_found_k, new_val])
    return cmd_parts
def split_cmd_txt(cmd):
    """Tokenize a command string, splitting on both spaces and '='."""
    tokens = []
    for chunk in cmd.split(" "):
        tokens.extend(chunk.split("="))
    return tokens
def eval_ckpt(
    run_id,
    model_ckpt_path,
    new_run_id,
    cfg,
    proj_dat,
    modify_run_cmd_fn,
    args: argparse.Namespace,
):
    """Reconstruct a run's launch command, rewrite it into an evaluation
    command, and execute it synchronously via ``os.system``.

    :param run_id: ID of the original training run; used to look up the saved
        launch script under ``RUN_DIR``.
    :param model_ckpt_path: Checkpoint file appended via ``ckpt_load_k``.
    :param new_run_id: Identifier for the evaluation run (forwarded to
        ``modify_run_cmd_fn`` only).
    :param cfg: Loaded config; ``cfg.eval_sys`` holds the rewrite rules.
    :param proj_dat: Comma-separated keys into ``cfg.proj_data`` whose command
        fragments and env vars are appended, or None.
    :param modify_run_cmd_fn: Optional hook to post-process the token list.
    :returns: Always True.
    """
    eval_sys_cfg = cfg.eval_sys
    # Find the run command.
    run_path = osp.join(RUN_DIR, run_id + ".sh")
    if osp.exists(run_path):
        # Get the actually executed command.
        with open(run_path, "r") as f:
            cmd = f.readlines()[-1]
        cmd_parts = split_cmd_txt(cmd)
    else:
        # No saved launch script: fall back to the template command from the
        # config and substitute variables the same way the launcher does.
        cmd = eval_sys_cfg.eval_run_cmd
        add_all = cfg.get("add_all", None)
        if add_all is not None:
            cmd = sub_in_args(cmd, add_all)
        cmd = sub_in_vars(cmd, cfg, 0, "eval")
        ident = get_random_id()
        cmd = cmd.replace("$SLURM_ID", ident)
        cmd_parts = split_cmd_txt(cmd)
        # Dummy line for srun
        cmd_parts.insert(0, "")

    def add_eval_suffix(x):
        # Append "_eval" to a value while keeping a file extension or a
        # trailing slash intact (e.g. "out.txt" -> "out_eval.txt").
        x = x.strip()
        if x == "":
            return get_random_id() + "_eval"
        elif "." in x:
            parts = x.split(".")
            return parts[0] + "_eval." + parts[1]
        elif x[-1] == "/":
            return x[:-1] + "_eval/"
        else:
            return x + "_eval"

    # Rewrite configured values (suffixing "_eval") and apply hard overrides.
    cmd_parts = change_arg_vals(
        cmd_parts,
        {
            **{k: add_eval_suffix for k in eval_sys_cfg.add_eval_to_vals},
            **eval_sys_cfg.change_vals,
        },
    )
    cmd_parts.extend(
        [
            eval_sys_cfg.ckpt_load_k,
            model_ckpt_path,
        ]
    )
    add_env_vars = cfg.add_env_vars
    if proj_dat is not None:
        for k in proj_dat.split(","):
            cmd_parts.extend(split_cmd_txt(cfg.proj_data[k]))
            add_env_vars.append(cfg.get("proj_dat_add_env_vars", {}).get(k, ""))
    # Drop the first token (the launcher prefix, or the dummy inserted above)
    # and prepend the environment variable assignments.
    cmd_parts = cmd_parts[1:]
    cmd_parts = [*add_env_vars, *cmd_parts]
    if modify_run_cmd_fn is not None:
        cmd_parts = modify_run_cmd_fn(cmd_parts, new_run_id, args)
    # Locate the python script token; everything after it is key/value pairs.
    python_file = -1
    for i, cmd_part in enumerate(cmd_parts):
        if ".py" in cmd_part:
            python_file = i
            break
    new_cmd = (" ".join(cmd_parts[: python_file + 1])) + " "
    # Re-join the pairs: "--flag value" style keeps a space separator, other
    # keys use the configured separator (e.g. "=" for override-style args).
    for i in range(python_file + 1, len(cmd_parts) - 1, 2):
        k, v = cmd_parts[i], cmd_parts[i + 1]
        if k.startswith("--"):
            sep = " "
        else:
            sep = eval_sys_cfg.sep
        new_cmd += f" {k}{sep}{v}"
    new_cmd = sub_in_vars(new_cmd, cfg, 0, "eval")
    print("EVALUATING ", new_cmd)
    os.system(new_cmd)
    return True
def run(
    modify_run_cmd_fn: Optional[
        Callable[[List[str], str, argparse.Namespace], List[str]]
    ] = None,
    add_args_fn: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
    """CLI entry point: evaluate the latest (or requested) checkpoint of each
    requested run.

    :param modify_run_cmd_fn: Optional hook called with
        (cmd_parts, new_run_id, args) to post-process the eval command tokens.
    :param add_args_fn: Optional hook to register extra CLI arguments on the
        parser before parsing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--runs", default=None, type=str)  # comma-separated run IDs
    parser.add_argument("--proj-dat", default=None, type=str)
    parser.add_argument("--idx", default=None, type=int)  # checkpoint index; latest if omitted
    parser.add_argument("--cfg", required=True, type=str)
    parser.add_argument("--debug", action="store_true")  # only evaluate the first run
    if add_args_fn is not None:
        add_args_fn(parser)
    args = parser.parse_args()
    cfg = OmegaConf.load(args.cfg)
    eval_sys_cfg = cfg.eval_sys

    runs = args.runs.split(",")
    for run_id in runs:
        full_path = osp.join(cfg.base_data_dir, eval_sys_cfg.ckpt_search_dir, run_id)
        # Checkpoints are named "ckpt.<idx>.pth"; collect the available indices.
        ckpt_idxs = [
            int(f.split(".")[1])
            for f in os.listdir(full_path)
            if ".pth" in f and "ckpt" in f
        ]
        if args.idx is None:
            last_idx = max(ckpt_idxs)
        else:
            last_idx = args.idx
        full_path = osp.join(full_path, f"ckpt.{last_idx}.pth")

        # Short random suffix keeps repeated evals of the same run distinct.
        rnd_id = str(uuid.uuid4())[:3]
        new_run_id = f"{run_id}_eval_{rnd_id}"
        eval_ckpt(
            run_id,
            full_path,
            new_run_id,
            cfg,
            args.proj_dat,
            modify_run_cmd_fn,
            args,
        )
        if args.debug:
            break
# Allow running this module directly as a script with the default hooks.
if __name__ == "__main__":
    run()
from omegaconf import DictConfig, OmegaConf
from rl_utils.logging.base_logger import Logger, LoggerCfgType
try:
import wandb
except ImportError:
wandb = None
class WbLogger(Logger):
    """
    Logger for logging to the weights and W&B online service.
    """

    def __init__(
        self,
        wb_proj_name: str,
        wb_entity: str,
        seed: int,
        log_dir: str,
        vid_dir: str,
        save_dir: str,
        full_cfg: LoggerCfgType,
        smooth_len: int = 1,
        run_name: str = "",
        group_name: str = "",
        **kwargs,
    ):
        """
        :parameter run_name: If empty string then a random run name is assigned.
        :parameter group_name: If empty string then no group name is used.
        """
        if wandb is None:
            raise ImportError("Wandb is not installed")
        super().__init__(
            seed, log_dir, vid_dir, save_dir, full_cfg, smooth_len, run_name
        )
        if wb_proj_name == "" or wb_entity == "":
            raise ValueError(
                f"Must specify W&B project and entity name {wb_proj_name}, {wb_entity}"
            )
        self.wb_proj_name = wb_proj_name
        self.wb_entity = wb_entity
        # Starts the W&B session immediately on construction.
        self.wandb = self._create_wandb(full_cfg, group_name)

    def log_vals(self, key_vals, step_count):
        # Cast to int since callers may pass a float/numpy step counter.
        wandb.log(key_vals, step=int(step_count))

    def watch_model(self, model):
        # Track the model's parameters/gradients in the W&B dashboard.
        wandb.watch(model)

    def _create_wandb(self, full_cfg: LoggerCfgType, group_name: str):
        """Initialize the W&B run and return the ``wandb`` module handle."""
        if group_name == "":
            group_name = None
        if isinstance(full_cfg, DictConfig):
            # Convert OmegaConf configs to plain containers before upload.
            full_cfg = OmegaConf.to_container(full_cfg, resolve=True)
        # NOTE(review): `self.run_name` is presumably set by the Logger base
        # class from the `run_name` argument — confirm against base_logger.
        self.run = wandb.init(
            project=self.wb_proj_name,
            name=self.run_name,
            entity=self.wb_entity,
            group=group_name,
            config=full_cfg,
        )
        return wandb

    def collect_img(self, k: str, img_path: str, prefix: str = ""):
        # Queue an image (referenced by path) for the next log flush;
        # `_step_log_info` and `_clear_keys` come from the Logger base class.
        use_k = prefix + k
        self._step_log_info[use_k] = wandb.Image(img_path)
        self._clear_keys.add(use_k)

    def close(self):
        # Finish the W&B run so all buffered data is synced.
        self.run.finish()
from typing import Dict, List, Union
import numpy as np
import torch
from torch import nn as nn
class SimpleCNN(nn.Module):
    """A Simple 3-Conv CNN followed by a fully connected layer

    Takes in observations and produces an embedding of the rgb and/or depth
    components. Note the observation spaces should be in format (W, H, C) but
    the input to the model should be in format (C, W, H).

    :param obs_shape: Either a dictionary of observation space shapes or the
        observation space shape. This will only select the keys from the
        observation space have 3 tensor dimensions.
    :param output_size: The hidden dimension output size
    """

    def __init__(
        self,
        obs_shape: Union[Dict[str, List[int]], List[int]],
        output_size: int,
    ):
        super().__init__()
        if not isinstance(obs_shape, dict):
            # Normalize the single-shape case to the dict case.
            obs_shape = {None: obs_shape}
        # Only image-like (3D) observations are fed through the CNN; their
        # channels are stacked together.
        obs_input_keys = [k for k, v in obs_shape.items() if len(v) == 3]
        self._n_input = sum(obs_shape[k][2] for k in obs_input_keys)
        self._obs_input_keys = obs_input_keys

        # kernel size for different CNN layers
        self._cnn_layers_kernel_size = [(8, 8), (4, 4), (3, 3)]

        # strides for different CNN layers
        self._cnn_layers_stride = [(4, 4), (2, 2), (1, 1)]

        # Spatial dims are taken from the first image input; all image inputs
        # are assumed to share the same (W, H).
        cnn_dims = np.array(obs_shape[obs_input_keys[0]][:2], dtype=np.float32)

        for kernel_size, stride in zip(
            self._cnn_layers_kernel_size, self._cnn_layers_stride
        ):
            cnn_dims = self._conv_output_dim(
                dimension=cnn_dims,
                padding=np.array([0, 0], dtype=np.float32),
                dilation=np.array([1, 1], dtype=np.float32),
                kernel_size=np.array(kernel_size, dtype=np.float32),
                stride=np.array(stride, dtype=np.float32),
            )

        self.cnn = nn.Sequential(
            nn.Conv2d(
                in_channels=self._n_input,
                out_channels=32,
                kernel_size=self._cnn_layers_kernel_size[0],
                stride=self._cnn_layers_stride[0],
            ),
            nn.ReLU(True),
            nn.Conv2d(
                in_channels=32,
                out_channels=64,
                kernel_size=self._cnn_layers_kernel_size[1],
                stride=self._cnn_layers_stride[1],
            ),
            nn.ReLU(True),
            nn.Conv2d(
                in_channels=64,
                out_channels=32,
                kernel_size=self._cnn_layers_kernel_size[2],
                stride=self._cnn_layers_stride[2],
            ),
            #  nn.ReLU(True),
            nn.Flatten(),
            nn.Linear(32 * cnn_dims[0] * cnn_dims[1], output_size),
            nn.ReLU(True),
        )

        self.layer_init()

    def _conv_output_dim(self, dimension, padding, dilation, kernel_size, stride):
        r"""Calculates the output height and width based on the input
        height and width to the convolution layer.
        ref: https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d
        """
        assert len(dimension) == 2
        out_dimension = []
        for i in range(len(dimension)):
            out_dimension.append(
                int(
                    np.floor(
                        (
                            (
                                dimension[i]
                                + 2 * padding[i]
                                - dilation[i] * (kernel_size[i] - 1)
                                - 1
                            )
                            / stride[i]
                        )
                        + 1
                    )
                )
            )
        return tuple(out_dimension)

    def layer_init(self):
        """Kaiming-normal init for conv/linear weights; zero biases."""
        for layer in self.cnn:  # type: ignore
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                # Fix: the original passed `calculate_gain("relu")` as the
                # second positional argument of kaiming_normal_, which is `a`
                # (the leaky-ReLU negative slope), not a gain — kaiming init
                # takes no gain argument. Specify the nonlinearity instead.
                nn.init.kaiming_normal_(layer.weight, nonlinearity="relu")
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, val=0)

    def forward(self, obs: Union[Dict[str, torch.Tensor], torch.Tensor]):
        """Embed image observations.

        :param obs: A (batch, channels, H, W) tensor, or a dict of such
            tensors which are concatenated along the channel dimension in the
            key order captured at construction time.
        """
        if isinstance(obs, dict):
            cnn_inputs = torch.cat([obs[k] for k in self._obs_input_keys], dim=1)
        else:
            cnn_inputs = obs
        return self.cnn(cnn_inputs)
import argparse
from typing import Callable, Dict, List, Optional
import pandas as pd
from omegaconf import OmegaConf
from rl_utils.plotting.utils import MISSING_VALUE
from rl_utils.plotting.wb_query import fetch_data_from_cfg
def plot_table(
    df: pd.DataFrame,
    col_key: str,
    row_key: str,
    cell_key: str,
    col_order: List[str],
    row_order: List[str],
    renames: Optional[Dict[str, str]] = None,
    error_scaling=1.0,
    n_decimals=2,
    missing_fill_value=MISSING_VALUE,
    error_fill_value=0.3444,
    get_row_highlight: Optional[Callable[[str, pd.DataFrame], Optional[str]]] = None,
    make_col_header: Optional[Callable[[int], str]] = None,
    x_label: str = "",
    y_label: str = "",
    skip_toprule: bool = False,
    include_err: bool = True,
    write_to=None,
    err_key: Optional[str] = None,
    add_tabular: bool = True,
    bold_row_names: bool = True,
    show_row_labels: bool = True,
    show_col_labels: bool = True,
    compute_err_fn: Optional[Callable[[pd.Series], pd.Series]] = None,
    value_scaling: float = 1.0,
):
    """Render a LaTeX table aggregating `cell_key` over rows/columns.

    :param df: The index of the data frame does not matter, only the row
        values and column names matter.
    :param col_key: A string from the set of columns.
    :param row_key: A string from the set of columns (but this is used to form
        the rows of the table).
    :param renames: Only used for display name conversions. Does not affect
        functionality.
    :param make_col_header: Returns the string at the top of the table like
        "ccccc". Put "c|ccccc" to insert a vertical line in between the first
        and other columns.
    :param x_label: Renders another row of text on the top that spans all the
        columns.
    :param y_label: Renders a side column with vertically rotated text that
        spans all the rows.
    :param err_key: If non-None, this will be used as the error and override
        any error calculation.
    :param show_row_labels: If False, the row names are not displayed, and no
        column for the row name is displayed.
    :returns: The rendered LaTeX string (also printed or written to
        ``write_to``).

    Example: the data frame might look like
    ```
        democount  type   final_train_success
    0   100        mirl   train  0.9800
    1   100        mirl   train  0.9900
    3   100        mirl   eval   1.0000
    ```
    `col_key='type', row_key='demcount', cell_key='final_train_success'` plots
    the # of demos as rows and the type as columns with the final_train_success
    values as the cell values. Duplicate row and columns are automatically
    grouped together.
    """
    # Work on a copy so the caller's DataFrame is not mutated by the value
    # scaling and placeholder replacement below (the original modified the
    # argument in place before copying).
    df = df.copy()
    df[cell_key] = df[cell_key] * value_scaling
    if make_col_header is None:

        def make_col_header(n_cols):
            return "c" * n_cols

    if renames is None:
        renames = {}
    df = df.replace("missing", missing_fill_value)
    df = df.replace("error", error_fill_value)

    # Aggregate each (row, column) cell: mean over runs plus an error value.
    rows = {}
    for row_k, row_df in df.groupby(row_key):
        grouped = row_df.groupby(col_key)
        df_avg_y = grouped[cell_key].mean()
        df_std_y = grouped[cell_key].std() * error_scaling
        sel_err = False
        if err_key is not None:
            # Prefer a precomputed error column when it is fully populated.
            err = grouped[err_key].mean()
            if not err.hasnans:
                df_std_y = err
                sel_err = True
        if not sel_err and compute_err_fn is not None:
            df_std_y = compute_err_fn(grouped[cell_key])

        rows[row_k] = (df_avg_y, df_std_y)

    col_sep = " & "
    row_sep = " \\\\\n"

    all_s = []

    def clean_text(s):
        # Escape/clean LaTeX-sensitive characters in display names.
        return s.replace("%", "\\%").replace("_", " ")

    # Add the column title row.
    row_str = []
    if show_row_labels:
        row_str.append("")
    for col_k in col_order:
        row_str.append("\\textbf{%s}" % clean_text(renames.get(col_k, col_k)))
    all_s.append(col_sep.join(row_str))

    for row_k in row_order:
        if row_k == "hline":
            # Sentinel row name that inserts a horizontal rule instead of data.
            all_s.append("\\hline")
            continue
        row_str = []
        if show_row_labels:
            if bold_row_names:
                row_str.append("\\textbf{%s}" % clean_text(renames.get(row_k, row_k)))
            else:
                row_str.append(clean_text(renames.get(row_k, row_k)))
        row_y, row_std = rows[row_k]
        if get_row_highlight is not None:
            sel_col = get_row_highlight(row_k, row_y)
        else:
            sel_col = None
        for col_k in col_order:
            if col_k not in row_y:
                row_str.append("-")
            else:
                val = row_y.loc[col_k]
                std = row_std.loc[col_k]
                if val == missing_fill_value * value_scaling:
                    row_str.append("-")
                elif val == error_fill_value:
                    row_str.append("E")
                else:
                    err = ""
                    if include_err:
                        err = f"$ \\pm$ %.{n_decimals}f " % std
                        err = f"{{\\scriptsize {err} }}"

                    txt = f" %.{n_decimals}f {err}" % val
                    if col_k == sel_col:
                        txt = "\\textbf{ " + txt + " }"
                    row_str.append(txt)
        all_s.append(col_sep.join(row_str))

    n_columns = len(col_order)
    if show_row_labels:
        n_columns += 1
    col_header_s = make_col_header(n_columns)
    if y_label != "":
        # Extra left-hand column holds the rotated y-axis label; rules are
        # drawn with cmidrule so they do not cross that column.
        col_header_s = "c" + col_header_s
        start_of_line = " & "
        toprule = ""
        midrule = "\\cmidrule{2-%s}\n" % (n_columns + 1)
        botrule = midrule
        row_lines = [start_of_line + x for x in all_s[1:]]
        row_lines[0] = (
            "\\multirow{4}{1em}{\\rotatebox{90}{%s}}" % y_label
        ) + row_lines[0]
    else:
        row_lines = all_s[1:]
        start_of_line = ""
        toprule = "\\toprule\n"
        midrule = "\\midrule\n"
        botrule = "\\bottomrule"
    if skip_toprule:
        toprule = ""

    if x_label != "":
        toprule += ("& \\multicolumn{%i}{c}{%s}" % (n_columns, x_label)) + row_sep

    ret_s = ""
    if add_tabular:
        ret_s += "\\begin{tabular}{%s}\n" % col_header_s
        # Line above the table.
        ret_s += toprule
    if show_col_labels:
        # Separate the column headers from the rest of the table by a line.
        ret_s += start_of_line + all_s[0] + row_sep
        ret_s += midrule
    all_row_s = ""
    for row_line in row_lines:
        all_row_s += row_line
        # Do not add the separator to the last element if we are not in tabular mode.
        if "hline" not in row_line:
            all_row_s += row_sep
        else:
            all_row_s += "\n"
    ret_s += all_row_s

    # Line below the table.
    if add_tabular:
        ret_s += botrule
        ret_s += "\n\\end{tabular}\n"
    if write_to is not None:
        with open(write_to, "w") as f:
            f.write(ret_s)
        print(f"Wrote result to {write_to}")
    else:
        print(ret_s)
    return ret_s
def plot_from_file(plot_cfg_path, add_query_fields=None):
    """Fetch the W&B data described by a plot config file and render it as a
    LaTeX table using the config's ``sub_plot_params``."""
    cfg = OmegaConf.load(plot_cfg_path)
    fetched = fetch_data_from_cfg(plot_cfg_path, add_query_fields)
    plot_table(fetched, cell_key=cfg.plot_key, **cfg.sub_plot_params)
    return fetched
# Script entry point: render the table described by a YAML plot config.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--cfg", type=str, required=True)  # path to the plot YAML config
    args = parser.parse_args()
    plot_from_file(args.cfg)
try:
import wandb
except ImportError:
wandb = None
import os
import os.path as osp
from argparse import ArgumentParser
from collections import defaultdict
from pprint import pprint
from typing import Any, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
from omegaconf import DictConfig, OmegaConf
from rl_utils.common.core_utils import CacheHelper
from rl_utils.plotting.utils import MISSING_VALUE
def extract_query_key(k):
    """Strip the special "ALL_" prefix from a query key, if present.

    Keys prefixed with "ALL_" request the full metric history from W&B; the
    prefix itself is not part of the metric name.
    """
    if k.startswith("ALL_"):
        # Slice rather than split so any later "ALL_" occurrences inside the
        # metric name are preserved.
        return k[len("ALL_"):]
    return k
def batch_query(
    all_select_fields: List[List[str]],
    all_filter_fields: List[Dict[str, Any]],
    proj_cfg: Dict[str, Any],
    all_should_skip: Optional[List[bool]] = None,
    all_add_info: Optional[List[Dict[str, Any]]] = None,
    verbose=True,
    limit=None,
    use_cached=False,
    reduce_op: Optional[Callable[[List], float]] = None,
    error_ok: bool = False,
):
    """Run several W&B queries back to back and concatenate their results.

    - all_should_skip: Whether to skip querying this value.
    """
    n_query = len(all_select_fields)
    if all_add_info is None:
        all_add_info = [None] * n_query
    if all_should_skip is None:
        all_should_skip = [False] * n_query

    data = []
    query_specs = zip(
        all_select_fields, all_filter_fields, all_should_skip, all_add_info
    )
    for select_fields, filter_fields, should_skip, add_info in query_specs:
        results = []
        if not should_skip:
            results = query(
                select_fields,
                filter_fields,
                proj_cfg,
                verbose,
                limit,
                use_cached,
                reduce_op,
                error_ok=error_ok,
            )
        if len(results) == 0:
            # Placeholder row so skipped/empty queries still appear downstream.
            results = [{k: MISSING_VALUE for k in select_fields}]
        for row in results:
            if add_info is None:
                data.append(row)
            else:
                data.append({**add_info, **row})
    return data
def query(
    select_fields: List[str],
    filter_fields: Dict[str, str],
    proj_cfg: Dict[str, Any],
    verbose=True,
    limit=None,
    use_cached=False,
    reduce_op: Optional[Callable[[List], float]] = None,
    error_ok: bool = False,
):
    """
    :param select_fields: The list of data to retrieve. If a field starts with
        "ALL_", then all the entries for this name from W&B are fetched. This gets
        the ENTIRE history. Other special keys include: "_runtime" (in
        seconds), "_timestamp".
    :param filter_fields: Key is the filter type (like group or tag) and value
        is the filter value (like the name of the group or tag to match)
    :param reduce_op: `np.mean` would take the average of the results.
    :param use_cached: Saves the results to disk so next time the same result is requested, it is loaded from disk rather than W&B.
        See README for more information.
    :param error_ok: If True, silently skip select fields missing from a run's
        summary instead of raising.
    """
    wb_proj_name = proj_cfg["proj_name"]
    wb_entity = proj_cfg["wb_entity"]

    # Cache key is the stringified query; results are always written to the
    # cache below, but only read back when `use_cached` is set.
    lookup = f"{select_fields}_{filter_fields}"
    cache = CacheHelper("wb_queries", lookup)

    if use_cached and cache.exists():
        return cache.load()

    if wandb is None:
        raise ValueError("Wandb is not installed")
    api = wandb.Api()

    # Translate the filter spec into the W&B API's query dict; an "id" filter
    # bypasses searching entirely and fetches that run directly.
    query_dict = {}
    search_id = None
    for f, v in filter_fields.items():
        if f == "group":
            query_dict["group"] = v
        elif f == "tag":
            query_dict["tags"] = v
        elif f == "id":
            search_id = v
        else:
            query_dict["config." + f] = v

    def log(s):
        if verbose:
            print(s)

    if search_id is None:
        log("Querying with")
        log(query_dict)
        runs = api.runs(f"{wb_entity}/{wb_proj_name}", query_dict)
    else:
        log(f"Searching for ID {search_id}")
        runs = [api.run(f"{wb_entity}/{wb_proj_name}/{search_id}")]
    log(f"Returned {len(runs)} runs")

    ret_data = []
    for rank_i, run in enumerate(runs):
        dat = {"rank": rank_i}
        for f in select_fields:
            v = None
            if f == "last_model":
                # Resolve the checkpoint directory from the run config (path
                # given by the dotted `ckpt_cfg_key`) and pick the highest
                # "ckpt.<idx>.pth" index.
                parts = proj_cfg["ckpt_cfg_key"].split(".")
                model_path = run.config
                for k in parts:
                    model_path = model_path[k]
                if proj_cfg.get("ckpt_append_name", False):
                    model_path = osp.join(model_path, run.name)
                if not osp.exists(model_path):
                    raise ValueError(f"Could not locate model folder {model_path}")
                model_idxs = [
                    int(model_f.split("ckpt.")[1].split(".pth")[0])
                    for model_f in os.listdir(model_path)
                    if model_f.startswith("ckpt.")
                ]
                if len(model_idxs) == 0:
                    raise ValueError(f"No models found under {model_path}")
                max_idx = max(model_idxs)
                final_model_f = osp.join(model_path, f"ckpt.{max_idx}.pth")
                v = final_model_f
            elif f == "summary":
                v = dict(run.summary)
                v["status"] = str(run.state)
                # Filter out non-primitive values.
                v = {
                    k: k_v for k, k_v in v.items() if isinstance(k_v, (int, float, str))
                }
            elif f == "status":
                v = run.state
            elif f == "config":
                v = run.config
            elif f == "id":
                v = run.id
            elif f.startswith("config."):
                # Dotted lookup into the nested run config.
                config_parts = f.split("config.")
                parts = config_parts[1].split(".")
                v = run.config
                for k in parts:
                    v = v[k]
            else:
                if f.startswith("ALL_"):
                    # Fetch the full logged history for this metric as a
                    # DataFrame of (_step, value) pairs.
                    fetch_field = extract_query_key(f)
                    df = run.history(samples=15000)
                    if fetch_field not in df.columns:
                        raise ValueError(
                            f"Could not find {fetch_field} in {df.columns} for query {filter_fields}"
                        )
                    v = df[["_step", fetch_field]]
                else:
                    if f not in run.summary:
                        if error_ok:
                            continue
                        raise ValueError(
                            f"Could not find {f} in {run.summary.keys()} from run {run} with query {query_dict}"
                        )
                    v = run.summary[f]
            if v is not None:
                dat[f] = v
        if len(dat) > 0:
            ret_data.append(dat)
        if limit is not None and len(ret_data) >= limit:
            break
    # Always refresh the on-disk cache with the latest results.
    cache.save(ret_data)
    if reduce_op is not None:
        # Collapse the per-run rows into one dict of reduced values.
        reduce_data = defaultdict(list)
        for p in ret_data:
            for k, v in p.items():
                reduce_data[k].append(v)
        ret_data = {k: reduce_op(v) for k, v in reduce_data.items()}
    log(f"Got data {ret_data}")
    return ret_data
def query_s(
    query_str: str,
    proj_cfg: DictConfig,
    verbose=True,
    use_cached: bool = False,
):
    """Parse a SQL-like query string and execute it against W&B.

    Expected shape: ``"<field>,<field> WHERE <k>=<v>,<k>=<v> [LIMIT n]"``.

    :param query_str: The query to parse; whitespace inside the field and
        filter lists is ignored.
    :param proj_cfg: Project config with W&B entity/project names.
    :returns: Whatever :func:`query` returns for the parsed spec.
    """
    select_s, filter_s = query_str.split(" WHERE ")
    select_fields = select_s.replace(" ", "").split(",")

    parts = filter_s.split(" LIMIT ")
    filter_s = parts[0]
    limit = None
    if len(parts) > 1:
        limit = int(parts[1])

    # Split each "k=v" filter on only the first '=' so values themselves may
    # contain '=' (the original split on every '=' and crashed on such input).
    filter_fields = dict(
        s.split("=", 1) for s in filter_s.replace(" ", "").split(",")
    )
    return query(
        select_fields,
        filter_fields,
        proj_cfg,
        verbose=verbose,
        limit=limit,
        use_cached=use_cached,
    )
def fetch_data_from_cfg(
    plot_cfg_path: str,
    add_query_fields: Optional[List[str]] = None,
    error_ok: bool = False,
    method_key: str = "methods",
) -> pd.DataFrame:
    """
    See the README for how the YAML file at `plot_cfg_path` should be structured.
    """
    cfg = OmegaConf.load(plot_cfg_path)
    if add_query_fields is None:
        add_query_fields = []

    methods = cfg[method_key]
    select_fields = [cfg.plot_key, *add_query_fields, *cfg.get("add_query_keys", [])]

    rows = batch_query(
        [list(select_fields) for _ in methods],
        [{cfg.method_spec: v} for v in methods.values()],
        all_should_skip=[len(v) == 0 for v in methods.values()],
        all_add_info=[{"method": k} for k in methods.keys()],
        proj_cfg=OmegaConf.load(cfg.proj_cfg),
        use_cached=cfg.use_cached,
        verbose=False,
        error_ok=error_ok,
    )
    return pd.DataFrame(rows)
# Script entry point: everything after the known flags is joined back together
# and treated as the query string itself.
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--cfg", required=True, type=str)
    parser.add_argument("--cache", action="store_true")
    args, query_args = parser.parse_known_args()
    query_args = " ".join(query_args)

    proj_cfg = OmegaConf.load(args.cfg)

    result = query_s(query_args, proj_cfg, use_cached=args.cache, verbose=False)
    # Summarize float-valued fields across the returned runs as (mean, std).
    # NOTE(review): assumes at least one run matched — `result[0]` raises on
    # an empty result.
    result_summary = {}
    keys = list(result[0].keys())
    for k in keys:
        values = [r[k] for r in result]
        if isinstance(values[0], float):
            result_summary[f"{k} (mean, std)"] = (
                np.mean(values),
                np.std(values),
            )
    pprint(result)
    if len(result_summary) > 0:
        pprint(result_summary)
import argparse
from collections import defaultdict
from typing import Dict, List, Optional, Tuple, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from omegaconf import OmegaConf
from rl_utils.plotting.utils import combine_dicts_to_df, fig_save
from rl_utils.plotting.wb_query import batch_query
# Matplotlib marker cycle; each method picks a marker by its index in
# `method_idxs` (modulo the list length).
MARKER_ORDER = ["^", "<", "v", "d", "s", "x", "o", ">"]
def smooth_arr(scalars: List[float], weight: float) -> List[float]:
    """Exponential moving average smoothing (TensorBoard style).

    Taken from the answer here https://stackoverflow.com/questions/42281844/what-is-the-mathematics-behind-the-smoothing-parameter-in-tensorboards-scalar

    :param weight: Between 0 and 1. 0 returns the input unchanged; values
        closer to 1 smooth more aggressively.
    """
    if not scalars:
        # Guard: the original raised IndexError on an empty input.
        return []
    last = scalars[0]  # First value in the plot (first timestep)
    smoothed = []
    for point in scalars:
        smoothed_val = last * weight + (1 - weight) * point  # Calculate smoothed value
        smoothed.append(smoothed_val)  # Save it
        last = smoothed_val  # Anchor the last smoothed value
    return smoothed
def make_steps_match(plot_df, group_key, x_name):
    """Align the x-axis values of all runs within each method group.

    For every group, the run with the most logged points donates its step
    sequence; every other run's steps are overwritten with a prefix of it so
    curves can be averaged point-by-point.

    NOTE(review): `run_df[x_name] = max_step_idxs[: len(run_df)]` relies on
    pandas index alignment between the two frames — confirm that runs within
    a group share a compatible index before reusing this elsewhere.
    """
    all_dfs = []
    for _, method_df in plot_df.groupby([group_key]):
        grouped_runs = method_df.groupby(["run"])
        # Find the longest run in this group; it defines the step sequence.
        max_len = -1
        max_step_idxs = None
        for _, run_df in grouped_runs:
            if len(run_df) > max_len:
                max_len = len(run_df)
                max_step_idxs = run_df[x_name]
        for _, run_df in grouped_runs:
            run_df[x_name] = max_step_idxs[: len(run_df)]
            all_dfs.append(run_df)
    return pd.concat(all_dfs)
def line_plot(
    plot_df,
    x_name: str,
    y_name: str,
    avg_key: str,
    group_key: str,
    smooth_factor: Union[Dict[str, float], float] = 0.0,
    ax: Optional[matplotlib.axes.Axes] = None,
    y_bounds: Optional[Tuple[float, float]] = None,
    y_disp_bounds: Optional[Tuple[float, float]] = None,
    x_disp_bounds: Optional[Tuple[float, float]] = None,
    group_colors: Optional[Dict[str, int]] = None,
    xtick_fn=None,
    ytick_fn=None,
    legend: bool = False,
    rename_map: Optional[Dict[str, str]] = None,
    title=None,
    axes_font_size=14,
    title_font_size=18,
    legend_font_size="x-large",
    method_idxs: Optional[Dict[str, int]] = None,
    num_marker_points: Optional[Dict[str, int]] = None,
    line_styles: Optional[Dict[str, str]] = None,
    tight=False,
    nlegend_cols=1,
    fetch_std=False,
    y_logscale=False,
    x_logscale=False,
    legend_loc: Optional[str] = None,
    ax_dims: Tuple[int, int] = (5, 4),
):
    """
    :param avg_key: This is typically the seed.
    :param group_key: These are the different lines.
    :param smooth_factor: Can specify a different smooth factor per method if desired.
    :param y_bounds: What the data plot values are clipped to.
    :param y_disp_bounds: What the plotting is stopped at.
    :param ax: If not specified, one is automatically created, with the specified dimensions under `ax_dims`
    :param group_colors: If not specified defaults to `method_idxs`.
    :param num_marker_points: Key maps method name to the number of markers
        drawn on the line, NOT the number of points that are plotted! By
        default this is 8.
    :param legend: Whether to include a legend within the plot.
    :param fetch_std: If True, read a precomputed "<y_name>_std" column
        instead of computing the std over seeds (falls back when it is NaN).
    :returns: The plotted figure.
    """
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=ax_dims)

    if rename_map is None:
        rename_map = {}
    if line_styles is None:
        line_styles = {}
    if num_marker_points is None:
        num_marker_points = {}
    if method_idxs is None:
        method_idxs = {k: i for i, k in enumerate(plot_df[group_key].unique())}

    plot_df = plot_df.copy()
    if tight:
        plt.tight_layout(pad=2.2)

    if group_colors is None:
        group_colors = method_idxs

    colors = sns.color_palette()
    group_colors = {k: colors[i] for k, i in group_colors.items()}

    # Average over seeds: one aggregated row per (method, step).
    avg_y_df = plot_df.groupby([group_key, x_name]).mean()
    std_y_df = plot_df.groupby([group_key, x_name]).std()
    if y_name in plot_df.columns and y_name not in avg_y_df.columns:
        raise ValueError(
            f"Desired column {y_name} lost in the grouping. Make sure it is a numeric type"
        )
    # Used only to report how many seeds each method has.
    method_runs = plot_df.groupby(group_key)[avg_key].unique()

    if fetch_std:
        # Per method, prefer the precomputed "<y_name>_std" column; if its
        # first value is NaN fall back to the std computed over seeds.
        y_std = y_name + "_std"
        new_df = []
        for k, sub_df in plot_df.groupby([group_key]):
            where_matches = avg_y_df.index.get_level_values(0) == k
            use_df = avg_y_df[where_matches]
            if np.isnan(sub_df.iloc[0][y_std]):
                use_df["std"] = std_y_df[where_matches][y_name]
            else:
                use_df["std"] = avg_y_df[where_matches][y_std]
            new_df.append(use_df)
        avg_y_df = pd.concat(new_df)
    else:
        avg_y_df["std"] = std_y_df[y_name]

    lines = []
    names = []

    # Update the legend info with any previously plotted lines
    if ax.get_legend() is not None:
        all_lines = ax.get_lines()
        for i, n in enumerate(ax.get_legend().get_texts()):
            names.append(n.get_text())
            # Each method contributed two artists: the line and its markers.
            lines.append((all_lines[i * 2 + 1], all_lines[i * 2]))

    # Normalize the smoothing spec to a per-method lookup.
    if not isinstance(smooth_factor, dict):
        smooth_factor_lookup = defaultdict(lambda: smooth_factor)
    else:
        smooth_factor_lookup = defaultdict(lambda: 0.0)
        for k, v in smooth_factor.items():
            smooth_factor_lookup[k] = v

    for name, sub_df in avg_y_df.groupby(level=0):
        names.append(name)

        x_vals = sub_df.index.get_level_values(x_name).to_numpy()
        y_vals = sub_df[y_name].to_numpy()
        if x_disp_bounds is not None:
            # Restrict the printed summary stats to the displayed x-range.
            use_y_vals = sub_df[
                sub_df.index.get_level_values(x_name) < x_disp_bounds[1]
            ][y_name].to_numpy()
        else:
            use_y_vals = y_vals
        print(
            f"{name}: n_seeds: {len(method_runs[name])} (from WB run IDs {list(method_runs[name])})",
            max(use_y_vals),
            use_y_vals[-1],
        )
        y_std = sub_df["std"].fillna(0).to_numpy()

        use_smooth_factor = smooth_factor_lookup[name]

        if use_smooth_factor != 0.0:
            y_vals = np.array(smooth_arr(y_vals, use_smooth_factor))
            y_std = np.array(smooth_arr(y_std, use_smooth_factor))

        add_kwargs = {}
        if name in line_styles:
            add_kwargs["linestyle"] = line_styles[name]

        line_to_add = ax.plot(x_vals, y_vals, **add_kwargs)

        # Evenly spaced subset of points where markers are drawn.
        sel_vals = [
            int(x)
            for x in np.linspace(0, len(x_vals) - 1, num=num_marker_points.get(name, 8))
        ]
        midx = method_idxs[name] % len(MARKER_ORDER)
        ladd = ax.plot(
            x_vals[sel_vals],
            y_vals[sel_vals],
            MARKER_ORDER[midx],
            label=rename_map.get(name, name),
            color=group_colors[name],
            markersize=8,
        )
        lines.append((ladd[0], line_to_add[0]))

        plt.setp(line_to_add, linewidth=2, color=group_colors[name])

        # Shaded +/- std band, optionally clipped to `y_bounds`.
        min_y_fill = y_vals - y_std
        max_y_fill = y_vals + y_std

        if y_bounds is not None:
            min_y_fill = np.clip(min_y_fill, y_bounds[0], y_bounds[1])
            max_y_fill = np.clip(max_y_fill, y_bounds[0], y_bounds[1])

        ax.fill_between(
            x_vals, min_y_fill, max_y_fill, alpha=0.2, color=group_colors[name]
        )
    if y_disp_bounds is not None:
        ax.set_ylim(*y_disp_bounds)
    if x_disp_bounds is not None:
        ax.set_xlim(*x_disp_bounds)

    if xtick_fn is not None:
        plt.xticks(ax.get_xticks(), [xtick_fn(t) for t in ax.get_xticks()])
    if ytick_fn is not None:
        plt.yticks(ax.get_yticks(), [ytick_fn(t) for t in ax.get_yticks()])

    if legend:
        # Order legend entries by each method's configured index.
        labs = [(i, line_to_add[0].get_label()) for i, line_to_add in enumerate(lines)]
        labs = sorted(labs, key=lambda x: method_idxs[names[x[0]]])
        kwargs = {}
        if legend_loc is not None:
            kwargs["loc"] = legend_loc
        plt.legend(
            [lines[i] for i, _ in labs],
            [x[1] for x in labs],
            fontsize=legend_font_size,
            ncol=nlegend_cols,
            **kwargs,
        )

    # NOTE(review): `b=` was renamed `visible=` in Matplotlib 3.5 and removed
    # later — confirm the pinned matplotlib version before upgrading.
    ax.grid(b=True, which="major", color="lightgray", linestyle="--")

    ax.set_xlabel(rename_map.get(x_name, x_name), fontsize=axes_font_size)
    ax.set_ylabel(rename_map.get(y_name, y_name), fontsize=axes_font_size)
    if x_logscale:
        ax.set_xscale("log")
    if y_logscale:
        ax.set_yscale("log")
    if title is not None and title != "":
        ax.set_title(title, fontsize=title_font_size)
    return fig, ax
def gen_fake_data(x_scale, data_key, n_runs=5):
    """Generate a fake experiment DataFrame of noisy sigmoid learning curves.

    Useful for testing the plotting utilities without querying real runs.

    :param x_scale: Multiplier mapping the sigmoid x-values (0..8) to step counts.
    :param data_key: Column name under which the y-values are stored.
    :param n_runs: Number of simulated runs; each is tagged ``run_<i>``.
    :returns: DataFrame with columns ``_step``, ``data_key``, ``run`` and
        ``method`` (always ``"fake"``).
    """

    def create_sigmoid():
        # 100-point sigmoid over [0, 8] with small Gaussian observation noise.
        noise = np.random.normal(0, 0.01, 100)
        x = np.linspace(0.0, 8.0, 100)
        y = 1 / (1 + np.exp(-x))
        y += noise
        return x, y

    # Collect per-run frames and concatenate once at the end: repeated
    # pd.concat inside the loop copies the accumulated frame every iteration
    # (quadratic in n_runs).
    sub_dfs = []
    for i in range(n_runs):
        x, y = create_sigmoid()
        sub_df = pd.DataFrame({"_step": [int(x_i * x_scale) for x_i in x], data_key: y})
        sub_df["run"] = f"run_{i}"
        sub_dfs.append(sub_df)
    df = pd.concat(sub_dfs)
    df["method"] = "fake"
    return df
def export_legend(ax, line_width, filename):
    """Save only the legend of ``ax`` to ``filename`` as a standalone image.

    :param ax: Axes whose handles/labels form the legend.
    :param line_width: Line width applied to every line handle in the legend.
    :param filename: Output image path.
    """
    # Build a throwaway figure holding nothing but the legend.
    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111)
    ax2.axis("off")
    legend = ax2.legend(
        *ax.get_legend_handles_labels(),
        frameon=False,
        loc="lower center",
        ncol=10,
        handlelength=2,
    )
    for line in legend.get_lines():
        line.set_linewidth(line_width)
    fig = legend.figure
    fig.canvas.draw()
    # Crop the saved image to exactly the legend's bounding box.
    bbox = legend.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    fig.savefig(filename, dpi="figure", bbox_inches=bbox)
    # Close the temporary figure: previously it was left open, so repeated
    # calls accumulated figures in pyplot's global state (memory leak).
    plt.close(fig2)
    print("Saved legend to ", filename)
def plot_legend(
    names: List[str],
    save_path: str,
    plot_colors: Dict[str, int],
    name_map: Optional[Dict[str, str]] = None,
    linestyles: Optional[Dict] = None,
    darkness: float = 0.1,
    marker_width: float = 0.0,
    marker_size: float = 0.0,
    line_width: float = 3.0,
    alphas: Optional[Dict[str, float]] = None,
):
    """
    Render and export a standalone legend image for the given method names.

    :param names: The list of names to appear on the legend.
    :param save_path: Where the legend image is written (via ``export_legend``).
    :param plot_colors: Maps each name into an index of the seaborn palette.
    :param name_map: Rename map. Names absent from the map are shown as-is.
    :param linestyles: Optional per-name line style; a value is either a style
        string or a ``[style, dashes]`` pair. (This is a mapping, not a list.)
    :param alphas: Optional per-name marker opacity (defaults to 1.0).
    """
    if name_map is None:
        name_map = {}
    if linestyles is None:
        linestyles = {}
    if alphas is None:
        alphas = {}
    colors = sns.color_palette()
    group_colors = {name: colors[idx] for name, idx in plot_colors.items()}
    fig, ax = plt.subplots(figsize=(5, 4))
    for name in names:
        add_kwargs = {}
        if name in linestyles:
            linestyle = linestyles[name]
            if isinstance(linestyle, list):
                add_kwargs["linestyle"] = linestyle[0]
                add_kwargs["dashes"] = linestyle[1]
            else:
                add_kwargs["linestyle"] = linestyle
        # Fall back to the raw name when no rename is provided (previously
        # this raised a KeyError for any unmapped name).
        disp_name = name_map.get(name, name)
        midx = plot_colors[name] % len(MARKER_ORDER)
        marker = MARKER_ORDER[midx]
        # "x" markers have no face, so widen their edge. Use a local so the
        # override does not leak into subsequent names (previously the
        # `marker_width` parameter itself was overwritten).
        use_marker_width = 2.0 if marker == "x" else marker_width
        marker_alpha = alphas.get(name, 1.0)
        use_color = (*group_colors[name], marker_alpha)
        ax.plot(
            [0],
            [1],
            marker=marker,
            label=disp_name,
            color=use_color,
            markersize=marker_size,
            markeredgewidth=use_marker_width,
            # markeredgecolor=(darkness, darkness, darkness, 1),
            markeredgecolor=use_color,
            **add_kwargs,
        )
    export_legend(
        ax,
        line_width,
        save_path,
    )
    plt.clf()
if __name__ == "__main__":
    # CLI entry point: load a plot config (OmegaConf YAML) and render a line
    # plot of a metric across W&B runs.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cfg", type=str, required=True)
    args = parser.parse_args()
    cfg = OmegaConf.load(args.cfg)
    # NOTE(review): the "ALL_" prefix presumably requests the full per-step
    # series for the metric — confirm against wb_query.batch_query.
    query_k = "ALL_" + cfg.plot_key
    result = batch_query(
        [[query_k] for _ in cfg.methods],
        [{cfg.method_spec: v} for v in cfg.methods.values()],
        # Skip methods with no runs configured.
        all_should_skip=[len(v) == 0 for v in cfg.methods.values()],
        # Tag every result row with the method it came from.
        all_add_info=[{"method": k} for k in cfg.methods.keys()],
        proj_cfg=OmegaConf.load(cfg.proj_cfg),
        use_cached=cfg.use_cached,
        verbose=False,
    )
    df = combine_dicts_to_df(result, query_k)
    fig = line_plot(df, "_step", cfg.plot_key, "rank", "method", **cfg.plot_params)
    fig_save("data/vis", cfg.save_name, fig)
import argparse
from typing import Dict, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from omegaconf import OmegaConf
from rl_utils.plotting.utils import fig_save
from rl_utils.plotting.wb_query import batch_query
# Sentinel values substituted for runs whose metric is the string "missing" or
# "error" (see `plot_bar`); bars matching these values are drawn hatched.
MISSING_VAL = 0.24444
ERROR_VAL = 0.3444
def plot_bar(
    plot_df: pd.DataFrame,
    group_key: str,
    plot_key: str,
    name_ordering: Optional[list] = None,
    name_colors=None,
    rename_map: Optional[Dict[str, str]] = None,
    show_ticks: bool = True,
    tic_font_size: int = 14,
    axis_font_size: int = 14,
    legend_font_size: int = 14,
    y_disp_bounds: Optional[Tuple[float, float]] = None,
    title: str = "",
    error_scaling=1.0,
    missing_fill_value: float = MISSING_VAL,
    error_fill_value: float = ERROR_VAL,
    bar_group_key: Optional[str] = None,
    base_bar_width: float = 0.35,
    bar_darkness: float = 0.2,
    bar_alpha: float = 0.9,
    bar_pad: float = 0.2,
    within_group_padding: float = 0.01,
    group_colors: Optional[Dict[str, Tuple[float, float, float]]] = None,
    legend: bool = False,
    xlabel: Optional[str] = None,
    xlabel_rot: int = 30,
):
    """
    :param plot_df: Data to plot; string values "missing"/"error" in
        `plot_key` are replaced by the sentinel fill values and rendered
        as hatched bars.
    :param group_key: The key to take the average/std over. Likely the method key.
    :param plot_key: The metric column plotted on the y-axis.
    :param name_ordering: Order of the names on the x-axis.
    :param bar_pad: Distance between bar groups
    :param within_group_padding: Distance between bars within bar group.
    :param bar_group_key: Group columns next to each other.
        NOTE(review): `plot_df.groupby(None)` raises, so this appears to be
        effectively required despite the Optional default — confirm.
    :param base_bar_width: Bar width. Scaled by the # of bars per group.
    :param group_colors: Maps the bar group key to a color (RGB float tuple
        [0,1]). Overrides `name_colors`.
    :param error_scaling: Multiplier applied to the std-dev error bars.
    :param xlabel_rot: The rotation (in degrees) of the labels on the x-axis.
    :returns: The matplotlib figure.
    """
    # Default name order / colors follow first-appearance order in the data.
    def_idx = [(k, i) for i, k in enumerate(plot_df[group_key].unique())]
    if name_ordering is None:
        name_ordering = [x for x, _ in def_idx]
    colors = sns.color_palette()
    if name_colors is None:
        name_colors = {k: colors[v] for k, v in def_idx}
    if rename_map is None:
        rename_map = {}
    # Replace textual failure markers with numeric sentinels so the column
    # can be cast to float and still be detected per-bar below.
    plot_df = plot_df.replace("missing", missing_fill_value)
    plot_df = plot_df.replace("error", error_fill_value)
    plot_df[plot_key] = plot_df[plot_key].astype("float")
    bar_grouped = plot_df.groupby(bar_group_key)
    num_grouped = len(bar_grouped)
    # Shrink individual bars so each group of bars keeps the same footprint.
    bar_width = base_bar_width / num_grouped
    start_x = 0.0
    within_group_spacing = bar_width + within_group_padding
    fig, ax = plt.subplots()
    for bar_group_name, sub_df in bar_grouped:
        # Mean/std across runs for every method in this bar group.
        df_avg_y = sub_df.groupby(group_key).mean()
        df_std_y = sub_df.groupby(group_key).std()
        avg_y = []
        std_y = []
        # Drop requested names that have no data in this group.
        name_ordering = [n for n in name_ordering if n in df_avg_y.index]
        is_missing = []
        is_error = []
        for name in name_ordering:
            # A mean equal to the sentinel means every run carried it.
            is_missing.append(df_avg_y[plot_key].loc[name] == missing_fill_value)
            is_error.append(df_avg_y[plot_key].loc[name] == error_fill_value)
            avg_y.append(df_avg_y.loc[name][plot_key])
            std_y.append(df_std_y.loc[name][plot_key] * error_scaling)
        if group_colors is None:
            colors = [name_colors[x] for x in name_ordering]
        else:
            colors = [group_colors[bar_group_name] for _ in name_ordering]
        N = len(avg_y)
        end_x = round(start_x + N * (bar_width + bar_pad), 3)
        use_x = np.linspace(start_x, end_x, N)
        bars = ax.bar(
            use_x,
            avg_y,
            width=bar_width,
            color=colors,
            align="center",
            alpha=bar_alpha,
            yerr=std_y,
            edgecolor=(0, 0, 0, 1.0),
            error_kw={
                "ecolor": (bar_darkness, bar_darkness, bar_darkness, 1.0),
                "lw": 2,
                "capsize": 3,
                "capthick": 2,
            },
            label=rename_map.get(bar_group_name, bar_group_name),
        )
        # Offset the next bar group's bars within each x slot.
        start_x += within_group_spacing
        # Hatch bars whose value is a missing/error sentinel.
        for i, bar in enumerate(bars):
            if is_missing[i]:
                missing_opacity = 0.1
                # prev_color = bar.get_facecolor()
                bar.set_edgecolor((1, 0, 0, missing_opacity))
                bar.set_hatch("//")
            elif is_error[i]:
                missing_opacity = 0.1
                # prev_color = bar.get_facecolor()
                bar.set_edgecolor((0, 0, 1, missing_opacity))
                bar.set_hatch("//")
    if show_ticks:
        xtic_names = [rename_map.get(x, x) for x in name_ordering]
    else:
        xtic_names = ["" for x in name_ordering]
    # NOTE(review): tick positions come from the LAST bar group's `use_x` —
    # confirm this is intended when groups have differing widths.
    xtic_locs = use_x
    ax.set_xticks(xtic_locs)
    ax.set_xticklabels(xtic_names, rotation=xlabel_rot, fontsize=tic_font_size)
    ax.set_ylabel(rename_map.get(plot_key, plot_key), fontsize=axis_font_size)
    if xlabel is not None:
        ax.set_xlabel(xlabel, fontsize=axis_font_size)
    if y_disp_bounds is not None:
        ax.set_ylim(*y_disp_bounds)
    if title != "":
        ax.set_title(title)
    if legend:
        ax.legend(fontsize=legend_font_size)
    for lab in ax.get_yticklabels():
        lab.set_fontsize(tic_font_size)
    return fig
def plot_from_file(plot_cfg_path, add_query_fields=None):
    """Load a bar-plot config, query W&B per method, render, and save the plot.

    :param plot_cfg_path: Path to the OmegaConf YAML describing the plot.
    :param add_query_fields: Extra metric keys fetched alongside the plot key.
    :returns: The raw query results as a DataFrame.
    """
    cfg = OmegaConf.load(plot_cfg_path)
    extra_fields = [] if add_query_fields is None else add_query_fields
    plot_key = cfg.plot_key
    method_names = list(cfg.methods.keys())
    method_specs = list(cfg.methods.values())
    result = batch_query(
        [[plot_key, *extra_fields] for _ in method_specs],
        [{cfg.method_spec: spec} for spec in method_specs],
        all_should_skip=[len(spec) == 0 for spec in method_specs],
        all_add_info=[{"method": name} for name in method_names],
        proj_cfg=OmegaConf.load(cfg.proj_cfg),
        use_cached=cfg.use_cached,
        verbose=False,
    )
    df = pd.DataFrame(result)
    fig = plot_bar(df, "method", plot_key, **cfg.plot_params)
    fig_save("data/vis", cfg.save_name, fig)
    return df
if __name__ == "__main__":
    # CLI entry point: --cfg points at the plot's OmegaConf YAML config.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cfg", type=str, required=True)
    args = parser.parse_args()
    plot_from_file(args.cfg)
import gym
import numpy as np
import torch
import rl_utils.common.core_utils as utils
from rl_utils.envs.vec_env.vec_env import VecEnvWrapper
# Checks whether done was caused by time limits or not
class TimeLimitMask(gym.Wrapper):
    """Tags transitions whose `done` was triggered by the env's time limit.

    When an episode terminates exactly at `_max_episode_steps`, the info dict
    gets `bad_transition=True` so learners can treat the termination as
    artificial rather than a true environment terminal.
    """

    def step(self, action):
        obs, rew, done, info = self.env.step(action)
        # Only inspect the step counters when the episode actually ended.
        if done and self.env._elapsed_steps == self.env._max_episode_steps:
            info["bad_transition"] = True
        return obs, rew, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class VecPyTorch(VecEnvWrapper):
    """Vec-env wrapper converting observations and rewards to torch tensors.

    Observations (including dict observations) become tensors on `device`;
    rewards become float32 column tensors of shape (num_envs, 1).
    """

    def __init__(self, venv, device):
        super().__init__(venv)
        self.device = device

    def _data_convert(self, arr):
        # Downcast float64 numpy arrays so the resulting tensors are float32.
        if isinstance(arr, np.ndarray) and arr.dtype == np.float64:
            arr = arr.astype(np.float32)
        return arr

    def reset(self):
        return self._trans_obs(self.venv.reset())

    def step_async(self, actions):
        if isinstance(actions, torch.LongTensor):
            # Squeeze the dimension for discrete actions
            actions = actions.squeeze(1)
        self.venv.step_async(actions.cpu().numpy())

    def _trans_obs(self, obs):
        # Support for dict observations
        def _to_tensor(x):
            return torch.Tensor(self._data_convert(x)).to(self.device)

        if isinstance(obs, dict):
            for key in obs:
                obs[key] = _to_tensor(obs[key])
            return obs
        return _to_tensor(obs)

    def step_wait(self):
        obs, reward, done, info = self.venv.step_wait()
        # Reward is sometimes a Double. Observation is considered to always be
        # float32
        reward = torch.Tensor(reward).unsqueeze(dim=1).float()
        return self._trans_obs(obs), reward, done, info
# Derived from
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py
class VecPyTorchFrameStack(VecEnvWrapper):
    """
    For now, this will only stack the "observation" key in dictionary
    observation spaces.
    """
    def __init__(self, venv, nstack, device):
        # nstack: number of consecutive observations stacked per env.
        self.venv = venv
        self.nstack = nstack
        ob_space = venv.observation_space
        # StackHelper maintains the rolling frame buffer for every env.
        self.stacked_obs = utils.StackHelper(
            ob_space.shape, nstack, device, venv.num_envs
        )
        # Advertise the enlarged (stacked) observation space to consumers.
        new_obs_space = utils.update_obs_space(
            venv.observation_space,
            utils.reshape_obs_space(ob_space, self.stacked_obs.get_shape()),
        )
        VecEnvWrapper.__init__(self, venv, observation_space=new_obs_space)
    def step_wait(self):
        obs, rews, news, infos = self.venv.step_wait()
        # Push the newest frames into the buffer; `news` are the done flags.
        stacked_obs, infos = self.stacked_obs.update_obs(obs, news, infos)
        return stacked_obs, rews, news, infos
    def reset(self):
        obs = self.venv.reset()
        # Re-seed the frame buffer from the initial observation.
        stacked_obs = self.stacked_obs.reset(obs)
        return utils.set_def_obs(obs, stacked_obs)
    def close(self):
        self.venv.close()
from functools import partial
from typing import Callable, Optional
import gym
import torch
import rl_utils.envs.pointmass # noqa: F401
from rl_utils.envs.registry import full_env_registry
from rl_utils.envs.vec_env.dummy_vec_env import DummyVecEnv
from rl_utils.envs.vec_env.shmem_vec_env import ShmemVecEnv
from rl_utils.envs.vec_env.vec_env import VecEnv
from rl_utils.envs.vec_env.vec_env_wrappers import (
VecEnvClipActions,
VecEnvPermuteFrames,
)
from rl_utils.envs.vec_env.vec_monitor import VecMonitor
from rl_utils.envs.wrappers import TimeLimitMask, VecPyTorch, VecPyTorchFrameStack
def create_vectorized_envs(
    env_id: str,
    num_envs: int,
    seed: int = 0,
    *,
    device: Optional[torch.device] = None,
    context_mode: str = "spawn",
    create_env_fn: Optional[Callable[[int], None]] = None,
    force_multi_proc: bool = False,
    num_frame_stack: Optional[int] = None,
    clip_actions: bool = False,
    permute_frames: bool = False,
    **kwargs,
) -> VecEnv:
    """Create a (possibly multi-process) vectorized environment.

    If `env_id` names a class in `full_env_registry`, that class is
    instantiated directly with `num_envs`/`seed`/`device` and the wrapper
    stack below is skipped entirely.

    :param env_id: Gym env id, or a key registered in `full_env_registry`.
    :param num_envs: Number of parallel env copies.
    :param seed: Base seed; env i is seeded with `seed + i`.
    :param device: Torch device observations/rewards are moved to (default CPU).
    :param context_mode: Multiprocessing start method for `ShmemVecEnv`.
    :param create_env_fn: Optional factory taking the per-env seed; when None,
        `gym.make(env_id)` is used.
    :param force_multi_proc: Use the shared-memory multi-process vec env even
        for a single env.
    :param num_frame_stack: If set, stack this many consecutive frames.
    :param clip_actions: Wrap with `VecEnvClipActions`.
    :param permute_frames: Wrap with `VecEnvPermuteFrames`.
    """
    found_full_env_cls = full_env_registry.search_env(env_id)
    if found_full_env_cls is not None:
        # print(f"Found {found_full_env_cls} for env {env_id}")
        return found_full_env_cls(num_envs=num_envs, seed=seed, device=device, **kwargs)
    def full_create_env(rank):
        # Each env copy gets its own deterministic seed offset by its rank.
        full_seed = seed + rank
        if create_env_fn is None:
            env = gym.make(env_id)
        else:
            env = create_env_fn(full_seed)
        # Mark time-limit terminations so learners can ignore them; only
        # applies when gym wrapped the env in a TimeLimit.
        if str(env.__class__.__name__).find("TimeLimit") >= 0:
            env = TimeLimitMask(env)
        env.seed(full_seed)
        if hasattr(env.action_space, "seed"):
            env.action_space.seed(full_seed)
        return env
    envs = [partial(full_create_env, rank=i) for i in range(num_envs)]
    # Single env runs in-process; multiple envs use shared-memory workers.
    if num_envs > 1 or force_multi_proc:
        envs = ShmemVecEnv(envs, context=context_mode)
    else:
        envs = DummyVecEnv(envs)
    if device is None:
        device = torch.device("cpu")
    # Order matters: monitor episode stats, then convert to torch tensors,
    # then apply optional tensor-level wrappers.
    envs = VecMonitor(envs)
    envs = VecPyTorch(envs, device)
    if permute_frames:
        envs = VecEnvPermuteFrames(envs)
    if clip_actions:
        envs = VecEnvClipActions(envs)
    if num_frame_stack is not None:
        return VecPyTorchFrameStack(envs, num_frame_stack, device)
    return envs
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import numpy as np
import torch
from rl_utils.envs.pointmass.pointmass_env import PointMassEnv, PointMassParams
from rl_utils.envs.registry import full_env_registry
@dataclass(frozen=True)
class SquareObstacle:
    """
    A rectangular obstacle for `PointMassObstacleEnv`.

    * x,y position of the CENTER of the square.
    * Obstacle width
    * Obstacle length
    * Obstacle rotation angle (in degrees)
    """

    # (x, y) of the obstacle's CENTER.
    xy: Tuple[float, float]
    # Extent along the obstacle's local x axis.
    width: float
    # Extent along the obstacle's local y axis.
    height: float
    # Counter-clockwise rotation in degrees (converted to a rotation matrix
    # in PointMassObstacleEnv.__init__).
    rot_deg: float
@dataclass(frozen=True)
class PointMassObstacleParams(PointMassParams):
    # Distance-to-goal threshold below which `at_goal` is reported in info.
    goal_thresh: float = 0.05
    # Rectangular obstacles the agent cannot enter (see `SquareObstacle`).
    square_obstacles: List[SquareObstacle] = field(default_factory=list)
@full_env_registry.register_env("PointMassObstacle-v0")
class PointMassObstacleEnv(PointMassEnv):
    """Point-mass navigation with impassable rectangular obstacles."""

    def __init__(
        self,
        num_envs: int,
        params: Optional[PointMassObstacleParams] = None,
        device: Optional[torch.device] = None,
        set_eval: bool = False,
        seed: Optional[int] = None,
        **kwargs,
    ):
        if params is None:
            params = PointMassObstacleParams()
        super().__init__(num_envs, params, device, set_eval, seed, **kwargs)
        # NOTE(review): no circle obstacles are ever added here, so the
        # circle-collision loop in `forward` is currently a no-op unless a
        # subclass populates this list — confirm intended.
        self._circle_obs = []
        # Each entry: (world-from-obstacle homogeneous transform, width, height).
        self._square_obs_T = []
        for obs in self._params.square_obstacles:
            rot = obs.rot_deg * (np.pi / 180.0)
            # 2D rotation embedded in a 3x3 homogeneous matrix.
            rot_T = torch.tensor(
                [
                    [np.cos(rot), -np.sin(rot), 0.0],
                    [np.sin(rot), np.cos(rot), 0.0],
                    [0.0, 0.0, 1.0],
                ],
                device=self._device,
                dtype=torch.float,
            )
            # Translation to the obstacle center.
            trans_T = torch.tensor(
                [
                    [1.0, 0.0, obs.xy[0]],
                    [0.0, 1.0, obs.xy[1]],
                    [0.0, 0.0, 1.0],
                ],
                device=self._device,
                dtype=torch.float,
            )
            self._square_obs_T.append(
                (
                    trans_T @ rot_T,
                    obs.width,
                    obs.height,
                )
            )
    def _add_to_info(self, all_info):
        # Report per-env goal achievement based on the distance threshold.
        dists = self._get_dist()
        for i in range(self._batch_size):
            all_info[i]["at_goal"] = dists[i].item() < self._params.goal_thresh
        return all_info
    def forward(self, cur_pos, action):
        action = action.to(self._device)
        action = torch.clamp(action, -1.0, 1.0)
        new_pos = cur_pos + (action * self._params.dt)
        if self._params.clip_bounds:
            new_pos = torch.clamp(
                new_pos,
                -self._params.position_limit,
                self._params.position_limit,
            )
        # Push agents that penetrated a circle obstacle back onto its boundary.
        for ob_pos, ob_radius in self._circle_obs:
            local_pos = new_pos - ob_pos
            local_dist = torch.linalg.norm(local_pos, dim=-1)
            coll_idxs = torch.nonzero(local_dist < ob_radius)
            norm_pos = (local_pos / local_dist.view(-1, 1)) * ob_radius
            adjusted_pos = ob_pos + norm_pos
            new_pos[coll_idxs] = adjusted_pos[coll_idxs]
        # Agents that would end up inside a square obstacle do not move.
        inside_obstacle = self.is_inside_obstacle(new_pos)
        new_pos[inside_obstacle] = cur_pos[inside_obstacle]
        return new_pos
    def is_inside_obstacle(self, pos: torch.Tensor) -> torch.BoolTensor:
        """
        :param pos: A tensor of shape (N, 2).
        :returns: Tensor of shape (N,) indicating if the points were inside the obstacle.
        """
        # Homogeneous coordinates so points can be mapped by the 3x3 transforms.
        homo_pos = torch.cat(
            [pos, torch.ones(pos.shape[0], 1, device=self._device)], dim=-1
        )
        inside_any_box = torch.zeros(
            pos.shape[0], device=self._device, dtype=torch.bool
        )
        for obs_T, xlen, ylen in self._square_obs_T:
            # Map world points into the obstacle's local (axis-aligned) frame.
            local_pos = torch.linalg.inv(obs_T) @ homo_pos.T
            inside_x = torch.logical_and(
                local_pos[0] < (xlen / 2), local_pos[0] > -(xlen / 2)
            )
            inside_y = torch.logical_and(
                local_pos[1] < (ylen / 2), local_pos[1] > -(ylen / 2)
            )
            inside_box = torch.logical_and(inside_x, inside_y)
            inside_any_box |= inside_box
        return inside_any_box
from dataclasses import dataclass
from typing import Callable, Optional
import numpy as np
import torch
from gym import spaces
from torch.distributions import Uniform
from rl_utils.envs.registry import full_env_registry
from rl_utils.envs.vec_env.vec_env import FINAL_OBS_KEY, VecEnv
@dataclass(frozen=True)
class PointMassParams:
    """
    :param force_eval_start_dist: Generate the start positions from the eval offset.
    :param force_train_start_dist: Generate the start positions from the train offset.
    :param clip_bounds: Clip the agent to be within [-position_limit, position_limit]^2 ?
    :param clip_actions: Clip the actions to be within -1 to 1.
    :param ep_horizon: The length of the episode.
    :param custom_reward: A function that takes as input the current position,
        previous position, and action and outputs a reward value. All are PyTorch
        tensors of shape (N,) where N is the number of environments.
    :param random_start_region_sample: If False, then the starting state will
        iterate clockwise around the possible spawning regions.
    """

    force_eval_start_dist: bool = False
    force_train_start_dist: bool = True
    clip_bounds: bool = True
    clip_actions: bool = True
    ep_horizon: int = 5
    # Number of angular spawn regions used during training (max 4).
    num_train_regions: int = 4
    # Angular half-spread (radians) of each spawn region.
    start_state_noise: float = np.pi / 20
    # Integration timestep: new_pos = pos + action * dt.
    dt: float = 0.2
    # Scale on the negative distance-to-goal default reward.
    reward_dist_pen: float = 1 / 10.0
    # Fixed spawn-region index for training; -1 means sample a region.
    start_idx: int = -1
    # Radius of the spawn circle around the goal.
    radius: float = 1.0
    # Angular offset (radians) of eval spawn regions.
    eval_offset: float = 0.0
    # Angular offset (radians) of train spawn regions.
    train_offset: float = np.pi / 4
    # Half-width of the square arena when clip_bounds is True.
    position_limit: float = 1.5
    # Std-dev of Gaussian noise applied to actions each step (0 disables).
    transition_noise: float = 0.0
    random_start_region_sample: bool = True
    custom_reward: Optional[
        Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor]
    ] = None
@full_env_registry.register_env("PointMass-v0")
class PointMassEnv(VecEnv):
    """Batched 2D point-mass navigation task.

    Agents spawn on a circle of radius `params.radius` around the goal at the
    origin and act with 2D velocity commands in [-1, 1]. All environments in
    the batch share a fixed horizon and reset together.
    """

    def __init__(
        self,
        num_envs: int,
        params: Optional[PointMassParams] = None,
        device: Optional[torch.device] = None,
        set_eval: bool = False,
        seed: Optional[int] = None,
        **kwargs,
    ):
        """
        :param num_envs: Number of environments simulated in the batch.
        :param params: Task parameters; defaults to `PointMassParams()`.
        :param device: Torch device for simulation tensors (default CPU).
        :param set_eval: Use the eval start-state distribution (overridden by
            `params.force_train_start_dist`).
        :param seed: Seed for the observation/action spaces.
        """
        if params is None:
            params = PointMassParams()
        if device is None:
            device = torch.device("cpu")
        self._batch_size = num_envs
        self._params = params
        self._device = device
        self._goal = torch.tensor([0.0, 0.0]).to(self._device)
        self._ep_step = 0
        self._prev_dist_idx = -1
        self._ep_rewards = []
        obs_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), seed=seed)
        ac_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), seed=seed)
        # Eval start distribution when requested, but force_train wins.
        self._is_eval = set_eval or self._params.force_eval_start_dist
        if self._params.force_train_start_dist:
            self._is_eval = False
        if self._is_eval:
            regions = self.get_regions(
                self._params.eval_offset, self._params.start_state_noise
            )
        else:
            regions = self.get_regions(
                self._params.train_offset, self._params.start_state_noise
            )
        # With zero start noise a Uniform would be degenerate, so a fixed
        # point sampler stands in for the distribution interface.
        if self._params.start_state_noise != 0:
            self._start_distributions = Uniform(regions[:, 0], regions[:, 1])
        else:
            self._start_distributions = SingleSampler(regions[:, 0])
        super().__init__(
            self._batch_size,
            obs_space,
            ac_space,
        )

    def step_async(self, actions):
        # Stepping is synchronous for this env; see `step`.
        pass

    def step_wait(self):
        pass

    def forward(self, cur_pos, action):
        """Integrate one transition and return the next positions.

        :param cur_pos: (num_envs, 2) current positions.
        :param action: (num_envs, 2) velocity commands.
        """
        action = action.to(self._device)
        if self._params.clip_actions:
            action = torch.clamp(action, -1.0, 1.0)
        # Apply transition noise BEFORE integrating. Previously the noise was
        # added (in place) after `new_pos` was computed, so it never affected
        # the transition and could silently mutate the caller's tensor.
        if self._params.transition_noise != 0.0:
            action = action + self._params.transition_noise * torch.randn(
                action.shape, device=self._device
            )
        new_pos = cur_pos + (action * self._params.dt)
        if self._params.clip_bounds:
            new_pos = torch.clamp(
                new_pos,
                -self._params.position_limit,
                self._params.position_limit,
            )
        return new_pos

    def step(self, action):
        """Advance every env one step; auto-resets the batch at the horizon."""
        self.cur_pos = self.forward(self.cur_pos, action)
        self._ep_step += 1
        self._store_actions.append(action)
        # Fixed horizon: all envs terminate together.
        is_done = self._ep_step >= self._params.ep_horizon
        reward = self._get_reward(action)
        self._ep_rewards.append(reward)
        all_is_done = torch.tensor(
            [is_done for _ in range(self._batch_size)], dtype=torch.bool
        )
        dist_to_goal = torch.linalg.norm(
            self._goal - self.cur_pos, dim=-1, keepdims=True
        )
        all_info = [
            {"dist_to_goal": dist_to_goal[i].item()} for i in range(self._batch_size)
        ]
        all_info = self._add_to_info(all_info)
        if is_done:
            # Episode summary stats plus the true final observation (the obs
            # returned below comes from the auto-reset).
            store_actions = torch.stack(self._store_actions, dim=1)
            action_magnitudes = torch.linalg.norm(store_actions, dim=-1)
            final_obs = self._get_obs()
            for i in range(self._batch_size):
                all_info[i]["episode"] = {
                    "r": torch.stack(self._ep_rewards).sum(0)[i].item(),
                    "max_action_magnitude": action_magnitudes[i].max().item(),
                    "avg_action_magnitude": action_magnitudes[i].mean().item(),
                }
                all_info[i][FINAL_OBS_KEY] = final_obs[i]
            self.reset()
        return (self._get_obs(), reward, all_is_done, all_info)

    def get_images(self, mode=None, img_dim=64, **kwargs) -> np.ndarray:
        """Render each env as an `img_dim` x `img_dim` RGB image."""

        def convert_coordinate(coord):
            # Normalize position to [0,1]
            norm_pos = (coord + self._params.position_limit) / (
                2 * self._params.position_limit
            )
            # Convert position to image space
            return (norm_pos * img_dim).to(torch.long)

        def write_to_img(img, pos, size, color):
            # Paint a solid square of the given half-size, clipped to bounds.
            lower_x = max(pos[0] - size, 0)
            upper_x = min(pos[0] + size, img_dim)
            lower_y = max(pos[1] - size, 0)
            upper_y = min(pos[1] + size, img_dim)
            img[lower_x:upper_x, lower_y:upper_y] = color
            return img

        agent_pos = convert_coordinate(self.cur_pos)
        goal_pos = convert_coordinate(self._goal)
        entity_size = img_dim // 32
        img = np.full((self._batch_size, img_dim, img_dim, 3), 255, dtype=np.uint8)
        agent_color = [8, 143, 143]
        goal_color = [224, 17, 95]
        for env_i in range(self._batch_size):
            img[env_i] = write_to_img(
                img[env_i], agent_pos[env_i], entity_size, agent_color
            )
            img[env_i] = write_to_img(img[env_i], goal_pos, entity_size, goal_color)
        return img

    def _add_to_info(self, all_info):
        # Hook for subclasses to enrich the per-env info dicts.
        return all_info

    def _get_dist(self):
        # (num_envs, 1) Euclidean distance to the goal.
        return torch.linalg.norm(self._goal - self.cur_pos, dim=-1, keepdims=True)

    def _get_reward(self, action):
        """Default reward: scaled negative distance to goal (or custom fn)."""
        if self._params.custom_reward is None:
            dist_to_goal = torch.linalg.norm(
                self._goal - self.cur_pos, dim=-1, keepdims=True
            )
            reward = -self._params.reward_dist_pen * dist_to_goal
        else:
            reward = self._params.custom_reward(self.cur_pos, self._prev_pos, action)
        self._prev_pos = self.cur_pos.detach().clone()
        return reward  # noqa: R504

    def get_regions(self, offset, spread):
        """Return 4 angular intervals [center - spread, center + spread],
        with centers spaced pi/2 apart starting at `offset`."""
        inc = np.pi / 2
        centers = [offset + i * inc for i in range(4)]
        return torch.tensor(
            [[center - spread, center + spread] for center in centers]
        ).to(self._device)

    def _get_dist_idx(self, batch_size):
        """Pick a spawn-region index per env in the batch."""
        if not self._params.random_start_region_sample:
            # Deterministically cycle through the train regions.
            new_dist_idx = self._prev_dist_idx + 1
            new_dist_idx = new_dist_idx % self._params.num_train_regions
            self._prev_dist_idx = new_dist_idx
            return torch.full((batch_size,), new_dist_idx)
        if self._is_eval:
            return torch.randint(0, 4, (batch_size,))
        else:
            if self._params.start_idx == -1:
                return torch.randint(0, self._params.num_train_regions, (batch_size,))
            else:
                return torch.tensor([self._params.start_idx]).repeat(batch_size)

    def _sample_start(self, batch_size, offset_start):
        """Sample start positions on the spawn circle, shifted by `offset_start`."""
        idx = self._get_dist_idx(batch_size).to(self._device)
        # Sample an angle for every region, then select each env's region.
        samples = self._start_distributions.sample(idx.shape)
        ang = samples.gather(1, idx.view(-1, 1)).view(-1)
        return (
            torch.stack(
                [
                    self._params.radius * torch.cos(ang),
                    self._params.radius * torch.sin(ang),
                ],
                dim=-1,
            ).to(self._device)
            + offset_start
        )

    def reset(self):
        self.cur_pos = self._sample_start(self._batch_size, self._goal)
        self._ep_step = 0
        self._ep_rewards = []
        self._prev_pos = self.cur_pos.detach().clone()
        self._store_actions = []
        return self._get_obs()

    def _get_obs(self):
        return self.cur_pos.clone()
class SingleSampler:
    """A degenerate "distribution" that always yields the same fixed point.

    Stands in for `torch.distributions.Uniform` when the sampling interval
    has zero width (see `PointMassEnv.__init__`).
    """

    def __init__(self, point):
        # The constant 1-D tensor returned (tiled) by every `sample` call.
        self.point = point

    def sample(self, shape):
        # Tile the fixed point along a new leading batch dimension.
        batch = shape[0]
        return self.point.unsqueeze(0).repeat(batch, 1)
import ctypes
import multiprocessing as mp
from collections.abc import Iterable
import numpy as np
from rl_utils.common.core_utils import dict_to_obs, obs_space_info, obs_to_dict
from .vec_env import FINAL_OBS_KEY, CloudpickleWrapper, VecEnv, clear_mpi_env_vars
# Maps numpy scalar types to the ctypes type used for the shared-memory
# observation buffers in ShmemVecEnv.
_NP_TO_CT = {
    np.float32: ctypes.c_float,
    np.float64: ctypes.c_double,
    np.int32: ctypes.c_int32,
    np.int8: ctypes.c_int8,
    np.uint8: ctypes.c_char,
    bool: ctypes.c_bool,
}
class ShmemVecEnv(VecEnv):
    """
    Optimized version of SubprocVecEnv that uses shared variables to communicate observations.
    """
    def __init__(self, env_fns, spaces=None, context="spawn"):
        """
        If you don't specify observation_space, we'll have to create a dummy
        environment to get it.
        """
        ctx = mp.get_context(context)
        if spaces:
            observation_space, action_space = spaces
        else:
            # Instantiate one env just to read its spaces, then discard it.
            dummy = env_fns[0]()
            observation_space, action_space = (
                dummy.observation_space,
                dummy.action_space,
            )
            dummy.close()
            del dummy
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
        self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(
            observation_space
        )
        # One shared-memory array per (env, observation key); workers write
        # observations here instead of pickling them through the pipe.
        self.obs_bufs = [
            {
                k: ctx.Array(
                    _NP_TO_CT[self.obs_dtypes[k].type],
                    int(np.prod(self.obs_shapes[k])),
                )
                for k in self.obs_keys
            }
            for _ in env_fns
        ]
        self.parent_pipes = []
        self.procs = []
        # Strip MPI env vars so child processes don't think they are MPI ranks.
        with clear_mpi_env_vars():
            for env_fn, obs_buf in zip(env_fns, self.obs_bufs):
                # cloudpickle handles closures/lambdas that plain pickle can't.
                wrapped_fn = CloudpickleWrapper(env_fn)
                parent_pipe, child_pipe = ctx.Pipe()
                proc = ctx.Process(
                    target=_subproc_worker,
                    args=(
                        child_pipe,
                        parent_pipe,
                        wrapped_fn,
                        obs_buf,
                        self.obs_shapes,
                        self.obs_dtypes,
                        self.obs_keys,
                    ),
                )
                proc.daemon = True
                self.procs.append(proc)
                self.parent_pipes.append(parent_pipe)
                proc.start()
                # The child end is now owned by the worker process.
                child_pipe.close()
        self.waiting_step = False
        self.viewer = None
    def reset(self):
        if self.waiting_step:
            print("Called reset() while waiting for the step to complete")
            self.step_wait()
        for pipe in self.parent_pipes:
            pipe.send(("reset", None))
        return self._decode_obses([pipe.recv() for pipe in self.parent_pipes])
    def step_async(self, actions):
        assert len(actions) == len(self.parent_pipes)
        for pipe, act in zip(self.parent_pipes, actions):
            pipe.send(("step", act))
    def step_wait(self):
        outs = [pipe.recv() for pipe in self.parent_pipes]
        obs, rews, dones, infos = zip(*outs)
        return self._decode_obses(obs), np.array(rews), np.array(dones), infos
    def close_extras(self):
        # Drain any pending step, then shut down all workers cleanly.
        if self.waiting_step:
            self.step_wait()
        for pipe in self.parent_pipes:
            pipe.send(("close", None))
        for pipe in self.parent_pipes:
            pipe.recv()
            pipe.close()
        for proc in self.procs:
            proc.join()
    def get_images(self, mode="human", **kwargs):
        # Broadcast render kwargs; iterable values are split per env so each
        # worker receives only its own element.
        N = len(self.parent_pipes)
        all_pipe_kwargs = []
        for i in range(N):
            pipe_kwargs = {}
            for k, v in kwargs.items():
                if isinstance(v, Iterable):
                    pipe_kwargs[k] = kwargs[k][i]
                else:
                    pipe_kwargs[k] = kwargs[k]
            all_pipe_kwargs.append(pipe_kwargs)
        for pipe, p_kwargs in zip(self.parent_pipes, all_pipe_kwargs):
            pipe.send(("render", (mode, p_kwargs)))
        return [pipe.recv() for pipe in self.parent_pipes]
    def _decode_obses(self, obs):
        # Read each env's observation out of shared memory (the pipe messages
        # only signal that the buffers are ready).
        result = {}
        for k in self.obs_keys:
            bufs = [b[k] for b in self.obs_bufs]
            o = [
                np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(
                    self.obs_shapes[k]
                )
                for b in bufs
            ]
            result[k] = np.array(o)
        return dict_to_obs(result)
def _subproc_worker(
    pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys
):
    """
    Control a single environment instance using IPC and
    shared memory.

    Runs in a child process: receives ("reset"|"step"|"render"|"close", data)
    commands over `pipe`, writes observations into the shared `obs_bufs`,
    and replies over the same pipe.
    """
    def _write_obs(maybe_dict_obs):
        # Copy the observation into shared memory; the pipe reply carries None.
        flatdict = obs_to_dict(maybe_dict_obs)
        for k in keys:
            dst = obs_bufs[k].get_obj()
            dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(
                obs_shapes[k]
            )  # pylint: disable=W0212
            np.copyto(dst_np, flatdict[k])
    env = env_fn_wrapper.x()
    # Close the parent's end in this process so EOF is delivered correctly.
    parent_pipe.close()
    try:
        while True:
            cmd, data = pipe.recv()
            if cmd == "reset":
                pipe.send(_write_obs(env.reset()))
            elif cmd == "step":
                obs, reward, done, info = env.step(data)
                if done:
                    # Preserve the true terminal observation before auto-reset.
                    final_obs = obs
                    info[FINAL_OBS_KEY] = final_obs
                    obs = env.reset()
                pipe.send((_write_obs(obs), reward, done, info))
            elif cmd == "render":
                pipe.send(env.render(mode=data[0], **data[1]))
            elif cmd == "close":
                pipe.send(None)
                break
            else:
                raise RuntimeError("Got unrecognized cmd %s" % cmd)
    except KeyboardInterrupt:
        print("ShmemVecEnv worker: got KeyboardInterrupt")
    finally:
        env.close()
import contextlib
import os
from abc import ABC, abstractmethod
import cloudpickle
from rl_utils.common.tile_images import tile_images
# info-dict key under which the true terminal observation is stored when an
# environment auto-resets on done.
FINAL_OBS_KEY = "final_obs"
class AlreadySteppingError(Exception):
    """
    Raised when an asynchronous step is running while
    step_async() is called again.
    """

    def __init__(self):
        Exception.__init__(self, "already running an async step")
class NotSteppingError(Exception):
    """
    Raised when an asynchronous step is not running but
    step_wait() is called.
    """

    def __init__(self):
        Exception.__init__(self, "not running an async step")
class VecEnv(ABC):
    """
    An abstract asynchronous, vectorized environment.
    Used to batch data from multiple copies of an environment, so that
    each observation becomes an batch of observations, and expected action is a batch of actions to
    be applied per-environment.
    """

    # Set once close() has run; further close() calls are no-ops.
    closed = False
    # Lazily created image viewer for mode="human" rendering.
    viewer = None
    metadata = {"render.modes": ["human", "rgb_array"]}
    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space
    @abstractmethod
    def reset(self):
        """
        Reset all the environments and return an array of
        observations, or a dict of observation arrays.
        If step_async is still doing work, that work will
        be cancelled and step_wait() should not be called
        until step_async() is invoked again.
        """
    @abstractmethod
    def step_async(self, actions):
        """
        Tell all the environments to start taking a step
        with the given actions.
        Call step_wait() to get the results of the step.
        You should not call this if a step_async run is
        already pending.
        """
    @abstractmethod
    def step_wait(self):
        """
        Wait for the step taken with step_async().
        Returns (obs, rews, dones, infos):
         - obs: an array of observations, or a dict of
                arrays of observations.
         - rews: an array of rewards
         - dones: an array of "episode done" booleans
         - infos: a sequence of info objects
        """
    def close_extras(self):
        """
        Clean up the extra resources, beyond what's in this base class.
        Only runs when not self.closed.
        """
    def close(self):
        if self.closed:
            return
        if self.viewer is not None:
            self.viewer.close()
        self.close_extras()
        self.closed = True
    def step(self, actions):
        """
        Step the environments synchronously.
        This is available for backwards compatibility.
        """
        self.step_async(actions)
        return self.step_wait()
    def render(self, mode="human", **kwargs):
        # Tile the per-env frames into one composite image.
        imgs = self.get_images(mode=mode, **kwargs)
        bigimg = tile_images(imgs)
        if mode == "human":
            self.get_viewer().imshow(bigimg)
            return self.get_viewer().isopen
        else:
            return bigimg
    def get_images(self, mode=None, **kwargs):
        """
        Return RGB images from each environment
        """
        raise NotImplementedError
    @property
    def unwrapped(self):
        # Recursively strip wrappers down to the base vectorized env.
        if isinstance(self, VecEnvWrapper):
            return self.venv.unwrapped
        else:
            return self
    def get_viewer(self):
        if self.viewer is None:
            # Imported lazily so headless use never touches the GUI stack.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.SimpleImageViewer()
        return self.viewer
class VecEnvWrapper(VecEnv):
    """
    An environment wrapper that applies to an entire batch
    of environments at once.

    Subclasses override the delegating methods below to transform
    observations, rewards, or actions of the wrapped vec env.
    """
    def __init__(self, venv, observation_space=None, action_space=None):
        # Fall back to the wrapped env's spaces when none are overridden.
        self.venv = venv
        VecEnv.__init__(
            self,
            num_envs=venv.num_envs,
            observation_space=observation_space or venv.observation_space,
            action_space=action_space or venv.action_space,
        )
    def step_async(self, actions):
        self.venv.step_async(actions)
    def reset(self):
        return self.venv.reset()
    def step_wait(self):
        return self.venv.step_wait()
    def close(self):
        return self.venv.close()
    def render(self, mode="human", **kwargs):
        return self.venv.render(mode=mode, **kwargs)
    def get_images(self, mode=None, **kwargs):
        return self.venv.get_images(mode=mode, **kwargs)
class VecEnvObservationWrapper(VecEnvWrapper):
    """Wrapper that pipes every observation batch through an abstract `process` hook."""

    @abstractmethod
    def process(self, obs):
        """Transform a batch of observations; must be implemented by subclasses."""

    def reset(self):
        return self.process(self.venv.reset())

    def step_wait(self):
        obs, rews, dones, infos = self.venv.step_wait()
        processed = self.process(obs)
        return processed, rews, dones, infos
class CloudpickleWrapper:
    """
    Uses cloudpickle to serialize contents

    Wrapping a payload in this class swaps the pickle protocol for
    cloudpickle, so lambdas/closures survive being sent to worker processes.
    """

    def __init__(self, x):
        # x: the payload (commonly an env-constructor callable).
        self.x = x

    def __getstate__(self):
        # Serialize with cloudpickle instead of the default pickle machinery.
        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        self.x = cloudpickle.loads(ob)
@contextlib.contextmanager
def clear_mpi_env_vars():
    """
    from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
    This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing
    Processes.
    """
    # Stash every OMPI_/PMI_ variable, then remove them for the body's duration.
    stashed = {
        key: value
        for key, value in os.environ.items()
        if key.startswith(("OMPI_", "PMI_"))
    }
    for key in stashed:
        del os.environ[key]
    try:
        yield
    finally:
        # Restore what was removed even if the body raised.
        os.environ.update(stashed)
import os
import os.path as osp
from collections import defaultdict
from typing import Dict, Optional
import numpy as np
import torch
import torch.nn as nn
from rl_utils.common.core_utils import compress_and_filter_dict
from rl_utils.common.viz_utils import save_mp4
from rl_utils.envs.vec_env.vec_env import VecEnv
from rl_utils.interfaces import BasePolicy
class Evaluator:
    """
    Runs a policy over a vectorized env, aggregates per-episode stats, and can
    dump the collected trajectories and rendered videos.

    Dataset save format is meant to be consistent with https://github.com/rail-berkeley/d4rl/blob/master/d4rl/offline_env.py
    """

    def __init__(
        self,
        envs: VecEnv,
        rnn_hxs_dim: int,
        num_render: Optional[int] = None,
        vid_dir: str = "data/vids",
        fps: int = 10,
        save_traj_name: Optional[str] = None,
        **kwargs,
    ):
        """
        :param save_traj_name: The full file path (for example
            "data/trajs/data.pth") to save the evaluated trajectories to.
            When None, trajectories are not written to disk.
        :param num_render: If None then every episode will be rendered.
        :param rnn_hxs_dim: The recurrent hidden state dimension.
        :param vid_dir: Directory where rendered mp4 videos are written.
        :param fps: Frame rate of the saved videos.
        """
        self._envs = envs
        self._rnn_hxs_dim = rnn_hxs_dim
        self._num_render = num_render
        self._vid_dir = vid_dir
        self._fps = fps
        self._should_save_trajs = save_traj_name is not None
        self._save_traj_name = save_traj_name

    def _clear_save_trajs(self):
        """Reset both the per-env in-progress buffers and the flushed storage."""
        # Per-env buffers for the episode currently being collected.
        self._save_trajs_obs = defaultdict(list)
        self._save_trajs_actions = defaultdict(list)
        self._save_trajs_rewards = defaultdict(list)
        self._save_trajs_done = defaultdict(list)
        self._save_trajs_info = defaultdict(list)
        # Flat storage of completed (flushed) episodes, concatenated over time.
        self._all_traj_obs = []
        self._all_traj_actions = []
        self._all_traj_rewards = []
        self._all_traj_done = []
        self._all_traj_info = []

    def _add_transition_to_save(self, env_i, obs, action, reward, done, info):
        """Append env_i's slice of a batched transition to its episode buffer."""
        self._save_trajs_obs[env_i].append(obs[env_i])
        self._save_trajs_actions[env_i].append(action[env_i])
        self._save_trajs_rewards[env_i].append(reward[env_i])
        self._save_trajs_done[env_i].append(done[env_i])
        self._save_trajs_info[env_i].append(info[env_i])

    def _flush_trajectory_to_save(self, env_i):
        """Move env_i's finished episode from its buffer into the flat storage."""
        self._all_traj_obs.extend(self._save_trajs_obs[env_i])
        self._all_traj_actions.extend(self._save_trajs_actions[env_i])
        self._all_traj_rewards.extend(self._save_trajs_rewards[env_i])
        self._all_traj_done.extend(self._save_trajs_done[env_i])
        self._all_traj_info.extend(self._save_trajs_info[env_i])
        self._save_trajs_obs[env_i].clear()
        self._save_trajs_actions[env_i].clear()
        self._save_trajs_rewards[env_i].clear()
        self._save_trajs_done[env_i].clear()
        self._save_trajs_info[env_i].clear()

    @property
    def eval_trajs_obs(self):
        # Observations of all flushed episodes (list of per-step tensors).
        return self._all_traj_obs

    @property
    def eval_trajs_dones(self):
        # Done flags of all flushed episodes, parallel to eval_trajs_obs.
        return self._all_traj_done

    def _save_trajs(self):
        """Stack the flushed transitions and torch.save them in D4RL-like layout."""
        assert self._save_traj_name is not None
        obs = torch.stack(self._all_traj_obs, dim=0).detach()
        actions = torch.stack(self._all_traj_actions, dim=0).detach()
        rewards = torch.stack(self._all_traj_rewards, dim=0).detach()
        terminals = torch.stack(self._all_traj_done, dim=0).detach()
        num_steps = obs.shape[0]
        # Actions are expected to be a flat (num_steps, action_dim) tensor.
        assert (
            actions.shape[0] == num_steps and len(actions.shape) == 2
        ), f"Action shape wrong {actions.shape}"
        rewards = rewards.view(-1)
        terminals = terminals.view(-1)
        assert rewards.size(0) == num_steps, f"Reward is wrong shape {rewards.shape}"
        assert (
            terminals.size(0) == num_steps
        ), f"Terminals is wrong shape {terminals.shape}"
        os.makedirs(osp.dirname(self._save_traj_name), exist_ok=True)
        torch.save(
            {
                "observations": obs,
                "actions": actions,
                "rewards": rewards,
                "terminals": terminals.float(),
                "infos": self._all_traj_info,
            },
            self._save_traj_name,
        )
        print(f"Saved trajectories to {self._save_traj_name}")
        self._clear_save_trajs()

    def evaluate(
        self, policy: BasePolicy, num_episodes: int, eval_i: int
    ) -> Dict[str, float]:
        """Roll out `num_episodes` episodes and return mean per-episode stats.

        :param eval_i: Index used to name the saved video file.
        :returns: Mean of each info statistic over the counted episodes.
        """
        self._clear_save_trajs()
        if isinstance(policy, nn.Module):
            device = next(policy.parameters()).device
        else:
            device = torch.device("cpu")
        num_envs = self._envs.num_envs
        obs = self._envs.reset()
        rnn_hxs = torch.zeros(num_envs, self._rnn_hxs_dim).to(device)
        eval_masks = torch.zeros(num_envs, 1, device=device)
        # Spread the episode budget across envs; the last env takes the remainder.
        evals_per_proc = num_episodes // num_envs
        left_over_evals = num_episodes % num_envs
        num_evals = [evals_per_proc for _ in range(num_envs)]
        num_evals[-1] += left_over_evals
        all_frames = []
        accum_stats = defaultdict(list)
        total_evaluated = 0
        if self._num_render is None:
            num_render = num_episodes
        else:
            num_render = self._num_render
        with torch.no_grad():
            while sum(num_evals) != 0:
                act_data = policy.act(obs, rnn_hxs, eval_masks, is_eval=True)
                next_obs, rewards, done, info = self._envs.step(act_data["actions"])
                rnn_hxs = act_data["hxs"]
                # Keep rendering until num_render episodes have completed.
                if total_evaluated < num_render:
                    frames = self._envs.render(mode="rgb_array")
                    all_frames.append(frames)
                for env_i in range(num_envs):
                    self._add_transition_to_save(
                        env_i, obs, act_data["actions"], rewards, done, info
                    )
                    if done[env_i]:
                        total_evaluated += 1
                        # Only count/flush episodes while this env still has
                        # budget; extra episodes are ignored.
                        if num_evals[env_i] > 0:
                            self._flush_trajectory_to_save(env_i)
                            for k, v in compress_and_filter_dict(info[env_i]).items():
                                accum_stats[k].append(v)
                            num_evals[env_i] -= 1
                obs = next_obs
        if len(all_frames) > 0:
            save_mp4(all_frames, self._vid_dir, f"eval_{eval_i}", self._fps)
        if self._should_save_trajs:
            self._save_trajs()
        return {k: np.mean(v) for k, v in accum_stats.items()}
from typing import Dict, List, Optional, Tuple
import torch
from torch.utils.data import Dataset
class DictDataset(Dataset):
    """Torch dataset over a dict of equally sized tensors, indexed along dim 0."""

    def __init__(
        self,
        load_data: Dict[str, torch.Tensor],
        load_keys: Optional[List[str]] = None,
        detach_all: bool = True,
    ):
        """
        :parameters load_keys: Subset of keys that are loaded from `load_data`.
        """
        if load_keys is None:
            load_keys = load_data.keys()
        selected = {}
        for key, tensor in load_data.items():
            if key not in load_keys:
                continue
            selected[key] = tensor.detach() if detach_all else tensor
        self._load_data = selected
        # All tensors must agree on their first dimension (the dataset length).
        lengths = {tensor.size(0) for tensor in selected.values()}
        if len(lengths) != 1:
            raise ValueError("Tensors to dataset are not of the same shape")
        self._dataset_len = lengths.pop()

    @property
    def all_data(self):
        """The full key -> tensor mapping backing this dataset."""
        return self._load_data

    def get_data(self, k: str) -> torch.Tensor:
        """Return the whole tensor stored under key `k`."""
        return self._load_data[k]

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        return {key: tensor[idx] for key, tensor in self._load_data.items()}

    def __len__(self) -> int:
        return self._dataset_len
def extract_next_tensor(dataset: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Add a "next_obs" tensor to `dataset`, splicing episode-final observations in.

    The default successor of step i is observation i+1; any step whose info dict
    carries "final_obs" marks an episode boundary and uses that stored
    observation instead. Raises ValueError if the episode count implied by the
    infos disagrees with the terminal flags.
    """
    obs = dataset["observations"].detach()
    infos = dataset["infos"]
    # Shift by one step; the very last successor comes from the final info.
    next_obs = torch.cat([obs[1:], infos[-1]["final_obs"].unsqueeze(0)], 0)
    num_eps = 1
    for step in range(obs.shape[0] - 1):
        step_info = infos[step]
        if "final_obs" in step_info:
            num_eps += 1
            next_obs[step] = step_info["final_obs"].detach()
    masks = ~(dataset["terminals"].bool())
    num_terminals = masks.size(0) - masks.sum()
    if num_eps != num_terminals.sum():
        raise ValueError(
            f"Inconsistency in # of episodes {num_eps} vs {dataset['terminals'].sum()}"
        )
    dataset["next_obs"] = next_obs.detach()
    return dataset
def extract_final_obs(
    obs: torch.Tensor, masks: torch.Tensor, final_obs: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Align (obs, next_obs, masks) so masked-out successors use `final_obs`.

    :param obs: Shape (N, ...)
    :param masks: Shape (N, ...); 1 keeps the successor row, 0 substitutes
        the corresponding row of `final_obs`.
    :param final_obs: Shape (N-1, ...)
    :returns: obs, next_obs, masks all of shape (N-1, ...)

    Note: the return annotation previously declared four tensors; the function
    returns three, matching the docstring — annotation fixed accordingly.
    """
    cur_obs = obs[:-1]
    # Shift masks so masks[i] tells whether step i+1 continued the episode.
    masks = masks[1:]
    # Where the episode ended (mask == 0), splice in the stored final observation.
    next_obs = (masks * obs[1:]) + ((1 - masks) * final_obs)
    return cur_obs, next_obs, masks
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class NoisyLinear(nn.Linear):
    """Linear layer with learned, independent Gaussian parameter noise (NoisyNet).

    Fresh noise is drawn on every forward pass, so repeated calls with the same
    input differ — that is the exploration mechanism.
    """

    def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):
        super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)
        # Learnable per-weight noise scale plus a (non-learnable) noise buffer.
        self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
        self.register_buffer("epsilon_weight", torch.zeros(out_features, in_features))
        if bias:
            self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))
            self.register_buffer("epsilon_bias", torch.zeros(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Also invoked by nn.Linear.__init__ before the sigma/epsilon members
        # exist, so only the base weight/bias may be touched here.
        std = math.sqrt(3 / self.in_features)
        self.weight.data.uniform_(-std, std)
        # Fix: with bias=False, self.bias is None and the unconditional
        # self.bias.data access crashed construction.
        if self.bias is not None:
            self.bias.data.uniform_(-std, std)

    def forward(self, input):
        self.epsilon_weight.normal_()
        bias = self.bias
        if bias is not None:
            self.epsilon_bias.normal_()
            bias = bias + self.sigma_bias * self.epsilon_bias.data
        return F.linear(input, self.weight + self.sigma_weight * self.epsilon_weight.data, bias)
class NoisyFactorizedLinear(nn.Linear):
    """NoisyNet linear layer with factorized Gaussian noise.

    Draws one noise vector per input and one per output (O(in + out) samples
    instead of a full matrix) and combines them through
    f(x) = sign(x) * sqrt(|x|) as in Fortunato et al., "Noisy Networks for
    Exploration".
    """

    def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
        super(NoisyFactorizedLinear, self).__init__(in_features, out_features, bias=bias)
        sigma_init = sigma_zero / math.sqrt(in_features)
        self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
        self.register_buffer("epsilon_input", torch.zeros(1, in_features))
        self.register_buffer("epsilon_output", torch.zeros(out_features, 1))
        if bias:
            self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))

    def forward(self, input):
        # Fix: was `self.epsison_input` (typo), which raised AttributeError on
        # every forward pass.
        self.epsilon_input.normal_()
        self.epsilon_output.normal_()
        func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))
        eps_in = func(self.epsilon_input.data)
        eps_out = func(self.epsilon_output.data)
        bias = self.bias
        if bias is not None:
            bias = bias + self.sigma_bias * eps_out.t()
        # Outer product of the two factored noise vectors -> per-weight noise.
        noise_v = torch.mul(eps_in, eps_out)
        return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)
class LSTMWithDones(nn.Module):
    """Step-by-step LSTM that resets its recurrent state wherever `dones` fires.

    Behaves like an LSTM over a (batch, seq, feature) input, except the hidden
    and cell states are zeroed at episode boundaries inside the sequence.
    """

    def __init__(self, input_sz: int, hidden_sz: int):
        super().__init__()
        self.input_sz = input_sz
        self.hidden_size = hidden_sz
        # All four gates (i, f, g, o) packed into a single matmul per source.
        self.weight_ih = nn.Parameter(torch.Tensor(input_sz, hidden_sz * 4))
        self.weight_hh = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz * 4))
        self.bias = nn.Parameter(torch.Tensor(hidden_sz * 4))
        self.init_weights()

    def init_weights(self):
        # Xavier for matrices, zeros for the bias vector.
        for p in self.parameters():
            if p.data.ndimension() >= 2:
                nn.init.xavier_uniform_(p.data)
            else:
                nn.init.zeros_(p.data)

    def forward(self, x, dones, init_states):
        """Assumes x is of shape (batch, sequence, feature).

        :param dones: episode-termination flags; dones[:, t] must broadcast
            against the (batch, hidden) state, e.g. shape (batch, seq, 1).
        :param init_states: required (h_0, c_0) tuple, each (batch, hidden).
        :returns: (hidden_seq of shape (batch, seq, hidden), (h_T, c_T))
        """
        bs, seq_sz, _ = x.size()
        hidden_seq = []
        assert(init_states)
        h_t, c_t = init_states
        HS = self.hidden_size
        for t in range(seq_sz):
            d = dones[:, t]
            # Zero the recurrent state at episode boundaries.
            h_t = h_t * (1 - d)
            c_t = c_t * (1 - d)
            x_t = x[:, t, :]
            # batch the computations into a single matrix multiplication
            gates = x_t @ self.weight_ih + h_t @ self.weight_hh + self.bias
            i_t, f_t, g_t, o_t = (
                torch.sigmoid(gates[:, :HS]),  # input
                torch.sigmoid(gates[:, HS:HS*2]),  # forget
                torch.tanh(gates[:, HS*2:HS*3]),  # cell candidate
                torch.sigmoid(gates[:, HS*3:]),  # output
            )
            c_t = f_t * c_t + i_t * g_t
            h_t = o_t * torch.tanh(c_t)
            hidden_seq.append(h_t.unsqueeze(0))
        # Fix: the per-step slices are (1, batch, hidden), so they must be
        # concatenated along dim 0. The previous dim=1 produced a
        # (1, seq*batch, hidden) tensor and the transpose below never yielded
        # the documented (batch, sequence, feature) layout.
        hidden_seq = torch.cat(hidden_seq, dim=0)
        # reshape from shape (sequence, batch, feature) to (batch, sequence, feature)
        hidden_seq = hidden_seq.transpose(1, 0).contiguous()
        return hidden_seq, (h_t, c_t)
from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class DiscreteA2CAgent(a2c_common.DiscreteA2CBase):
    """PPO/A2C agent for discrete (and multi-discrete) action spaces."""

    def __init__(self, base_name, params):
        """Build the model, optimizer, optional central value net and dataset.

        :param base_name: Run name used by the base trainer (logging etc.).
        :param params: Full algorithm config dict consumed by the base class.
        """
        a2c_common.DiscreteA2CBase.__init__(self, base_name, params)
        obs_shape = self.obs_shape
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_actors * self.num_agents,
            'value_size': self.env_info.get('value_size',1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }
        self.model = self.network.build(config)
        self.model.to(self.ppo_device)
        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)
        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
        if self.has_central_value:
            # Separate centralized critic trained on the global state.
            cv_config = {
                'state_shape' : self.state_shape,
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'horizon_length' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'normalize_value' : self.normalize_value,
                'network' : self.central_value_config['network'],
                'config' : self.central_value_config,
                'writter' : self.writer,
                'max_epochs' : self.max_epochs,
                'multi_gpu' : self.multi_gpu,
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)
        self.use_experimental_cv = self.config.get('use_experimental_cv', False)
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        if self.normalize_value:
            self.value_mean_std = self.central_value_net.model.value_mean_std if self.has_central_value else self.model.value_mean_std
        # The actor-critic value head is only trained here when there is no
        # central value net (or the experimental combined mode is on).
        self.has_value_loss = (self.has_central_value and self.use_experimental_cv) \
            or (not self.has_phasic_policy_gradients and not self.has_central_value)
        self.algo_observer.after_init(self)

    def update_epoch(self):
        """Advance and return the epoch counter."""
        self.epoch_num += 1
        return self.epoch_num

    def save(self, fn):
        """Checkpoint the full agent state to file `fn`."""
        state = self.get_full_state_weights()
        torch_ext.save_checkpoint(fn, state)

    def restore(self, fn):
        """Load a checkpoint previously written by save()."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.set_full_state_weights(checkpoint)

    def get_masked_action_values(self, obs, action_masks):
        """Run inference with invalid actions masked out.

        :param obs: Dict with at least 'obs' (and 'states' for central value).
        :param action_masks: Boolean mask of permitted actions per env.
        """
        processed_obs = self._preproc_obs(obs['obs'])
        action_masks = torch.BoolTensor(action_masks).to(self.ppo_device)
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : processed_obs,
            'action_masks' : action_masks,
            'rnn_states' : self.rnn_states
        }
        with torch.no_grad():
            res_dict = self.model(input_dict)
        if self.has_central_value:
            input_dict = {
                'is_train': False,
                'states' : obs['states'],
            }
            value = self.get_central_value(input_dict)
            res_dict['values'] = value
        if self.is_multi_discrete:
            # NOTE(review): action_masks is already a tensor here; torch.cat
            # over it looks like it expects a sequence of per-branch masks —
            # confirm against the multi-discrete call path.
            action_masks = torch.cat(action_masks, dim=-1)
        res_dict['action_masks'] = action_masks
        return res_dict

    def train_actor_critic(self, input_dict):
        """One optimization step on a minibatch; returns the train_result tuple."""
        self.set_train()
        self.calc_gradients(input_dict)
        # Push the (possibly scheduler-updated) learning rate into the optimizer.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.last_lr
        return self.train_result

    def calc_gradients(self, input_dict):
        """Compute PPO losses on a minibatch, backprop and step the optimizer.

        Stores (a_loss, c_loss, entropy, kl_dist, last_lr, lr_mul) in
        self.train_result.
        """
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        obs_batch = input_dict['obs']
        obs_batch = self._preproc_obs(obs_batch)
        lr_mul = 1.0
        curr_e_clip = lr_mul * self.e_clip
        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch,
        }
        if self.use_action_masks:
            batch_dict['action_masks'] = input_dict['action_masks']
        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len
            batch_dict['bptt_len'] = self.bptt_len
            batch_dict['dones'] = input_dict['dones']
        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['values']
            entropy = res_dict['entropy']
            a_loss = self.actor_loss_func(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
            if self.has_value_loss:
                c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            else:
                c_loss = torch.zeros(1, device=self.ppo_device)
            # Average each loss over valid (unmasked) timesteps when recurrent.
            losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1)], rnn_masks)
            a_loss, c_loss, entropy = losses[0], losses[1], losses[2]
            loss = a_loss + 0.5 *c_loss * self.critic_coef - entropy * self.entropy_coef
            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                # Setting grads to None is cheaper than zeroing buffers.
                for param in self.model.parameters():
                    param.grad = None
        self.scaler.scale(loss).backward()
        self.trancate_gradients_and_step()
        with torch.no_grad():
            # Quadratic approximation of the KL between old and new policies.
            kl_dist = 0.5 * ((old_action_log_probs_batch - action_log_probs)**2)
            if rnn_masks is not None:
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() # / sum_mask
            else:
                kl_dist = kl_dist.mean()
        self.diagnostics.mini_batch(self,
            {
                'values' : value_preds_batch,
                'returns' : return_batch,
                'new_neglogp' : action_log_probs,
                'old_neglogp' : old_action_log_probs_batch,
                'masks' : rnn_masks
            }, curr_e_clip, 0)
        self.train_result = (a_loss, c_loss, entropy, kl_dist,self.last_lr, lr_mul)
from rl_games.common import object_factory
import rl_games.algos_torch
from rl_games.algos_torch import network_builder
from rl_games.algos_torch import models
# Global registries that user code can extend with custom networks/models.
NETWORK_REGISTRY = {}
MODEL_REGISTRY = {}


def register_network(name, target_class):
    """Register a network class; the registry stores a zero-arg constructor."""
    def _construct(**kwargs):
        return target_class()
    NETWORK_REGISTRY[name] = _construct


def register_model(name, target_class):
    """Register a model class; the constructor wraps a network instance."""
    def _construct(network, **kwargs):
        return target_class(network)
    MODEL_REGISTRY[name] = _construct
class NetworkBuilder:
    """Factory facade mapping config names to rl_games network builders."""

    def __init__(self):
        self.network_factory = object_factory.ObjectFactory()
        # Externally registered networks first, then the built-ins.
        self.network_factory.set_builders(NETWORK_REGISTRY)
        builtin = {
            'actor_critic': network_builder.A2CBuilder,
            'masa_actor_critic': network_builder.A2CBuilderMasa,
            'resnet_actor_critic': network_builder.A2CResnetBuilder,
            'rnd_curiosity': network_builder.RNDCuriosityBuilder,
            'soft_actor_critic': network_builder.SACBuilder,
        }
        for key, cls in builtin.items():
            # Bind cls as a default so each lambda keeps its own class.
            self.network_factory.register_builder(key, lambda cls=cls, **kwargs: cls())

    def load(self, params):
        """Create the network named in params['name'] and load its config."""
        network = self.network_factory.create(params['name'])
        network.load(params)
        return network
class ModelBuilder:
    """Factory facade pairing rl_games models with their underlying networks."""

    def __init__(self):
        self.model_factory = object_factory.ObjectFactory()
        # Externally registered models first, then the built-ins.
        self.model_factory.set_builders(MODEL_REGISTRY)
        builtin = {
            'discrete_a2c': models.ModelA2C,
            'multi_discrete_a2c': models.ModelA2CMultiDiscrete,
            'continuous_a2c': models.ModelA2CContinuous,
            'continuous_a2c_logstd': models.ModelA2CContinuousLogStd,
            'soft_actor_critic': models.ModelSACContinuous,
            'central_value': models.ModelCentralValue,
        }
        for key, cls in builtin.items():
            # Bind cls as a default so each lambda keeps its own class.
            self.model_factory.register_builder(
                key, lambda network, cls=cls, **kwargs: cls(network))
        self.network_builder = NetworkBuilder()

    def get_network_builder(self):
        """Expose the nested NetworkBuilder used to build model backbones."""
        return self.network_builder

    def load(self, params):
        """Build the model named in params['model']['name'] around the network
        described by params['network']."""
        model_name = params['model']['name']
        network = self.network_builder.load(params['network'])
        return self.model_factory.create(model_name, network=network)
from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class A2CAgent(a2c_common.ContinuousA2CBase):
    """PPO/A2C agent for continuous action spaces, with optional symmetry losses."""

    def __init__(self, base_name, params):
        """Build the model, optimizer, optional central value net and dataset.

        :param base_name: Run name used by the base trainer (logging etc.).
        :param params: Full algorithm config dict consumed by the base class.
        """
        a2c_common.ContinuousA2CBase.__init__(self, base_name, params)
        obs_shape = self.obs_shape
        build_config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_actors * self.num_agents,
            'value_size': self.env_info.get('value_size',1),
            'normalize_value' : self.normalize_value,
            'normalize_input': self.normalize_input,
        }
        self.model = self.network.build(build_config)
        self.model.to(self.ppo_device)
        self.states = None
        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)
        self.bound_loss_type = self.config.get('bound_loss_type', 'bound') # 'regularisation' or 'bound'
        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
        if self.has_central_value:
            # Separate centralized critic trained on the global state.
            cv_config = {
                'state_shape' : self.state_shape,
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'horizon_length' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'normalize_value' : self.normalize_value,
                'network' : self.central_value_config['network'],
                'config' : self.central_value_config,
                'writter' : self.writer,
                'max_epochs' : self.max_epochs,
                'multi_gpu' : self.multi_gpu,
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)
        self.use_experimental_cv = self.config.get('use_experimental_cv', True)
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        if self.normalize_value:
            self.value_mean_std = self.central_value_net.model.value_mean_std if self.has_central_value else self.model.value_mean_std
        # The actor-critic value head is only trained here when there is no
        # central value net (or the experimental combined mode is on).
        self.has_value_loss = (self.has_central_value and self.use_experimental_cv) \
            or (not self.has_phasic_policy_gradients and not self.has_central_value)
        self.algo_observer.after_init(self)
        # MSE used to tie symmetric-observation outputs together (sappo mode).
        self.loss_supervise = torch.nn.MSELoss()

    def update_epoch(self):
        """Advance and return the epoch counter."""
        self.epoch_num += 1
        return self.epoch_num

    def save(self, fn):
        """Checkpoint the full agent state to file `fn`."""
        state = self.get_full_state_weights()
        torch_ext.save_checkpoint(fn, state)

    def restore(self, fn):
        """Load a checkpoint previously written by save()."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.set_full_state_weights(checkpoint)

    def get_masked_action_values(self, obs, action_masks):
        # Action masking is not applicable to continuous action spaces.
        assert False

    def calc_gradients(self, input_dict):
        """Compute PPO (plus optional symmetry/bound) losses on a minibatch,
        backprop and step the optimizer.

        The observation layout depends on config['method']:
          - 'ppo'/'sappo': obs has an extra symmetry axis; slot 0 is the real
            observation, slots 1.. are symmetric copies (sappo only uses them).
          - 'mappo'/'masappo': obs is used as-is.
        Stores the 9-tuple of losses/stats in self.train_result.
        """
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        old_mu_batch = input_dict['mu']
        old_sigma_batch = input_dict['sigma']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        if self.config['method'] == 'ppo':
            obs_batch = input_dict['obs'][:,0,:]
        elif self.config['method'] == 'sappo':
            obs_batch = input_dict['obs'][:,0,:]
            obs_batch_symmetry = input_dict['obs'][:,1:,:]
        elif self.config['method'] in ['mappo', 'masappo']:
            obs_batch = input_dict['obs']
        else:
            raise ValueError(f"method {self.config['method']} not defined")
        obs_batch = self._preproc_obs(obs_batch)
        lr_mul = 1.0
        curr_e_clip = self.e_clip
        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch,
        }
        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len
        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['values']
            entropy = res_dict['entropy']
            mu = res_dict['mus']
            sigma = res_dict['sigmas']
            a_loss = self.actor_loss_func(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
            if self.has_value_loss:
                c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            else:
                c_loss = torch.zeros(1, device=self.ppo_device)
            if self.bound_loss_type == 'regularisation':
                b_loss = self.reg_loss(mu)
            elif self.bound_loss_type == 'bound':
                b_loss = self.bound_loss(mu)
            else:
                b_loss = torch.zeros(1, device=self.ppo_device)
            a_loss_symmetry = torch.zeros(1, device=self.ppo_device)
            c_loss_symmetry = torch.zeros(1, device=self.ppo_device)
            coef_symmetry = 0.1
            if self.config['method'] == 'sappo':
                # Average the symmetry penalty over the symmetric copies.
                coef_symmetry /= (obs_batch_symmetry.shape[1])
                for i in range(obs_batch_symmetry.shape[1]):
                    batch_dict_symmetry = {
                        'is_train': True,
                        'prev_actions': actions_batch,
                        'obs' : obs_batch_symmetry[:,i,:],
                    }
                    res_dict_symmetry = self.model(batch_dict_symmetry)
                    # NOTE(review): relies on the model exposing 'mus_symmetry'
                    # — confirm the network builder provides it in sappo mode.
                    a_loss_symmetry += self.loss_supervise(res_dict['mus_symmetry'][:,i,:], res_dict_symmetry['mus'])
                    c_loss_symmetry += self.loss_supervise(res_dict['values'], res_dict_symmetry['values'])
            # Average each loss over valid (unmasked) timesteps when recurrent.
            losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss , entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks)
            a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]
            loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * self.entropy_coef + b_loss * self.bounds_loss_coef \
                + coef_symmetry * a_loss_symmetry + coef_symmetry * c_loss_symmetry
            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                # Setting grads to None is cheaper than zeroing buffers.
                for param in self.model.parameters():
                    param.grad = None
        self.scaler.scale(loss).backward()
        # TODO: refactor the gradient truncation / optimizer stepping path.
        self.trancate_gradients_and_step()
        with torch.no_grad():
            reduce_kl = rnn_masks is None
            kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
            if rnn_masks is not None:
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask
        self.diagnostics.mini_batch(self,
            {
                'values' : value_preds_batch,
                'returns' : return_batch,
                'new_neglogp' : action_log_probs,
                'old_neglogp' : old_action_log_probs_batch,
                'masks' : rnn_masks
            }, curr_e_clip, 0)
        self.train_result = (a_loss, c_loss, entropy, \
            kl_dist, self.last_lr, lr_mul, \
            mu.detach(), sigma.detach(), b_loss)

    def train_actor_critic(self, input_dict):
        """One optimization step on a minibatch; returns the train_result tuple."""
        self.calc_gradients(input_dict)
        return self.train_result

    def reg_loss(self, mu):
        """L2 regularization of the action means (per-sample)."""
        if self.bounds_loss_coef is not None:
            reg_loss = (mu*mu).sum(axis=-1)
        else:
            reg_loss = 0
        return reg_loss

    def bound_loss(self, mu):
        """Quadratic penalty for action means outside the soft bound [-1.1, 1.1]."""
        if self.bounds_loss_coef is not None:
            soft_bound = 1.1
            mu_loss_high = torch.clamp_min(mu - soft_bound, 0.0)**2
            mu_loss_low = torch.clamp_max(mu + soft_bound, 0.0)**2
            b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
        else:
            b_loss = 0
        return b_loss
import torch
from torch import nn
import torch.distributed as dist
import gym
import numpy as np
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.common import schedulers
class CentralValueTrain(nn.Module):
    def __init__(self, state_shape, value_size, ppo_device, num_agents, horizon_length, num_actors, num_actions, seq_len, normalize_value,network, config, writter, max_epochs, multi_gpu):
        """Build the centralized value network, its optimizer, scheduler,
        minibatch layout and (for recurrent models) the RNN state buffers.

        :param network: Network builder whose build() yields the value model.
        :param config: Central-value sub-config (lr, minibatch sizes, etc.).
        :param writter: Summary writer (name kept as spelled by the caller).
        """
        nn.Module.__init__(self)
        self.ppo_device = ppo_device
        self.num_agents, self.horizon_length, self.num_actors, self.seq_len = num_agents, horizon_length, num_actors, seq_len
        self.normalize_value = normalize_value
        self.num_actions = num_actions
        self.state_shape = state_shape
        self.value_size = value_size
        self.max_epochs = max_epochs
        self.multi_gpu = multi_gpu
        self.truncate_grads = config.get('truncate_grads', False)
        self.config = config
        self.normalize_input = config['normalize_input']
        state_config = {
            'value_size' : value_size,
            'input_shape' : state_shape,
            'actions_num' : num_actions,
            'num_agents' : num_agents,
            'num_seqs' : num_actors,
            'normalize_input' : self.normalize_input,
            'normalize_value': self.normalize_value,
        }
        self.model = network.build(state_config)
        self.lr = float(config['learning_rate'])
        self.linear_lr = config.get('lr_schedule') == 'linear'
        if self.linear_lr:
            self.scheduler = schedulers.LinearScheduler(self.lr,
                max_steps=self.max_epochs,
                apply_to_entropy=False,
                start_entropy_coef=0)
        else:
            self.scheduler = schedulers.IdentityScheduler()
        self.mini_epoch = config['mini_epochs']
        # One of the two minibatch-size options must be configured.
        assert(('minibatch_size_per_env' in self.config) or ('minibatch_size' in self.config))
        self.minibatch_size_per_env = self.config.get('minibatch_size_per_env', 0)
        self.minibatch_size = self.config.get('minibatch_size', self.num_actors * self.minibatch_size_per_env)
        self.num_minibatches = self.horizon_length * self.num_actors // self.minibatch_size
        self.clip_value = config['clip_value']
        self.writter = writter
        self.weight_decay = config.get('weight_decay', 0.0)
        self.optimizer = torch.optim.Adam(self.model.parameters(), float(self.lr), eps=1e-08, weight_decay=self.weight_decay)
        self.frame = 0
        self.epoch_num = 0
        self.running_mean_std = None
        self.grad_norm = config.get('grad_norm', 1)
        # NOTE(review): truncate_grads is assigned twice and truncate_grad
        # (singular) once from the same config key — likely leftover duplication.
        self.truncate_grads = config.get('truncate_grads', False)
        self.e_clip = config.get('e_clip', 0.2)
        self.truncate_grad = self.config.get('truncate_grads', False)
        self.is_rnn = self.model.is_rnn()
        self.rnn_states = None
        self.batch_size = self.horizon_length * self.num_actors
        if self.is_rnn:
            self.rnn_states = self.model.get_default_rnn_state()
            self.rnn_states = [s.to(self.ppo_device) for s in self.rnn_states]
            total_agents = self.num_actors #* self.num_agents
            num_seqs = self.horizon_length // self.seq_len
            # Minibatches must split cleanly along sequence boundaries.
            assert ((self.horizon_length * total_agents // self.num_minibatches) % self.seq_len == 0)
            # Buffers holding the RNN state snapshot at the start of each sequence.
            self.mb_rnn_states = [ torch.zeros((num_seqs, s.size()[0], total_agents, s.size()[2]), dtype=torch.float32, device=self.ppo_device) for s in self.rnn_states]
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, True, self.is_rnn, self.ppo_device, self.seq_len)
def update_lr(self, lr):
if self.multi_gpu:
lr_tensor = torch.tensor([lr], device=self.device)
dist.broadcast(lr_tensor, 0)
lr = lr_tensor.item()
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def get_stats_weights(self, model_stats=False):
state = {}
if model_stats:
if self.normalize_input:
state['running_mean_std'] = self.model.running_mean_std.state_dict()
if self.normalize_value:
state['reward_mean_std'] = self.model.value_mean_std.state_dict()
return state
    def set_stats_weights(self, weights):
        """Restore normalization statistics from a checkpoint.

        Currently a no-op — presumably the stats are restored through the
        model's own state dict instead; verify against the checkpoint loader.
        """
        pass
    def update_dataset(self, batch_dict):
        """Reshape a rollout batch for training and hand it to the PPO dataset.

        For multi-agent runs the per-agent tensors are collapsed to one value
        target per env; for recurrent models the buffered per-sequence RNN
        states are flattened into the layout the dataset expects.
        """
        value_preds = batch_dict['old_values']
        returns = batch_dict['returns']
        actions = batch_dict['actions']
        dones = batch_dict['dones']
        rnn_masks = batch_dict['rnn_masks']
        if self.num_agents > 1:
            res = self.update_multiagent_tensors(value_preds, returns, actions, dones)
            batch_dict['old_values'] = res[0]
            batch_dict['returns'] = res[1]
            batch_dict['actions'] = res[2]
            batch_dict['dones'] = res[3]
        if self.is_rnn:
            states = []
            for mb_s in self.mb_rnn_states:
                # (num_seqs, layers, agents, hidden) -> (layers, agents*num_seqs, hidden)
                t_size = mb_s.size()[0] * mb_s.size()[2]
                h_size = mb_s.size()[3]
                states.append(mb_s.permute(1,2,0,3).reshape(-1, t_size, h_size))
            batch_dict['rnn_states'] = states
            if self.num_agents > 1:
                # NOTE(review): res[3] is the reshaped dones tensor; reusing it
                # as rnn_masks looks suspicious — confirm intended.
                rnn_masks = res[3]
            batch_dict['rnn_masks'] = rnn_masks
        self.dataset.update_values_dict(batch_dict)
def _preproc_obs(self, obs_batch):
if type(obs_batch) is dict:
obs_batch = copy.copy(obs_batch)
for k,v in obs_batch.items():
if v.dtype == torch.uint8:
obs_batch[k] = v.float() / 255.0
else:
obs_batch[k] = v
else:
if obs_batch.dtype == torch.uint8:
obs_batch = obs_batch.float() / 255.0
return obs_batch
def pre_step_rnn(self, n):
if not self.is_rnn:
return
if n % self.seq_len == 0:
for s, mb_s in zip(self.rnn_states, self.mb_rnn_states):
mb_s[n // self.seq_len,:,:,:] = s
def post_step_rnn(self, all_done_indices):
if not self.is_rnn:
return
all_done_indices = all_done_indices[::self.num_agents] // self.num_agents
for s in self.rnn_states:
s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0
def forward(self, input_dict):
return self.model(input_dict)
    def get_value(self, input_dict):
        """Run the central value model in eval mode and return value estimates.

        Side effect: advances self.rnn_states with the states returned by the
        model. With multiple agents, each environment value is replicated so
        every agent row gets the shared estimate.
        """
        self.eval()
        obs_batch = input_dict['states']
        actions = input_dict.get('actions', None)
        obs_batch = self._preproc_obs(obs_batch)
        res_dict = self.forward({'obs' : obs_batch, 'actions': actions,
        'rnn_states': self.rnn_states,
        'is_train' : False})
        value, self.rnn_states = res_dict['values'], res_dict['rnn_states']
        if self.num_agents > 1:
            # broadcast the per-env value to all of its agents
            value = value.repeat(1, self.num_agents)
            value = value.view(value.size()[0]*self.num_agents, -1)
        return value
def train_critic(self, input_dict):
self.train()
loss = self.calc_gradients(input_dict)
return loss.item()
    def update_multiagent_tensors(self, value_preds, returns, actions, dones):
        """Reshape multi-agent rollout tensors to a flat batch of batch_size.

        value_preds/returns are regrouped agent-major (agents, actors,
        horizon) before flattening; 'actions' is returned untouched.
        NOTE(review): 'dones' is flattened WITHOUT the transpose applied to
        value_preds/returns -- confirm the differing layouts are intended.
        """
        batch_size = self.batch_size
        ma_batch_size = self.num_actors * self.num_agents * self.horizon_length
        value_preds = value_preds.view(self.num_actors, self.num_agents, self.horizon_length, self.value_size).transpose(0,1)
        returns = returns.view(self.num_actors, self.num_agents, self.horizon_length, self.value_size).transpose(0,1)
        value_preds = value_preds.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
        returns = returns.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
        dones = dones.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
        return value_preds, returns, actions, dones
def train_net(self):
self.train()
loss = 0
for _ in range(self.mini_epoch):
for idx in range(len(self.dataset)):
loss += self.train_critic(self.dataset[idx])
if self.normalize_input:
self.model.running_mean_std.eval() # don't need to update statstics more than one miniepoch
avg_loss = loss / (self.mini_epoch * self.num_minibatches)
self.epoch_num += 1
self.lr, _ = self.scheduler.update(self.lr, 0, self.epoch_num, 0, 0)
self.update_lr(self.lr)
self.frame += self.batch_size
if self.writter != None:
self.writter.add_scalar('losses/cval_loss', avg_loss, self.frame)
self.writter.add_scalar('info/cval_lr', self.lr, self.frame)
return avg_loss
    def calc_gradients(self, batch):
        """Compute the (optionally clipped) critic loss for one minibatch and
        apply a single optimizer step. Returns the unreduced-to-python loss
        tensor (callers use .item()).
        """
        obs_batch = self._preproc_obs(batch['obs'])
        value_preds_batch = batch['old_values']
        returns_batch = batch['returns']
        actions_batch = batch['actions']
        dones_batch = batch['dones']
        rnn_masks_batch = batch.get('rnn_masks')
        batch_dict = {'obs' : obs_batch,
                    'actions' : actions_batch,
                    'seq_length' : self.seq_len,
                    'dones' : dones_batch}
        if self.is_rnn:
            batch_dict['rnn_states'] = batch['rnn_states']
        res_dict = self.model(batch_dict)
        values = res_dict['values']
        loss = common_losses.critic_loss(value_preds_batch, values, self.e_clip, returns_batch, self.clip_value)
        # apply_masks averages the per-element loss, honoring RNN padding masks
        losses, _ = torch_ext.apply_masks([loss], rnn_masks_batch)
        loss = losses[0]
        if self.multi_gpu:
            self.optimizer.zero_grad()
        else:
            # setting .grad = None is cheaper than zeroing in-place
            for param in self.model.parameters():
                param.grad = None
        loss.backward()
        #TODO: Refactor this ugly gradient-clipping / optimizer-step branching
        if self.truncate_grads:
            if self.multi_gpu:
                # NOTE(review): synchronize/skip_synchronize is the Horovod
                # optimizer API -- confirm the multi_gpu optimizer type
                self.optimizer.synchronize()
                #self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                with self.optimizer.skip_synchronize():
                    self.optimizer.step()
            else:
                #self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                self.optimizer.step()
        else:
            self.optimizer.step()
        return loss
import torch
import torch.nn as nn
import numpy as np
import rl_games.algos_torch.torch_ext as torch_ext
'''
Updates moving statistics with momentum (exponential moving average).
'''
class MovingMeanStd(nn.Module):
    """Normalizes inputs with an exponential moving average of mean/var.

    Note the momentum convention: 'momentum' is the weight kept on the OLD
    statistics (new = old * momentum + batch * (1 - momentum)), so the
    default 0.25 adapts quickly. Outputs are clamped to [-5, 5]; with
    unnorm=True the transform is inverted (denormalization).
    """
    def __init__(self, insize, momentum = 0.25, epsilon=1e-05, per_channel=False, norm_only=False):
        super(MovingMeanStd, self).__init__()
        self.insize = insize
        self.epsilon = epsilon
        self.momentum = momentum
        self.norm_only = norm_only  # NOTE(review): stored but unused in forward()
        self.per_channel = per_channel
        if per_channel:
            # reduce over batch and spatial axes, keep the channel axis
            if len(self.insize) == 3:
                self.axis = [0,2,3]
            if len(self.insize) == 2:
                self.axis = [0,2]
            if len(self.insize) == 1:
                self.axis = [0]
            in_size = self.insize[0]
        else:
            self.axis = [0]
            in_size = insize
        # float64 buffers for numerically stable accumulation
        self.register_buffer("moving_mean", torch.zeros(in_size, dtype = torch.float64))
        self.register_buffer("moving_var", torch.ones(in_size, dtype = torch.float64))
    def forward(self, input, mask=None, unnorm=False):
        if self.training:
            if mask is not None:
                mean, var = torch_ext.get_mean_std_with_masks(input, mask)
            else:
                mean = input.mean(self.axis) # along channel axis
                var = input.var(self.axis)
            # EMA update: momentum weights the previous statistics
            self.moving_mean = self.moving_mean * self.momentum + mean * (1 - self.momentum)
            self.moving_var = self.moving_var * self.momentum + var * (1 - self.momentum)
        # change shape: broadcast per-channel stats to the input layout
        if self.per_channel:
            if len(self.insize) == 3:
                current_mean = self.moving_mean.view([1, self.insize[0], 1, 1]).expand_as(input)
                current_var = self.moving_var.view([1, self.insize[0], 1, 1]).expand_as(input)
            if len(self.insize) == 2:
                current_mean = self.moving_mean.view([1, self.insize[0], 1]).expand_as(input)
                current_var = self.moving_var.view([1, self.insize[0], 1]).expand_as(input)
            if len(self.insize) == 1:
                current_mean = self.moving_mean.view([1, self.insize[0]]).expand_as(input)
                current_var = self.moving_var.view([1, self.insize[0]]).expand_as(input)
        else:
            current_mean = self.moving_mean
            current_var = self.moving_var
        # get output
        if unnorm:
            # inverse transform: clamp normalized input, then rescale/shift back
            y = torch.clamp(input, min=-5.0, max=5.0)
            y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
        else:
            y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
            y = torch.clamp(y, min=-5.0, max=5.0)
        return y
from rl_games.algos_torch import torch_ext
import torch
import torch.nn as nn
import numpy as np
'''
Updates running statistics from full batches of data.
'''
class RunningMeanStd(nn.Module):
    """Normalizes inputs with running mean/variance accumulated over all data.

    Uses the parallel-variance (Chan et al.) combination formula to merge
    batch statistics into the running buffers. Output is clamped to [-5, 5];
    unnorm=True applies the inverse (denormalizing) transform.

    NOTE(review): in the non-per_channel branch only the LAST dimension of
    insize is tracked and forward() flattens to (-1, insize[-1]) -- this is a
    project-specific change; confirm against callers.
    """
    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        super(RunningMeanStd, self).__init__()
        print('RunningMeanStd: ', insize)
        self.insize = insize
        self.epsilon = epsilon
        self.norm_only = norm_only
        self.per_channel = per_channel
        if per_channel:
            # reduce over batch and spatial axes, keep the channel axis
            if len(self.insize) == 3:
                self.axis = [0,2,3]
            if len(self.insize) == 2:
                self.axis = [0,2]
            if len(self.insize) == 1:
                self.axis = [0]
            in_size = self.insize[0]
        else:
            self.axis = [0]
            in_size = (insize[-1],)
        # float64 buffers for numerically stable accumulation
        self.register_buffer("running_mean", torch.zeros(in_size, dtype = torch.float64))
        self.register_buffer("running_var", torch.ones(in_size, dtype = torch.float64))
        self.register_buffer("count", torch.ones((), dtype = torch.float64))
    def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
        """Merge batch moments into running moments (parallel-variance formula)."""
        delta = batch_mean - mean
        tot_count = count + batch_count
        new_mean = mean + delta * batch_count / tot_count
        m_a = var * count
        m_b = batch_var * batch_count
        M2 = m_a + m_b + delta**2 * count * batch_count / tot_count
        new_var = M2 / tot_count
        new_count = tot_count
        return new_mean, new_var, new_count
    def forward(self, input, unnorm=False, mask=None):
        if self.training:
            if mask is not None:
                # mean, var = torch_ext.get_mean_std_with_masks(input, mask)
                assert False, "not use mask in masa project"
            else:
                # flatten everything but the feature dim before computing stats
                input_resize = input.view(-1,self.insize[-1])
                mean = input_resize.mean(self.axis) # along channel axis
                var = input_resize.var(self.axis)
            self.running_mean, self.running_var, self.count = self._update_mean_var_count_from_moments(self.running_mean, self.running_var, self.count,
                                                    mean, var, input_resize.size()[0] )
        # change shape: broadcast per-channel stats to the input layout
        if self.per_channel:
            if len(self.insize) == 3:
                current_mean = self.running_mean.view([1, self.insize[0], 1, 1]).expand_as(input)
                current_var = self.running_var.view([1, self.insize[0], 1, 1]).expand_as(input)
            if len(self.insize) == 2:
                current_mean = self.running_mean.view([1, self.insize[0], 1]).expand_as(input)
                current_var = self.running_var.view([1, self.insize[0], 1]).expand_as(input)
            if len(self.insize) == 1:
                current_mean = self.running_mean.view([1, self.insize[0]]).expand_as(input)
                current_var = self.running_var.view([1, self.insize[0]]).expand_as(input)
        else:
            current_mean = self.running_mean
            current_var = self.running_var
        # get output
        if unnorm:
            # inverse transform: clamp normalized input, then rescale/shift back
            y = torch.clamp(input, min=-5.0, max=5.0)
            y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
        else:
            if self.norm_only:
                y = input/ torch.sqrt(current_var.float() + self.epsilon)
            else:
                y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
                y = torch.clamp(y, min=-5.0, max=5.0)
        return y
class RunningMeanStdObs(nn.Module):
    """Per-key running normalization for dict observations.

    Wraps one RunningMeanStd per observation entry, keyed identically to the
    input dict.
    """
    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        assert(isinstance(insize, dict))
        super(RunningMeanStdObs, self).__init__()
        normalizers = {key: RunningMeanStd(shape, epsilon, per_channel, norm_only)
                       for key, shape in insize.items()}
        self.running_mean_std = nn.ModuleDict(normalizers)

    def forward(self, input, unnorm=False):
        """Normalize (or denormalize) every entry of the observation dict."""
        return {key: self.running_mean_std[key](value, unnorm)
                for key, value in input.items()}
from rl_games.common.player import BasePlayer
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.tr_helpers import unsqueeze_obs
import gym
import torch
from torch import nn
import numpy as np
def rescale_actions(low, high, action):
    """Linearly map an action from [-1, 1] into the interval [low, high]."""
    half_range = (high - low) / 2.0
    midpoint = (high + low) / 2.0
    return action * half_range + midpoint
class PpoPlayerContinuous(BasePlayer):
    """Inference-time player for a continuous-action PPO agent.

    Builds the network from the run config, loads checkpoint weights, and
    produces actions clamped to [-1, 1].
    """
    def __init__(self, params):
        BasePlayer.__init__(self, params)
        self.network = self.config['network']
        self.actions_num = self.action_space.shape[0]
        # NOTE(review): actions_low/high are prepared here but get_action
        # clamps to [-1, 1] without using them -- confirm intended
        self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
        self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
        self.mask = [False]
        self.normalize_input = self.config['normalize_input']
        self.normalize_value = self.config.get('normalize_value', False)
        obs_shape = self.obs_shape
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_agents,
            'value_size': self.env_info.get('value_size',1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()
    def get_action(self, obs, is_determenistic = False):
        """Return a clamped action for obs.

        NOTE(review): the 'is_determenistic' flag is ignored and the model
        is called directly on the (method-dependent) observation slice --
        this is a project-specific model interface; confirm with the model.
        """
        obs = unsqueeze_obs(obs)
        if self.config['method'] in ['ppo', 'sappo']:
            # single-agent methods receive only the first agent's obs row
            processed_obs = obs[:,0,:]
        elif self.config['method'] in ['mappo', 'masappo']:
            processed_obs = obs
        else:
            raise ValueError(f"method {self.config['method']} not defined")
        with torch.no_grad():
            current_action = self.model(processed_obs)
        current_action = torch.squeeze(current_action.detach())
        return torch.clamp(current_action, -1.0, 1.0)
    def restore(self, fn):
        """Load model (and input-normalizer) weights from checkpoint fn."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.load_state_dict(checkpoint['model'])
        if self.normalize_input and 'running_mean_std' in checkpoint:
            self.model.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
    def reset(self):
        self.init_rnn()
class PpoPlayerDiscrete(BasePlayer):
    """Inference-time player for discrete / multi-discrete PPO agents.

    Supports optional action masking and deterministic (argmax) or sampled
    action selection.
    """
    def __init__(self, params):
        BasePlayer.__init__(self, params)
        self.network = self.config['network']
        if type(self.action_space) is gym.spaces.Discrete:
            self.actions_num = self.action_space.n
            self.is_multi_discrete = False
        if type(self.action_space) is gym.spaces.Tuple:
            # multi-discrete: one categorical head per sub-space
            self.actions_num = [action.n for action in self.action_space]
            self.is_multi_discrete = True
        self.mask = [False]
        self.normalize_input = self.config['normalize_input']
        self.normalize_value = self.config.get('normalize_value', False)
        obs_shape = self.obs_shape
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_agents,
            'value_size': self.env_info.get('value_size',1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()
    def get_masked_action(self, obs, action_masks, is_determenistic = True):
        """Return an action with invalid choices masked out.

        Side effect: advances self.states with the model's RNN states.
        """
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        obs = self._preproc_obs(obs)
        action_masks = torch.Tensor(action_masks).to(self.device).bool()
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : obs,
            'action_masks' : action_masks,
            'rnn_states' : self.states
        }
        self.model.eval()

        with torch.no_grad():
            res_dict = self.model(input_dict)
        logits = res_dict['logits']
        action = res_dict['actions']
        self.states = res_dict['rnn_states']
        if self.is_multi_discrete:
            if is_determenistic:
                # argmax per categorical head, stacked along the last dim
                action = [torch.argmax(logit.detach(), axis=-1).squeeze() for logit in logits]
                return torch.stack(action,dim=-1)
            else:
                return action.squeeze().detach()
        else:
            if is_determenistic:
                return torch.argmax(logits.detach(), axis=-1).squeeze()
            else:
                return action.squeeze().detach()
    def get_action(self, obs, is_determenistic = False):
        """Return an action (argmax when deterministic, sampled otherwise).

        Side effect: advances self.states with the model's RNN states.
        """
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        obs = self._preproc_obs(obs)
        self.model.eval()
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : obs,
            'rnn_states' : self.states
        }
        with torch.no_grad():
            res_dict = self.model(input_dict)
        logits = res_dict['logits']
        action = res_dict['actions']
        self.states = res_dict['rnn_states']
        if self.is_multi_discrete:
            if is_determenistic:
                # NOTE(review): axis=1 here vs axis=-1 in get_masked_action;
                # equivalent for 2-D logits, but confirm for other shapes
                action = [torch.argmax(logit.detach(), axis=1).squeeze() for logit in logits]
                return torch.stack(action,dim=-1)
            else:
                return action.squeeze().detach()
        else:
            if is_determenistic:
                return torch.argmax(logits.detach(), axis=-1).squeeze()
            else:
                return action.squeeze().detach()
    def restore(self, fn):
        """Load model (and input-normalizer) weights from checkpoint fn."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.load_state_dict(checkpoint['model'])
        if self.normalize_input and 'running_mean_std' in checkpoint:
            self.model.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
    def reset(self):
        self.init_rnn()
class SACPlayer(BasePlayer):
    """Inference-time player for a trained SAC agent.

    Builds the actor/critic networks from the run config, loads checkpoint
    weights, and produces clamped continuous actions.
    """
    def __init__(self, params):
        BasePlayer.__init__(self, params)
        self.network = self.config['network']
        self.actions_num = self.action_space.shape[0]
        # global min/max over all action dims, used to clamp actions
        self.action_range = [
            float(self.env_info['action_space'].low.min()),
            float(self.env_info['action_space'].high.max())
        ]

        obs_shape = self.obs_shape
        self.normalize_input = False
        config = {
            'obs_dim': self.env_info["observation_space"].shape[0],
            'action_dim': self.env_info["action_space"].shape[0],
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'value_size': self.env_info.get('value_size', 1),
            'normalize_value': False,
            'normalize_input': self.normalize_input,
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()

    def restore(self, fn):
        """Load actor, critic and target-critic weights from checkpoint fn."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.sac_network.actor.load_state_dict(checkpoint['actor'])
        self.model.sac_network.critic.load_state_dict(checkpoint['critic'])
        self.model.sac_network.critic_target.load_state_dict(checkpoint['critic_target'])
        if self.normalize_input and 'running_mean_std' in checkpoint:
            self.model.running_mean_std.load_state_dict(checkpoint['running_mean_std'])

    def get_action(self, obs, is_determenistic=False):
        """Return an action for obs, clamped to the valid action range.

        Deterministic mode uses the policy distribution's mean; otherwise a
        sample is drawn. (Bug fix: the two branches were previously swapped,
        so 'deterministic' evaluation actually sampled.)
        """
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        dist = self.model.actor(obs)
        actions = dist.mean if is_determenistic else dist.sample()
        actions = actions.clamp(*self.action_range).to(self.device)
        if self.has_batch_dimension == False:
            actions = torch.squeeze(actions.detach())
        return actions

    def reset(self):
        pass
import gym
import numpy as np
from pettingzoo.sisl import multiwalker_v6
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
import rl_games.envs.connect4_network
class MultiWalker(gym.Env):
    """Gym adapter around PettingZoo's parallel multiwalker_v6 env.

    Stacks the three walkers' observations/rewards/dones into arrays, and
    optionally appends one-hot agent ids, previous actions, and a
    concatenated central-value state.
    """
    def __init__(self, name="multiwalker",  **kwargs):
        gym.Env.__init__(self)
        self.name = name
        self.env = multiwalker_v6.parallel_env()
        self.use_central_value = kwargs.pop('central_value', False)
        self.use_prev_actions = kwargs.pop('use_prev_actions', False)
        self.apply_agent_ids = kwargs.pop('apply_agent_ids', False)
        # NOTE(review): 'add_timeouts' is stored but never used in this class
        self.add_timeouts = kwargs.pop('add_timeouts', False)
        self.action_space = self.env.action_spaces['walker_0']
        self.steps_count = 0
        obs_len = self.env.observation_spaces['walker_0'].shape[0]
        add_obs = 0
        if self.apply_agent_ids:
            add_obs = 3  # one-hot id per walker
        if self.use_prev_actions:
            obs_len += self.action_space.shape[0]
        self.observation_space = gym.spaces.Box(-1, 1, shape =(obs_len + add_obs,))
        if self.use_central_value:
            # central state is the three raw observations concatenated
            self.state_space = gym.spaces.Box(-1, 1, shape =(obs_len*3,))
    def step(self, action):
        """Step all three walkers; 'action' is indexed per walker."""
        self.steps_count += 1
        actions = {'walker_0' : action[0], 'walker_1' : action[1], 'walker_2' : action[2],}
        obs, reward, done, info = self.env.step(actions)
        if self.use_prev_actions:
            # append each walker's executed action to its observation
            obs = {
                k: np.concatenate([v, actions[k]]) for k,v in obs.items()
            }
        obses = np.stack([obs['walker_0'], obs['walker_1'], obs['walker_2']])
        rewards = np.stack([reward['walker_0'], reward['walker_1'], reward['walker_2']])
        dones = np.stack([done['walker_0'], done['walker_1'], done['walker_2']])
        if self.apply_agent_ids:
            num_agents = 3
            all_ids = np.eye(num_agents, dtype=np.float32)
            obses = np.concatenate([obses, all_ids], axis=-1)
        if self.use_central_value:
            states = np.concatenate([obs['walker_0'], obs['walker_1'], obs['walker_2']])
            obses = {
                'obs' : obses,
                'state': states
            }
        return obses, rewards, dones, info
    def reset(self):
        """Reset the underlying env; previous actions start as zeros."""
        obs = self.env.reset()
        self.steps_count = 0
        if self.use_prev_actions:
            zero_actions = np.zeros(self.action_space.shape[0])
            obs = {
                k: np.concatenate([v, zero_actions]) for k,v in obs.items()
            }
        obses = np.stack([obs['walker_0'], obs['walker_1'], obs['walker_2']])
        if self.apply_agent_ids:
            num_agents = 3
            all_ids = np.eye(num_agents, dtype=np.float32)
            obses = np.concatenate([obses, all_ids], axis=-1)
        if self.use_central_value:
            states = np.concatenate([obs['walker_0'], obs['walker_1'], obs['walker_2']])
            obses = {
                'obs' : obses,
                'state': states
            }
        return obses
    def render(self, mode='ansi'):
        self.env.render(mode)
    def get_number_of_agents(self):
        return 3
    def has_action_mask(self):
        return False
from rl_games.common.ivecenv import IVecEnv
import gym
import numpy as np
import torch.utils.dlpack as tpack
class Envpool(IVecEnv):
    """Vectorized-env adapter over an envpool batch environment.

    Tracks cumulative per-env scores (cleanrl-style) so that games with
    lives report the full-episode score, and can optionally expose a dict
    observation space carrying reward and last action.
    """
    def __init__(self, config_name, num_actors, **kwargs):
        import envpool

        self.batch_size = num_actors
        env_name=kwargs.pop('env_name')
        self.has_lives = kwargs.pop('has_lives', False)
        self.use_dict_obs_space = kwargs.pop('use_dict_obs_space', False)
        self.env = envpool.make( env_name,
                            env_type=kwargs.pop('env_type', 'gym'),
                            num_envs=num_actors,
                            batch_size=self.batch_size,
                            **kwargs
                           )
        if self.use_dict_obs_space:
            self.observation_space= gym.spaces.Dict({
                'observation' : self.env.observation_space,
                'reward' : gym.spaces.Box(low=0, high=1, shape=( ), dtype=np.float32),
                # np.long was removed in NumPy 1.24; np.int64 is the
                # equivalent concrete dtype
                'last_action': gym.spaces.Box(low=0, high=self.env.action_space.n, shape=(), dtype=np.int64)
            })
        else:
            self.observation_space = self.env.observation_space

        self.ids = np.arange(0, num_actors)
        self.action_space = self.env.action_space
        self.scores = np.zeros(num_actors)
        self.returned_scores = np.zeros(num_actors)

    def _set_scores(self, infos, dones):
        """Accumulate per-env scores and expose them via infos['scores'].

        thanks to cleanrl: https://github.com/vwxyzjn/cleanrl/blob/3d20d11f45a5f1d764934e9851b816d0b03d2d10/cleanrl/ppo_atari_envpool.py#L111
        """
        if 'reward' not in infos:
            return
        self.scores += infos["reward"]
        self.returned_scores[:] = self.scores
        infos["scores"] = self.returned_scores
        if self.has_lives:
            # only reset the accumulator when ALL lives are spent
            all_lives_exhausted = infos["lives"] == 0
            self.scores *= 1 - all_lives_exhausted
        else:
            # removing lives otherwise default observer will use them
            if 'lives' in infos:
                del infos['lives']
            self.scores *= 1 - dones

    def step(self, action):
        next_obs, reward, is_done, info = self.env.step(action , self.ids)
        # NOTE(review): assumes envpool always provides
        # info['TimeLimit.truncated'] -- confirm for the chosen env_type
        info['time_outs'] = info['TimeLimit.truncated']
        self._set_scores(info, is_done)
        if self.use_dict_obs_space:
            next_obs = {
                'observation': next_obs,
                'reward': np.clip(reward, -1, 1),
                'last_action': action
            }
        return next_obs, reward, is_done, info

    def reset(self):
        obs = self.env.reset(self.ids)
        if self.use_dict_obs_space:
            obs = {
                'observation': obs,
                'reward': np.zeros(obs.shape[0]),
                'last_action': np.zeros(obs.shape[0]),
            }
        return obs

    def get_number_of_agents(self):
        return 1

    def get_env_info(self):
        info = {}
        info['action_space'] = self.action_space
        info['observation_space'] = self.observation_space
        return info
def create_envpool(**kwargs):
    """Factory for an Envpool vec-env; 'num_actors' defaults to 16."""
    num_actors = kwargs.pop('num_actors', 16)
    return Envpool("", num_actors, **kwargs)
import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """Input stem of the Connect-4 network: one 3x3 conv + batchnorm over
    the 4-plane board encoding, producing 128 feature maps.

    forward() expects a dict and reads the board tensor from key 'obs'.
    """
    def __init__(self):
        super(ConvBlock, self).__init__()
        self.action_size = 7  # one action per board column
        self.conv1 = nn.Conv2d(4, 128, 3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(128)

    def forward(self, s):
        board = s['obs'].contiguous()
        return F.relu(self.bn1(self.conv1(board)))
class ResBlock(nn.Module):
    """Residual block: conv-bn-relu-conv-bn plus the identity shortcut,
    followed by a final ReLU.

    NOTE(review): 'downsample' is accepted but never used, so inplanes must
    equal planes and stride must preserve the spatial size.
    """
    def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

    def forward(self, x):
        shortcut = x
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + shortcut)
class OutBlock(nn.Module):
    """Dual-head output: a scalar value head and 7 column policy logits.

    NOTE(review): the value head applies relu before tanh, which limits the
    value estimate to [0, 1) instead of [-1, 1] -- confirm this is intended.
    """
    def __init__(self):
        super(OutBlock, self).__init__()
        # value head
        self.conv = nn.Conv2d(128, 3, kernel_size=1)
        self.bn = nn.BatchNorm2d(3)
        self.fc1 = nn.Linear(3*6*7, 32)
        self.fc2 = nn.Linear(32, 1)
        # policy head
        self.conv1 = nn.Conv2d(128, 32, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.fc = nn.Linear(6*7*32, 7)

    def forward(self, s):
        # value head: 1x1 conv -> flatten -> two fc layers -> relu -> tanh
        v = F.relu(self.bn(self.conv(s)))
        v = v.view(-1, 3*6*7)
        v = F.relu(self.fc1(v))
        v = torch.tanh(F.relu(self.fc2(v)))
        # policy head: 1x1 conv -> flatten -> fc producing 7 column logits
        p = F.relu(self.bn1(self.conv1(s)))
        p = self.fc(p.view(-1, 6*7*32))
        return p, v, None
class ConnectNet(nn.Module):
    """Connect-4 policy/value network: ConvBlock stem, `blocks` residual
    blocks, and the OutBlock dual head.

    NOTE(review): forward() calls s.permute(...) (tensor API) and then
    passes s to ConvBlock, whose forward indexes s['obs'] (dict API). These
    expectations look incompatible -- confirm the actual input type with
    the caller before relying on this path.
    """
    def __init__(self, blocks):
        super(ConnectNet, self).__init__()
        self.blocks = blocks
        self.conv = ConvBlock()
        # residual tower registered as attributes res_0 .. res_{blocks-1}
        for block in range(self.blocks):
            setattr(self, "res_%i" % block,ResBlock())
        self.outblock = OutBlock()
    def is_rnn(self):
        return False
    def forward(self,s):
        # NHWC -> NCHW
        s = s.permute((0, 3, 1, 2))
        s = self.conv(s)
        for block in range(self.blocks):
            s = getattr(self, "res_%i" % block)(s)
        s = self.outblock(s)
        return s
from rl_games.algos_torch.network_builder import NetworkBuilder
class ConnectBuilder(NetworkBuilder):
    """NetworkBuilder adapter that constructs a ConnectNet from config."""

    def __init__(self, **kwargs):
        NetworkBuilder.__init__(self)

    def load(self, params):
        """Remember the config; params['blocks'] sets the tower depth."""
        self.params = params
        self.blocks = params['blocks']

    def build(self, name, **kwargs):
        network = ConnectNet(self.blocks)
        return network

    def __call__(self, name, **kwargs):
        return self.build(name, **kwargs)
import torch
import rl_games.algos_torch.torch_ext as torch_ext
class DefaultDiagnostics(object):
    """Diagnostics interface whose hooks are all no-ops; used when detailed
    PPO diagnostics are disabled."""

    def __init__(self):
        pass

    def send_info(self, writter):
        """Hook; no-op by default."""

    def epoch(self, agent, current_epoch):
        """Hook; no-op by default."""

    def mini_epoch(self, agent, miniepoch):
        """Hook; no-op by default."""

    def mini_batch(self, agent, batch, e_clip, minibatch):
        """Hook; no-op by default."""
class PpoDiagnostics(DefaultDiagnostics):
    """Collects PPO training diagnostics (clip fraction, explained variance,
    normalizer statistics) and writes them to tensorboard each epoch."""
    def __init__(self):
        self.diag_dict = {}       # tag -> scalar tensor, flushed by send_info
        self.clip_fracs = []      # per-minibatch clip fractions (current mini-epoch)
        self.exp_vars = []        # per-minibatch explained variances (current epoch)
        self.current_epoch = 0
    def send_info(self, writter):
        """Flush all accumulated scalars to the summary writer."""
        if writter is None:
            return
        for k,v in self.diag_dict.items():
            writter.add_scalar(k, v.cpu().numpy(), self.current_epoch)
    def epoch(self, agent, current_epoch):
        """End-of-epoch hook: snapshot normalizer stats and average exp_var."""
        self.current_epoch = current_epoch
        if agent.normalize_rms_advantage:
            self.diag_dict['diagnostics/rms_advantage/mean'] = agent.advantage_mean_std.moving_mean
            self.diag_dict['diagnostics/rms_advantage/var'] = agent.advantage_mean_std.moving_var
        if agent.normalize_value:
            self.diag_dict['diagnostics/rms_value/mean'] = agent.value_mean_std.running_mean
            self.diag_dict['diagnostics/rms_value/var'] = agent.value_mean_std.running_var
        exp_var = torch.stack(self.exp_vars, axis=0).mean()
        self.exp_vars = []
        self.diag_dict['diagnostics/exp_var'] = exp_var
    def mini_epoch(self, agent, miniepoch):
        """End-of-mini-epoch hook: average and record the clip fraction."""
        clip_frac = torch.stack(self.clip_fracs, axis=0).mean()
        self.clip_fracs = []
        self.diag_dict['diagnostics/clip_frac/{0}'.format(miniepoch)] = clip_frac
    def mini_batch(self, agent, batch, e_clip, minibatch):
        """Per-minibatch hook: accumulate exp_var and clip fraction."""
        with torch.no_grad():
            values = batch['values'].detach()
            returns = batch['returns'].detach()
            new_neglogp = batch['new_neglogp'].detach()
            old_neglogp = batch['old_neglogp'].detach()
            masks = batch['masks']
            exp_var = torch_ext.explained_variance(values, returns, masks)
            clip_frac = torch_ext.policy_clip_fraction(new_neglogp, old_neglogp, e_clip, masks)
            self.exp_vars.append(exp_var)
            self.clip_fracs.append(clip_frac)
import time
class IntervalSummaryWriter:
    """
    Rate-limiting wrapper around a summary writer that keeps tf.events
    files small on very long runs (e.g. Population-Based Training).

    For each summary tag, a value is only written when at least the current
    interval has passed since that tag was last written. The interval
    adapts: it grows toward 1/200th of the elapsed training time, clamped
    between interval_sec_min and interval_sec_max.
    """

    def __init__(self, summary_writer, cfg):
        self.experiment_start = time.time()
        # prevents noisy summaries right after an experiment restart
        self.defer_summaries_sec = cfg.get('defer_summaries_sec', 5)
        self.interval_sec_min = cfg.get('summaries_interval_sec_min', 5)
        self.interval_sec_max = cfg.get('summaries_interval_sec_max', 300)
        self.last_interval = self.interval_sec_min

        # target interval as a fraction of the total elapsed training time
        self.summaries_relative_step = 1.0 / 200

        self.writer = summary_writer
        self.last_write_for_tag = dict()

    def _calc_interval(self):
        """Return the current write interval, growing it early in the run."""
        if self.last_interval >= self.interval_sec_max:
            return self.last_interval

        elapsed = time.time() - self.experiment_start
        interval = elapsed * self.summaries_relative_step
        interval = max(self.interval_sec_min, min(interval, self.interval_sec_max))
        self.last_interval = interval
        return interval

    def add_scalar(self, tag, value, step, *args, **kwargs):
        if step == 0:
            # drop faulty step-0 summaries that appear after restarts
            return

        if time.time() - self.experiment_start < self.defer_summaries_sec:
            return

        since_last = time.time() - self.last_write_for_tag.get(tag, 0)
        if since_last >= self._calc_interval():
            self.writer.add_scalar(tag, value, step, *args, **kwargs)
            self.last_write_for_tag[tag] = time.time()

    def __getattr__(self, attr):
        # delegate everything else (add_histogram, flush, ...) to the writer
        return getattr(self.writer, attr)
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
class AlgoObserver:
    """Observer interface with no-op hooks for the training loop."""

    def __init__(self):
        pass

    def before_init(self, base_name, config, experiment_name):
        """Hook; no-op by default."""

    def after_init(self, algo):
        """Hook; no-op by default."""

    def process_infos(self, infos, done_indices):
        """Hook; no-op by default."""

    def after_steps(self):
        """Hook; no-op by default."""

    def after_print_stats(self, frame, epoch_num, total_time):
        """Hook; no-op by default."""
class DefaultAlgoObserver(AlgoObserver):
    """Tracks game scores from env infos ('battle_won' or 'scores') and logs
    their running mean to tensorboard."""
    def __init__(self):
        pass

    def after_init(self, algo):
        self.algo = algo
        self.game_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
        self.writer = self.algo.writer

    def process_infos(self, infos, done_indices):
        """Extract per-episode results from infos for each finished env.

        Handles both the list-of-dicts form (one dict per env) and the
        single-dict (envpool-style) form.
        """
        if not infos:
            return
        done_indices = done_indices.cpu().numpy()
        if not isinstance(infos, dict) and len(infos) > 0 and isinstance(infos[0], dict):
            for ind in done_indices:
                ind = ind.item()
                if len(infos) <= ind//self.algo.num_agents:
                    continue
                info = infos[ind//self.algo.num_agents]
                game_res = None
                if 'battle_won' in info:
                    game_res = info['battle_won']
                if 'scores' in info:
                    game_res = info['scores']
                if game_res is not None:
                    self.game_scores.update(torch.from_numpy(np.asarray([game_res])).to(self.algo.ppo_device))
        elif isinstance(infos, dict):
            if 'lives' in infos:
                # envpool: treat only envs with zero lives as truly done
                done_indices = np.argwhere(infos['lives'] == 0).squeeze(1)
            for ind in done_indices:
                ind = ind.item()
                game_res = None
                if 'battle_won' in infos:
                    game_res = infos['battle_won']
                if 'scores' in infos:
                    game_res = infos['scores']
                if game_res is not None and len(game_res) > ind//self.algo.num_agents:
                    self.game_scores.update(torch.from_numpy(np.asarray([game_res[ind//self.algo.num_agents]])).to(self.algo.ppo_device))

    def after_clear_stats(self):
        self.game_scores.clear()

    def after_print_stats(self, frame, epoch_num, total_time):
        """Log the mean game score against frame, epoch and wall time."""
        if self.game_scores.current_size > 0 and self.writer is not None:
            mean_scores = self.game_scores.get_mean()
            self.writer.add_scalar('scores/mean', mean_scores, frame)
            self.writer.add_scalar('scores/iter', mean_scores, epoch_num)
            self.writer.add_scalar('scores/time', mean_scores, total_time)
class IsaacAlgoObserver(AlgoObserver):
    """Log statistics from the environment along with the algorithm running stats."""

    def __init__(self):
        pass

    def after_init(self, algo):
        self.algo = algo
        # NOTE(review): mean_scores is reported/cleared below but never
        # updated inside this class -- confirm who feeds it
        self.mean_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
        self.ep_infos = []      # accumulated infos["episode"] dicts
        self.direct_info = {}   # latest scalar entries taken straight from infos
        self.writer = self.algo.writer

    def process_infos(self, infos, done_indices):
        """Collect episode stats and directly-loggable scalars from infos."""
        if not isinstance(infos, dict):
            classname = self.__class__.__name__
            raise ValueError(f"{classname} expected 'infos' as dict. Received: {type(infos)}")
        # store episode information
        if "episode" in infos:
            self.ep_infos.append(infos["episode"])
        # log other variables directly
        if len(infos) > 0 and isinstance(infos, dict):  # allow direct logging from env
            self.direct_info = {}
            for k, v in infos.items():
                # only log scalars
                if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0):
                    self.direct_info[k] = v

    def after_clear_stats(self):
        # clear stored buffers
        self.mean_scores.clear()

    def after_print_stats(self, frame, epoch_num, total_time):
        """Average and write the collected episode/env stats to tensorboard."""
        # log scalars from the episode
        if self.ep_infos:
            for key in self.ep_infos[0]:
                info_tensor = torch.tensor([], device=self.algo.device)
                for ep_info in self.ep_infos:
                    # handle scalar and zero dimensional tensor infos
                    if not isinstance(ep_info[key], torch.Tensor):
                        ep_info[key] = torch.Tensor([ep_info[key]])
                    if len(ep_info[key].shape) == 0:
                        ep_info[key] = ep_info[key].unsqueeze(0)
                    info_tensor = torch.cat((info_tensor, ep_info[key].to(self.algo.device)))
                value = torch.mean(info_tensor)
                self.writer.add_scalar("Episode/" + key, value, epoch_num)
            self.ep_infos.clear()
        # log scalars from env information
        for k, v in self.direct_info.items():
            self.writer.add_scalar(f"{k}/frame", v, frame)
            self.writer.add_scalar(f"{k}/iter", v, epoch_num)
            self.writer.add_scalar(f"{k}/time", v, total_time)
        # log mean reward/score from the env
        if self.mean_scores.current_size > 0:
            mean_scores = self.mean_scores.get_mean()
            self.writer.add_scalar("scores/mean", mean_scores, frame)
            self.writer.add_scalar("scores/iter", mean_scores, epoch_num)
            self.writer.add_scalar("scores/time", mean_scores, total_time)
from torch import nn
import torch
import math
def critic_loss(value_preds_batch, values, curr_e_clip, return_batch, clip_value):
    """Squared-error value loss, optionally with PPO-style value clipping.

    With clipping, the new value is constrained to stay within curr_e_clip
    of the old prediction, and the element-wise worse of the clipped /
    unclipped losses is returned (pessimistic bound).
    """
    if not clip_value:
        return (return_batch - values)**2
    delta = (values - value_preds_batch).clamp(-curr_e_clip, curr_e_clip)
    value_pred_clipped = value_preds_batch + delta
    unclipped_loss = (values - return_batch)**2
    clipped_loss = (value_pred_clipped - return_batch)**2
    return torch.max(unclipped_loss, clipped_loss)
def smooth_clamp(x, mi, mx):
    """Differentiable soft clamp of x into [mi, mx] via a scaled sigmoid."""
    t = (x - mi) / (mx - mi)  # normalize so the clamp window maps to [0, 1]
    return torch.sigmoid((t - 0.5) * 4) * (mx - mi) + mi
def smoothed_actor_loss(old_action_neglog_probs_batch, action_neglog_probs, advantage, is_ppo, curr_e_clip):
    """PPO surrogate actor loss using a smooth (sigmoid) clamp of the
    probability ratio instead of a hard clip; falls back to the plain
    policy-gradient loss when is_ppo is False."""
    if not is_ppo:
        return action_neglog_probs * advantage
    ratio = torch.exp(old_action_neglog_probs_batch - action_neglog_probs)
    soft_ratio = smooth_clamp(ratio, 1.0 - curr_e_clip, 1.0 + curr_e_clip)
    # pessimistic max over negated surrogates, as in standard PPO
    return torch.max(-advantage * ratio, -advantage * soft_ratio)
def actor_loss(old_action_neglog_probs_batch, action_neglog_probs, advantage, is_ppo, curr_e_clip):
    """Standard PPO clipped-surrogate actor loss; plain policy-gradient
    loss when is_ppo is False."""
    if not is_ppo:
        return action_neglog_probs * advantage
    ratio = torch.exp(old_action_neglog_probs_batch - action_neglog_probs)
    clipped_ratio = torch.clamp(ratio, 1.0 - curr_e_clip, 1.0 + curr_e_clip)
    # element-wise worst case of the two surrogates (negated for minimization)
    return torch.max(-advantage * ratio, -advantage * clipped_ratio)
def decoupled_actor_loss(behavior_action_neglog_probs, action_neglog_probs, proxy_neglog_probs, advantage, curr_e_clip):
    """Decoupled PPO-style policy loss where the clipping is applied to the
    proxy/current log-ratio while importance weights come from the behavior
    policy."""
    log_ratio = proxy_neglog_probs - action_neglog_probs
    # unclipped surrogate, importance-weighted by behavior vs current policy
    surrogate_unclipped = -advantage * torch.exp(behavior_action_neglog_probs - action_neglog_probs)
    clipped_log_ratio = torch.clamp(log_ratio, math.log(1.0 - curr_e_clip), math.log(1.0 + curr_e_clip))
    surrogate_clipped = -advantage * torch.exp(clipped_log_ratio - proxy_neglog_probs + behavior_action_neglog_probs)
    return torch.max(surrogate_unclipped, surrogate_clipped)
import numpy as np
from collections import defaultdict
class LinearValueProcessor:
    """Linearly anneals a value from start_eps to end_eps over
    end_eps_frames frames; constant at end_eps afterwards."""

    def __init__(self, start_eps, end_eps, end_eps_frames):
        self.start_eps = start_eps
        self.end_eps = end_eps
        self.end_eps_frames = end_eps_frames

    def __call__(self, frame):
        # past the annealing horizon the value is pinned to the endpoint
        if frame >= self.end_eps_frames:
            return self.end_eps
        frac = frame / self.end_eps_frames
        return self.start_eps + frac * (self.end_eps - self.start_eps)
class DefaultRewardsShaper:
    """Affine reward transform (shift, then scale) followed by clipping to
    [min_val, max_val]; works on torch tensors or numpy arrays."""

    def __init__(self, scale_value = 1, shift_value = 0, min_val=-np.inf, max_val=np.inf, is_torch=True):
        self.scale_value = scale_value
        self.shift_value = shift_value
        self.min_val = min_val
        self.max_val = max_val
        self.is_torch = is_torch

    def __call__(self, reward):
        shaped = (reward + self.shift_value) * self.scale_value
        if self.is_torch:
            import torch  # imported lazily so numpy-only users avoid the dependency
            return torch.clamp(shaped, self.min_val, self.max_val)
        return np.clip(shaped, self.min_val, self.max_val)
def dicts_to_dict_with_arrays(dicts, add_batch_dim = True):
    """Merge a list of dicts into a single dict of numpy arrays.

    Values for the same key are gathered across the list and combined with
    np.stack (add_batch_dim=True) or np.concatenate (add_batch_dim=False);
    already-1D value lists are simply converted with np.array.

    Note: a list of length <= 1 is returned unchanged (still a list).
    """
    def stack(v):
        if len(np.shape(v)) == 1:
            return np.array(v)
        else:
            return np.stack(v)

    def concatenate(v):
        if len(np.shape(v)) == 1:
            return np.array(v)
        else:
            return np.concatenate(v)

    if len(dicts) <= 1:
        return dicts

    # fixed: was a set comprehension executed purely for its side effects
    res = defaultdict(list)
    for sub in dicts:
        for key in sub:
            res[key].append(sub[key])

    concat_func = stack if add_batch_dim else concatenate
    return {k: concat_func(v) for k, v in res.items()}
def unsqueeze_obs(obs):
    """Add a leading batch dimension to an observation tensor, recursing
    into dict observations (the dict is updated in place)."""
    if isinstance(obs, dict):
        for key in obs:
            obs[key] = unsqueeze_obs(obs[key])
        return obs
    # a 1-element 1-D tensor is left as-is; everything else gets a batch axis
    needs_batch_axis = len(obs.size()) > 1 or obs.size()[0] > 1
    return obs.unsqueeze(0) if needs_batch_axis else obs
def flatten_first_two_dims(arr):
    """Collapse the first two axes of arr into one; arrays with ndim <= 2
    are flattened completely."""
    if arr.ndim <= 2:
        return arr.reshape(-1)
    # keep trailing axes intact (shape[2:] == shape[-(ndim-2):])
    return arr.reshape(-1, *arr.shape[2:])
def free_mem():
    # Ask glibc to return freed heap pages to the OS.
    # Linux/glibc only: loading 'libc.so.6' will raise OSError on other platforms.
    import ctypes
    ctypes.CDLL('libc.so.6').malloc_trim(0)
import rl_games.envs.test
from rl_games.common import wrappers
from rl_games.common import tr_helpers
from rl_games.envs.brax import create_brax_env
from rl_games.envs.envpool import create_envpool
import gym
from gym.wrappers import FlattenObservation, FilterObservation
import numpy as np
import math
class HCRewardEnv(gym.RewardWrapper):
    """Reward wrapper that floors every step reward at -10."""

    def __init__(self, env):
        super().__init__(env)

    def reward(self, reward):
        # clamp large negative penalties to -10
        return np.max([-10, reward])
class DMControlWrapper(gym.Wrapper):
    """Wrapper for dm2gym envs that exposes the flat 'observations' entry
    of the dict observation space as the env's observation space."""

    def __init__(self, env):
        # fixed: previously called gym.RewardWrapper.__init__ even though this
        # class subclasses gym.Wrapper; use the proper MRO chain instead.
        super().__init__(env)
        self.observation_space = self.env.observation_space['observations']
        self.observation_space.dtype = np.dtype('float32')

    def reset(self, **kwargs):
        self.num_stops = 0
        return self.env.reset(**kwargs)

    def step(self, action):
        # pure pass-through; kept explicit for easy instrumentation
        observation, reward, done, info = self.env.step(action)
        return observation, reward, done, info
class DMControlObsWrapper(gym.ObservationWrapper):
    """Observation wrapper that unwraps dm2gym's dict observation to the
    flat 'observations' array."""

    def __init__(self, env):
        # fixed: previously called gym.RewardWrapper.__init__ even though this
        # class subclasses gym.ObservationWrapper; use the proper MRO chain.
        super().__init__(env)

    def observation(self, obs):
        return obs['observations']
def create_default_gym_env(**kwargs):
    """Build a plain gym env by name, with optional frame stacking
    (procgen-aware) and an optional step limit wrapper."""
    frames = kwargs.pop('frames', 1)
    name = kwargs.pop('name')
    is_procgen = kwargs.pop('procgen', False)
    limit_steps = kwargs.pop('limit_steps', False)
    env = gym.make(name, **kwargs)
    if frames > 1:
        # ProcgenStack(env, frames, True) for procgen, FrameStack(env, frames, False) otherwise
        stack_cls = wrappers.ProcgenStack if is_procgen else wrappers.FrameStack
        env = stack_cls(env, frames, is_procgen)
    if limit_steps:
        env = wrappers.LimitStepsWrapper(env)
    return env
def create_goal_gym_env(**kwargs):
    """Build a goal-conditioned gym env, flattening the observation and
    desired-goal entries into one observation vector."""
    frames = kwargs.pop('frames', 1)
    name = kwargs.pop('name')
    limit_steps = kwargs.pop('limit_steps', False)
    env = FlattenObservation(FilterObservation(gym.make(name, **kwargs), ['observation', 'desired_goal']))
    if frames > 1:
        env = wrappers.FrameStack(env, frames, False)
    if limit_steps:
        env = wrappers.LimitStepsWrapper(env)
    return env
def create_slime_gym_env(**kwargs):
    """Build a SlimeVolley env, optionally wrapped for self-play."""
    import slimevolleygym
    from rl_games.envs.slimevolley_selfplay import SlimeVolleySelfplay
    name = kwargs.pop('name')
    # popped so it is not forwarded to the env ctor; not otherwise used here
    limit_steps = kwargs.pop('limit_steps', False)
    self_play = kwargs.pop('self_play', False)
    if self_play:
        return SlimeVolleySelfplay(name, **kwargs)
    return gym.make(name, **kwargs)
def create_connect_four_env(**kwargs):
    """Build a Connect Four env, optionally wrapped for self-play."""
    from rl_games.envs.connect4_selfplay import ConnectFourSelfPlay
    name = kwargs.pop('name')
    # popped so it is not forwarded to the env ctor; not otherwise used here
    limit_steps = kwargs.pop('limit_steps', False)
    self_play = kwargs.pop('self_play', False)
    if self_play:
        return ConnectFourSelfPlay(name, **kwargs)
    return gym.make(name, **kwargs)
def create_atari_gym_env(**kwargs):
    """Build an Atari env with the standard deepmind preprocessing stack."""
    name = kwargs.pop('name')
    skip = kwargs.pop('skip', 4)
    episode_life = kwargs.pop('episode_life', True)
    wrap_impala = kwargs.pop('wrap_impala', False)
    return wrappers.make_atari_deepmind(
        name, skip=skip, episode_life=episode_life, wrap_impala=wrap_impala, **kwargs)
def create_dm_control_env(**kwargs):
    """Build a dm_control env through the dm2gym bridge, flattening the
    dict observation and limiting episodes to 1000 steps."""
    frames = kwargs.pop('frames', 1)
    name = 'dm2gym:' + kwargs.pop('name')
    env = gym.make(name, environment_kwargs=kwargs)
    env = DMControlObsWrapper(DMControlWrapper(env))
    env = wrappers.TimeLimit(env, 1000)
    if frames > 1:
        env = wrappers.FrameStack(env, frames, False)
    return env
def create_super_mario_env(name='SuperMarioBros-v1'):
    """Build a Super Mario Bros env with simple controls, frame skipping
    and deepmind-style preprocessing."""
    import gym
    from nes_py.wrappers import JoypadSpace
    from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
    import gym_super_mario_bros
    env = JoypadSpace(gym_super_mario_bros.make(name), SIMPLE_MOVEMENT)
    env = wrappers.MaxAndSkipEnv(env, skip=4)
    return wrappers.wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=True)
def create_super_mario_env_stage1(name='SuperMarioBrosRandomStage1-v1'):
    # Build a Super Mario Bros world-1 env with simple controls, frame
    # skipping and deepmind-style preprocessing.
    # NOTE(review): the `name` parameter is ignored -- the env is always built
    # from stage_names[1] ('SuperMarioBros-1-2-v1'). Presumably the intent was
    # to select a random stage from this list; confirm before changing.
    import gym
    from nes_py.wrappers import JoypadSpace
    from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
    import gym_super_mario_bros
    stage_names = [
        'SuperMarioBros-1-1-v1',
        'SuperMarioBros-1-2-v1',
        'SuperMarioBros-1-3-v1',
        'SuperMarioBros-1-4-v1',
    ]
    env = gym_super_mario_bros.make(stage_names[1])
    env = JoypadSpace(env, SIMPLE_MOVEMENT)
    env = wrappers.MaxAndSkipEnv(env, skip=4)
    env = wrappers.wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=True)
    #env = wrappers.AllowBacktracking(env)
    return env
def create_quadrupped_env():
    """Build the QuadruppedWalk roboschool env with frame skip 4 and a
    2-frame flattened stack."""
    import gym
    import roboschool
    import quadruppedEnv
    env = gym.make('QuadruppedWalk-v1')
    env = wrappers.MaxAndSkipEnv(env, 4, False)
    return wrappers.FrameStack(env, 2, True)
def create_roboschool_env(name):
    """Build a roboschool env by gym id (importing roboschool registers them)."""
    import gym
    import roboschool
    return gym.make(name)
def create_smac(name, **kwargs):
    """Build a StarCraft II (SMAC) env with optional batched frame stacking,
    state-aware when a central value function is configured."""
    from rl_games.envs.smac_env import SMACEnv
    frames = kwargs.pop('frames', 1)
    # NOTE(review): popped but never forwarded -- stacking always uses
    # transpose=False below; confirm whether this was meant to be passed on.
    transpose = kwargs.pop('transpose', False)
    flatten = kwargs.pop('flatten', True)
    has_cv = kwargs.get('central_value', False)
    env = SMACEnv(name, **kwargs)
    if frames > 1:
        stack_cls = wrappers.BatchedFrameStackWithStates if has_cv else wrappers.BatchedFrameStack
        env = stack_cls(env, frames, transpose=False, flatten=flatten)
    return env
def create_smac_cnn(name, **kwargs):
    """Build a SMAC env with batched frame stacking suitable for CNN
    policies (4 frames by default, optional transpose)."""
    from rl_games.envs.smac_env import SMACEnv
    has_cv = kwargs.get('central_value', False)
    frames = kwargs.pop('frames', 4)
    transpose = kwargs.pop('transpose', False)
    env = SMACEnv(name, **kwargs)
    stack_cls = wrappers.BatchedFrameStackWithStates if has_cv else wrappers.BatchedFrameStack
    return stack_cls(env, frames, transpose=transpose)
def create_test_env(name, **kwargs):
    """Build one of the rl_games test envs (registered via rl_games.envs.test)."""
    import rl_games.envs.test
    return gym.make(name, **kwargs)
def create_minigrid_env(name, **kwargs):
    """Build a MiniGrid env with optional exploration bonuses and an RGB
    image observation (fully or partially observed)."""
    import gym_minigrid
    import gym_minigrid.wrappers
    use_state_bonus = kwargs.pop('state_bonus', False)
    use_action_bonus = kwargs.pop('action_bonus', False)
    rgb_fully_obs = kwargs.pop('rgb_fully_obs', False)
    rgb_partial_obs = kwargs.pop('rgb_partial_obs', True)
    view_size = kwargs.pop('view_size', 3)

    env = gym.make(name, **kwargs)
    if use_state_bonus:
        env = gym_minigrid.wrappers.StateBonus(env)
    if use_action_bonus:
        env = gym_minigrid.wrappers.ActionBonus(env)
    if rgb_fully_obs:
        env = gym_minigrid.wrappers.RGBImgObsWrapper(env)
    elif rgb_partial_obs:
        # render the agent's partial view as pixels, tiled to roughly 84x84
        env = gym_minigrid.wrappers.ViewSizeWrapper(env, view_size)
        env = gym_minigrid.wrappers.RGBImgPartialObsWrapper(env, tile_size=84 // view_size)
    env = gym_minigrid.wrappers.ImgObsWrapper(env)
    print('minigird_env observation space shape:', env.observation_space)
    return env
def create_multiwalker_env(**kwargs):
    """Build the PettingZoo MultiWalker env wrapper."""
    from rl_games.envs.multiwalker import MultiWalker
    return MultiWalker('', **kwargs)
def create_diambra_env(**kwargs):
    """Build the Diambra fighting-game env wrapper."""
    from rl_games.envs.diambra.diambra import DiambraEnv
    return DiambraEnv(**kwargs)
def create_env(name, **kwargs):
    """Build a gym env by name with an optional TimeLimit wrapper."""
    steps_limit = kwargs.pop('steps_limit', None)
    env = gym.make(name, **kwargs)
    return env if steps_limit is None else wrappers.TimeLimit(env, steps_limit)
# Global env registry: env name -> {'env_creator': factory taking env_config
# kwargs, 'vecenv_type': the vectorization backend used to run it}.
# NOTE(review): some entries declare `lambda :` without **kwargs (e.g.
# 'MountainCar-v0', 'SuperMarioBros-v1') and will fail if an env_config is
# passed -- verify against callers.
# NOTE(review): the 'Flex*' entries reference FLEX_PATH / create_flex, which
# are not defined in this part of the module -- presumably defined elsewhere;
# confirm before use.
configurations = {
    'CartPole-v1' : {
        'vecenv_type' : 'RAY',
        'env_creator' : lambda **kwargs : gym.make('CartPole-v1'),
    },
    'CartPoleMaskedVelocity-v1' : {
        'vecenv_type' : 'RAY',
        'env_creator' : lambda **kwargs : wrappers.MaskVelocityWrapper(gym.make('CartPole-v1'), 'CartPole-v1'),
    },
    'MountainCarContinuous-v0' : {
        'vecenv_type' : 'RAY',
        'env_creator' : lambda **kwargs : gym.make('MountainCarContinuous-v0'),
    },
    'MountainCar-v0' : {
        'vecenv_type' : 'RAY',
        'env_creator' : lambda : gym.make('MountainCar-v0'),
    },
    'Acrobot-v1' : {
        'env_creator' : lambda **kwargs : gym.make('Acrobot-v1'),
        'vecenv_type' : 'RAY'
    },
    'Pendulum-v0' : {
        'env_creator' : lambda **kwargs : gym.make('Pendulum-v0'),
        'vecenv_type' : 'RAY'
    },
    'LunarLander-v2' : {
        'env_creator' : lambda **kwargs : gym.make('LunarLander-v2'),
        'vecenv_type' : 'RAY'
    },
    'PongNoFrameskip-v4' : {
        'env_creator' : lambda **kwargs :  wrappers.make_atari_deepmind('PongNoFrameskip-v4', skip=4),
        'vecenv_type' : 'RAY'
    },
    'BreakoutNoFrameskip-v4' : {
        'env_creator' : lambda  **kwargs : wrappers.make_atari_deepmind('BreakoutNoFrameskip-v4', skip=4,sticky=False),
        'vecenv_type' : 'RAY'
    },
    'MsPacmanNoFrameskip-v4' : {
        'env_creator' : lambda  **kwargs : wrappers.make_atari_deepmind('MsPacmanNoFrameskip-v4', skip=4),
        'vecenv_type' : 'RAY'
    },
    'CarRacing-v0' : {
        'env_creator' : lambda **kwargs :  wrappers.make_car_racing('CarRacing-v0', skip=4),
        'vecenv_type' : 'RAY'
    },
    'RoboschoolAnt-v1' : {
        'env_creator' : lambda **kwargs  : create_roboschool_env('RoboschoolAnt-v1'),
        'vecenv_type' : 'RAY'
    },
    'SuperMarioBros-v1' : {
        'env_creator' : lambda :  create_super_mario_env(),
        'vecenv_type' : 'RAY'
    },
    'SuperMarioBrosRandomStages-v1' : {
        'env_creator' : lambda :  create_super_mario_env('SuperMarioBrosRandomStages-v1'),
        'vecenv_type' : 'RAY'
    },
    'SuperMarioBrosRandomStage1-v1' : {
        'env_creator' : lambda **kwargs  : create_super_mario_env_stage1('SuperMarioBrosRandomStage1-v1'),
        'vecenv_type' : 'RAY'
    },
    'RoboschoolHalfCheetah-v1' : {
        'env_creator' : lambda **kwargs  : create_roboschool_env('RoboschoolHalfCheetah-v1'),
        'vecenv_type' : 'RAY'
    },
    'RoboschoolHumanoid-v1' : {
        'env_creator' : lambda : wrappers.FrameStack(create_roboschool_env('RoboschoolHumanoid-v1'), 1, True),
        'vecenv_type' : 'RAY'
    },
    'LunarLanderContinuous-v2' : {
        'env_creator' : lambda **kwargs : gym.make('LunarLanderContinuous-v2'),
        'vecenv_type' : 'RAY'
    },
    'RoboschoolHumanoidFlagrun-v1' : {
        'env_creator' : lambda **kwargs : wrappers.FrameStack(create_roboschool_env('RoboschoolHumanoidFlagrun-v1'), 1, True),
        'vecenv_type' : 'RAY'
    },
    'BipedalWalker-v3' : {
        'env_creator' : lambda **kwargs  : create_env('BipedalWalker-v3', **kwargs),
        'vecenv_type' : 'RAY'
    },
    'BipedalWalkerCnn-v3' : {
        'env_creator' : lambda **kwargs : wrappers.FrameStack(HCRewardEnv(gym.make('BipedalWalker-v3')), 4, False),
        'vecenv_type' : 'RAY'
    },
    'BipedalWalkerHardcore-v3' : {
        'env_creator' : lambda **kwargs : gym.make('BipedalWalkerHardcore-v3'),
        'vecenv_type' : 'RAY'
    },
    'ReacherPyBulletEnv-v0' : {
        'env_creator' : lambda **kwargs : create_roboschool_env('ReacherPyBulletEnv-v0'),
        'vecenv_type' : 'RAY'
    },
    'BipedalWalkerHardcoreCnn-v3' : {
        'env_creator' : lambda : wrappers.FrameStack(gym.make('BipedalWalkerHardcore-v3'), 4, False),
        'vecenv_type' : 'RAY'
    },
    'QuadruppedWalk-v1' : {
        'env_creator' : lambda **kwargs : create_quadrupped_env(),
        'vecenv_type' : 'RAY'
    },
    'FlexAnt' : {
        'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/ant.yaml'),
        'vecenv_type' : 'ISAAC'
    },
    'FlexHumanoid' : {
        'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/humanoid.yaml'),
        'vecenv_type' : 'ISAAC'
    },
    'FlexHumanoidHard' : {
        'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/humanoid_hard.yaml'),
        'vecenv_type' : 'ISAAC'
    },
    'smac' : {
        'env_creator' : lambda **kwargs : create_smac(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'smac_cnn' : {
        'env_creator' : lambda **kwargs : create_smac_cnn(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'dm_control' : {
        'env_creator' : lambda **kwargs : create_dm_control_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'openai_gym' : {
        'env_creator' : lambda **kwargs : create_default_gym_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'openai_robot_gym' : {
        'env_creator' : lambda **kwargs : create_goal_gym_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'atari_gym' : {
        'env_creator' : lambda **kwargs : create_atari_gym_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'slime_gym' : {
        'env_creator' : lambda **kwargs : create_slime_gym_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'test_env' : {
        'env_creator' : lambda **kwargs : create_test_env(kwargs.pop('name'), **kwargs),
        'vecenv_type' : 'RAY'
    },
    'minigrid_env' : {
        'env_creator' : lambda **kwargs : create_minigrid_env(kwargs.pop('name'), **kwargs),
        'vecenv_type' : 'RAY'
    },
    'connect4_env' : {
        'env_creator' : lambda **kwargs : create_connect_four_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'multiwalker_env' : {
        'env_creator' : lambda **kwargs : create_multiwalker_env(**kwargs),
        'vecenv_type' : 'RAY'
    },
    'diambra': {
        'env_creator': lambda **kwargs: create_diambra_env(**kwargs),
        'vecenv_type': 'RAY'
    },
    'brax' : {
        'env_creator': lambda **kwargs: create_brax_env(**kwargs),
        'vecenv_type': 'BRAX'
    },
    'envpool': {
        'env_creator': lambda **kwargs: create_envpool(**kwargs),
        'vecenv_type': 'ENVPOOL'
    },
}
def get_env_info(env):
    """Collect observation/action spaces, agent count and value size from a
    constructed env. Agent count and value size fall back to 1 when the env
    does not expose them."""
    result_shapes = {
        'observation_space': env.observation_space,
        'action_space': env.action_space,
        'agents': env.get_number_of_agents() if hasattr(env, "get_number_of_agents") else 1,
        'value_size': env.value_size if hasattr(env, "value_size") else 1,
    }
    print(result_shapes)
    return result_shapes
def get_obs_and_action_spaces_from_config(config):
    """Instantiate the env named in config once, read its spaces/metadata,
    close it, and return the metadata dict."""
    creator = configurations[config['env_name']]['env_creator']
    env = creator(**config.get('env_config', {}))
    result_shapes = get_env_info(env)
    env.close()
    return result_shapes
def register(name, config):
    """Add (or overwrite) an entry in the global env registry."""
    configurations[name] = config
import numpy as np
import random
import gym
import torch
from rl_games.common.segment_tree import SumSegmentTree, MinSegmentTree
import torch
from rl_games.algos_torch.torch_ext import numpy_to_torch_dtype_dict
class ReplayBuffer(object):
    """Uniform-sampling FIFO replay buffer backed by preallocated numpy arrays."""

    def __init__(self, size, ob_space):
        """Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        ob_space: gym.Space-like
            Object exposing ``shape`` and ``dtype`` of a single observation.
        """
        self._obses = np.zeros((size,) + ob_space.shape, dtype=ob_space.dtype)
        self._next_obses = np.zeros((size,) + ob_space.shape, dtype=ob_space.dtype)
        self._rewards = np.zeros(size)
        self._actions = np.zeros(size, dtype=np.int32)
        # fixed: np.bool alias was removed in NumPy 1.24; use builtin bool dtype
        self._dones = np.zeros(size, dtype=bool)
        self._maxsize = size
        self._next_idx = 0       # slot to overwrite next (ring buffer)
        self._curr_size = 0      # number of valid transitions stored

    def __len__(self):
        return self._curr_size

    def add(self, obs_t, action, reward, obs_tp1, done):
        """Store one transition, overwriting the oldest when full."""
        self._curr_size = min(self._curr_size + 1, self._maxsize)
        self._obses[self._next_idx] = obs_t
        self._next_obses[self._next_idx] = obs_tp1
        self._rewards[self._next_idx] = reward
        self._actions[self._next_idx] = action
        self._dones[self._next_idx] = done
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def _get(self, idx):
        """Return the transition stored at a single index."""
        return self._obses[idx], self._actions[idx], self._rewards[idx], self._next_obses[idx], self._dones[idx]

    def _encode_sample(self, idxes):
        """Gather the transitions at `idxes` as batched arrays (vectorized
        fancy indexing instead of a python loop; fancy indexing copies)."""
        idxes = np.asarray(idxes)
        return (self._obses[idxes], self._actions[idxes], self._rewards[idxes],
                self._next_obses[idxes], self._dones[idxes])

    def sample(self, batch_size):
        """Sample a batch of experiences uniformly with replacement.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        """
        idxes = [random.randint(0, self._curr_size - 1) for _ in range(batch_size)]
        return self._encode_sample(idxes)
class PrioritizedReplayBuffer(ReplayBuffer):
    # Proportional prioritized experience replay on top of ReplayBuffer,
    # using sum/min segment trees for O(log n) sampling and priority updates.
    def __init__(self, size, alpha, ob_space):
        """Create Prioritized Replay buffer.
        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        alpha: float
            how much prioritization is used
            (0 - no prioritization, 1 - full prioritization)
        See Also
        --------
        ReplayBuffer.__init__
        """
        super(PrioritizedReplayBuffer, self).__init__(size, ob_space)
        assert alpha >= 0
        self._alpha = alpha

        # segment trees require a power-of-two capacity
        it_capacity = 1
        while it_capacity < size:
            it_capacity *= 2

        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        # new transitions get the max priority seen so far, so they are
        # sampled at least once before their priority is refined
        self._max_priority = 1.0

    def add(self, *args, **kwargs):
        """See ReplayBuffer.store_effect"""
        idx = self._next_idx
        super().add(*args, **kwargs)
        # store priority^alpha in both trees for this slot
        self._it_sum[idx] = self._max_priority ** self._alpha
        self._it_min[idx] = self._max_priority ** self._alpha

    def _sample_proportional(self, batch_size):
        # stratified sampling: one uniform draw per equal-mass stratum of the
        # total priority, mapped to an index via the prefix-sum tree
        res = []
        p_total = self._it_sum.sum(0, self._curr_size - 1)
        every_range_len = p_total / batch_size
        for i in range(batch_size):
            mass = random.random() * every_range_len + i * every_range_len
            idx = self._it_sum.find_prefixsum_idx(mass)
            res.append(idx)
        return res

    def sample(self, batch_size, beta):
        """Sample a batch of experiences.
        compared to ReplayBuffer.sample
        it also returns importance weights and idxes
        of sampled experiences.
        Parameters
        ----------
        batch_size: int
            How many transitions to sample.
        beta: float
            To what degree to use importance weights
            (0 - no corrections, 1 - full correction)
        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        weights: np.array
            Array of shape (batch_size,) and dtype np.float32
            denoting importance weight of each sampled transition
        idxes: np.array
            Array of shape (batch_size,) and dtype np.int32
            idexes in buffer of sampled experiences
        """
        assert beta > 0

        idxes = self._sample_proportional(batch_size)

        weights = []
        # importance weights are normalized by the maximum possible weight
        # (which corresponds to the minimum-priority transition)
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * self._curr_size) ** (-beta)

        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * self._curr_size) ** (-beta)
            weights.append(weight / max_weight)
        weights = np.array(weights)
        encoded_sample = self._encode_sample(idxes)
        return tuple(list(encoded_sample) + [weights, idxes])

    def update_priorities(self, idxes, priorities):
        """Update priorities of sampled transitions.
        sets priority of transition at index idxes[i] in buffer
        to priorities[i].
        Parameters
        ----------
        idxes: [int]
            List of idxes of sampled transitions
        priorities: [float]
            List of updated priorities corresponding to
            transitions at the sampled idxes denoted by
            variable `idxes`.
        """
        assert len(idxes) == len(priorities)
        for idx, priority in zip(idxes, priorities):
            assert priority > 0
            assert 0 <= idx < self._curr_size
            # both trees store priority^alpha; max priority tracks raw priority
            self._it_sum[idx] = priority ** self._alpha
            self._it_min[idx] = priority ** self._alpha

            self._max_priority = max(self._max_priority, priority)
class VectorizedReplayBuffer:
    """Fixed-capacity circular replay buffer for batched (vectorized) env
    transitions, stored entirely as torch tensors on one device."""

    def __init__(self, obs_shape, action_shape, capacity, device):
        """Create Vectorized Replay buffer.

        Parameters
        ----------
        obs_shape: tuple
            shape of a single observation
        action_shape: tuple
            shape of a single action
        capacity: int
            max number of transitions; oldest entries are overwritten on overflow
        device: torch.device or str
            device the storage tensors live on
        """
        self.device = device
        self.capacity = capacity

        def alloc(shape, dtype=torch.float32):
            return torch.empty((capacity, *shape), dtype=dtype, device=device)

        self.obses = alloc(obs_shape)
        self.next_obses = alloc(obs_shape)
        self.actions = alloc(action_shape)
        self.rewards = alloc((1,))
        self.dones = alloc((1,), torch.bool)

        self.idx = 0        # next write position
        self.full = False   # True once the buffer has wrapped at least once

    def add(self, obs, action, reward, next_obs, done):
        """Insert a batch of transitions, wrapping around at capacity."""
        n = obs.shape[0]
        tail = min(self.capacity - self.idx, n)  # slots available before wrap
        wrap = n - tail

        pairs = ((self.obses, obs), (self.actions, action), (self.rewards, reward),
                 (self.next_obses, next_obs), (self.dones, done))

        if wrap > 0:
            # spill the newest `wrap` transitions to the start of the buffer
            for storage, batch in pairs:
                storage[0:wrap] = batch[-wrap:]
            self.full = True
        for storage, batch in pairs:
            storage[self.idx: self.idx + tail] = batch[:tail]

        self.idx = (self.idx + n) % self.capacity
        self.full = self.full or self.idx == 0

    def sample(self, batch_size):
        """Sample a uniform batch of stored transitions.

        Returns
        -------
        (obses, actions, rewards, next_obses, dones): torch tensors
            batched transition components; `dones` is a bool tensor marking
            episode termination at each sampled step.
        """
        high = self.capacity if self.full else self.idx
        idxs = torch.randint(0, high, (batch_size,), device=self.device)
        return (self.obses[idxs], self.actions[idxs], self.rewards[idxs],
                self.next_obses[idxs], self.dones[idxs])
class ExperienceBuffer:
    '''
    More generalized than replay buffers; implemented for on-policy algos.

    Pre-allocates torch tensors of shape (horizon_length, num_actors * num_agents, ...)
    for observations, actions, values, etc., derived from the env's gym spaces.
    '''
    def __init__(self, env_info, algo_info, device, aux_tensor_dict=None):
        """
        env_info: dict with 'action_space', 'observation_space' and optional
            'agents', 'value_size', 'state_space' entries.
        algo_info: dict with 'num_actors', 'horizon_length',
            'has_central_value' and optional 'use_action_masks'.
        device: torch device the storage tensors live on.
        aux_tensor_dict: optional {name: shape} of extra float tensors.
        """
        self.env_info = env_info
        self.algo_info = algo_info
        self.device = device

        self.num_agents = env_info.get('agents', 1)
        self.action_space = env_info['action_space']

        self.num_actors = algo_info['num_actors']
        self.horizon_length = algo_info['horizon_length']
        self.has_central_value = algo_info['has_central_value']
        self.use_action_masks = algo_info.get('use_action_masks', False)

        self.is_discrete = False
        self.is_multi_discrete = False
        self.is_continuous = False

        # per-agent data is flattened into the actor axis; central-value
        # states are stored per-actor only
        self.obs_base_shape = (self.horizon_length, self.num_agents * self.num_actors)
        self.state_base_shape = (self.horizon_length, self.num_actors)

        if type(self.action_space) is gym.spaces.Discrete:
            self.actions_shape = ()
            self.actions_num = self.action_space.n
            self.is_discrete = True
        if type(self.action_space) is gym.spaces.Tuple:
            # assumed to be a tuple of Discrete spaces (multi-discrete actions)
            self.actions_shape = (len(self.action_space),)
            self.actions_num = [action.n for action in self.action_space]
            self.is_multi_discrete = True
        if type(self.action_space) is gym.spaces.Box:
            self.actions_shape = (self.action_space.shape[0],)
            self.actions_num = self.action_space.shape[0]
            self.is_continuous = True

        self.tensor_dict = {}
        self._init_from_env_info(self.env_info)

        self.aux_tensor_dict = aux_tensor_dict
        if self.aux_tensor_dict is not None:
            self._init_from_aux_dict(self.aux_tensor_dict)

    def _init_from_env_info(self, env_info):
        """Allocate the standard rollout tensors from the env's spaces."""
        obs_base_shape = self.obs_base_shape
        state_base_shape = self.state_base_shape

        self.tensor_dict['obses'] = self._create_tensor_from_space(env_info['observation_space'], obs_base_shape)
        if self.has_central_value:
            self.tensor_dict['states'] = self._create_tensor_from_space(env_info['state_space'], state_base_shape)

        val_space = gym.spaces.Box(low=0, high=1, shape=(env_info.get('value_size', 1),))
        self.tensor_dict['rewards'] = self._create_tensor_from_space(val_space, obs_base_shape)
        self.tensor_dict['values'] = self._create_tensor_from_space(val_space, obs_base_shape)
        self.tensor_dict['neglogpacs'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=(), dtype=np.float32), obs_base_shape)
        self.tensor_dict['dones'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=(), dtype=np.uint8), obs_base_shape)

        if self.is_discrete or self.is_multi_discrete:
            # fixed: np.long / np.bool aliases were removed in NumPy 1.24;
            # use np.int64 / builtin bool (identical dtypes after gym's
            # np.dtype conversion on Linux)
            self.tensor_dict['actions'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=self.actions_shape, dtype=np.int64), obs_base_shape)
            if self.use_action_masks:
                self.tensor_dict['action_masks'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=self.actions_shape + (np.sum(self.actions_num),), dtype=bool), obs_base_shape)
        if self.is_continuous:
            float_space = gym.spaces.Box(low=0, high=1, shape=self.actions_shape, dtype=np.float32)
            self.tensor_dict['actions'] = self._create_tensor_from_space(float_space, obs_base_shape)
            self.tensor_dict['mus'] = self._create_tensor_from_space(float_space, obs_base_shape)
            self.tensor_dict['sigmas'] = self._create_tensor_from_space(float_space, obs_base_shape)

    def _init_from_aux_dict(self, tensor_dict):
        """Allocate extra float32 tensors from a {name: shape} mapping."""
        obs_base_shape = self.obs_base_shape
        for k, v in tensor_dict.items():
            self.tensor_dict[k] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1, shape=(v), dtype=np.float32), obs_base_shape)

    def _create_tensor_from_space(self, space, base_shape):
        """Create a zeroed torch tensor (or dict of tensors) matching `space`
        with `base_shape` prepended. Returns None for unsupported spaces."""
        if type(space) is gym.spaces.Box:
            dtype = numpy_to_torch_dtype_dict[space.dtype]
            return torch.zeros(base_shape + space.shape, dtype=dtype, device=self.device)
        if type(space) is gym.spaces.Discrete:
            dtype = numpy_to_torch_dtype_dict[space.dtype]
            return torch.zeros(base_shape, dtype=dtype, device=self.device)
        if type(space) is gym.spaces.Tuple:
            # assumed to be a tuple of Discrete spaces only
            dtype = numpy_to_torch_dtype_dict[space.dtype]
            return torch.zeros(base_shape + (len(space),), dtype=dtype, device=self.device)
        if type(space) is gym.spaces.Dict:
            return {k: self._create_tensor_from_space(v, base_shape) for k, v in space.spaces.items()}

    def update_data(self, name, index, val):
        """Write one step (timestep `index`) of data, recursing into dicts."""
        if isinstance(val, dict):
            for k, v in val.items():
                self.tensor_dict[name][k][index, :] = v
        else:
            self.tensor_dict[name][index, :] = val

    def update_data_rnn(self, name, indices, play_mask, val):
        """Write RNN-style data at (indices, play_mask) positions."""
        if isinstance(val, dict):
            # fixed: iterating a dict yields keys only; iterate items() so
            # the (k, v) unpacking works
            for k, v in val.items():
                self.tensor_dict[name][k][indices, play_mask] = v
        else:
            self.tensor_dict[name][indices, play_mask] = val

    def _transform_value(self, transform_op, v):
        # apply transform_op to a tensor, or element-wise to a dict of tensors
        if isinstance(v, dict):
            return {kd: transform_op(vd) for kd, vd in v.items()}
        return transform_op(v)

    def get_transformed(self, transform_op):
        """Return a dict with transform_op applied to every stored tensor."""
        return {k: self._transform_value(transform_op, v) for k, v in self.tensor_dict.items()}

    def get_transformed_list(self, transform_op, tensor_list):
        """Like get_transformed, restricted to names in tensor_list; missing
        or None entries are skipped."""
        res_dict = {}
        for k in tensor_list:
            v = self.tensor_dict.get(k)
            if v is None:
                continue
            res_dict[k] = self._transform_value(transform_op, v)
        return res_dict
import torch
import copy
from torch.utils.data import Dataset
class PPODataset(Dataset):
    """Minibatch view over one rollout's worth of flattened PPO training
    data. Non-RNN mode slices flat ranges; RNN mode slices whole sequences
    (games) so hidden states stay aligned."""

    def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
        self.is_rnn = is_rnn
        self.seq_len = seq_len
        self.batch_size = batch_size
        self.minibatch_size = minibatch_size
        self.device = device
        self.length = self.batch_size // self.minibatch_size
        self.is_discrete = is_discrete
        self.is_continuous = not is_discrete

        total_games = self.batch_size // self.seq_len
        self.num_games_batch = self.minibatch_size // self.seq_len
        self.game_indexes = torch.arange(total_games, dtype=torch.long, device=self.device)
        self.flat_indexes = torch.arange(total_games * self.seq_len, dtype=torch.long, device=self.device).reshape(total_games, self.seq_len)

        # keys handled specially (not sliced like regular tensors)
        self.special_names = ['rnn_states']

    def update_values_dict(self, values_dict):
        self.values_dict = values_dict

    def update_mu_sigma(self, mu, sigma):
        # write refreshed policy outputs back into the slice served last
        start, end = self.last_range
        self.values_dict['mu'][start:end] = mu
        self.values_dict['sigma'][start:end] = sigma

    def __len__(self):
        return self.length

    def _get_item_rnn(self, idx):
        # slice whole games so sequences and hidden states stay aligned
        gstart = idx * self.num_games_batch
        gend = (idx + 1) * self.num_games_batch
        start = gstart * self.seq_len
        end = gend * self.seq_len
        self.last_range = (start, end)

        input_dict = {}
        for key, value in self.values_dict.items():
            if key in self.special_names:
                continue
            if isinstance(value, dict):
                input_dict[key] = {kd: vd[start:end] for kd, vd in value.items()}
            elif value is not None:
                input_dict[key] = value[start:end]
            else:
                input_dict[key] = None

        rnn_states = self.values_dict['rnn_states']
        # hidden states are indexed per-game along dim 1
        input_dict['rnn_states'] = [s[:, gstart:gend, :].contiguous() for s in rnn_states]
        return input_dict

    def _get_item(self, idx):
        start = idx * self.minibatch_size
        end = (idx + 1) * self.minibatch_size
        self.last_range = (start, end)

        input_dict = {}
        for key, value in self.values_dict.items():
            if key in self.special_names or value is None:
                continue
            if isinstance(value, dict):
                input_dict[key] = {kd: vd[start:end] for kd, vd in value.items()}
            else:
                input_dict[key] = value[start:end]
        return input_dict

    def __getitem__(self, idx):
        return self._get_item_rnn(idx) if self.is_rnn else self._get_item(idx)
class DatasetList(Dataset):
    """A concatenation of several (deep-copied) datasets, served interleaved.

    Index *idx* maps to dataset ``idx % num_datasets`` at inner position
    ``idx // num_datasets``, so consecutive indices alternate across the
    stored datasets.  All datasets are assumed to share the same ``length``.
    """

    def __init__(self):
        self.dataset_list = []

    def __len__(self):
        return self.dataset_list[0].length * len(self.dataset_list)

    def add_dataset(self, dataset):
        # deep-copy so later mutation of the source dataset cannot alias
        self.dataset_list.append(copy.deepcopy(dataset))

    def clear(self):
        self.dataset_list = []

    def __getitem__(self, idx):
        num_sets = len(self.dataset_list)
        return self.dataset_list[idx % num_sets][idx // num_sets]
import torch
from torch import nn
def repackage_hidden(h):
    """Detach hidden state(s) from their autograd history.

    Accepts a single tensor or an arbitrarily nested tuple of tensors and
    returns the same structure with every tensor detached.
    """
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(item) for item in h)
def multiply_hidden(h, mask):
    """Element-wise multiply hidden state(s) by *mask*, preserving structure.

    Used to zero RNN states for environments whose episode just ended.
    """
    if isinstance(h, torch.Tensor):
        return h * mask
    return tuple(multiply_hidden(item, mask) for item in h)
class RnnWithDones(nn.Module):
    """Wrap an RNN layer so hidden states are reset where episodes end.

    ``done_masks`` marks environment resets inside the unrolled sequence.
    Instead of stepping one timestep at a time, the sequence is split at the
    timesteps where any environment finished, the hidden state is zeroed
    there (by multiplying with ``1 - done``), and the wrapped RNN runs on
    each contiguous segment.  (Idea borrowed from ikostrikov's
    pytorch-a2c-ppo-acktr implementation.)
    """

    def __init__(self, rnn_layer):
        nn.Module.__init__(self)
        self.rnn = rnn_layer

    def forward(self, input, states, done_masks=None, bptt_len=0):
        """Run the wrapped RNN over (time, batch, features) input.

        :param states: initial hidden state(s) of the wrapped RNN
        :param done_masks: per-step done flags, or None for a plain rollout
        :param bptt_len: currently ignored
        :return: (output, final_states)
        """
        # fixed: compare to None with `is`, not `==`
        if done_masks is None:
            return self.rnn(input, states)

        max_steps = input.size()[0]
        not_dones = 1.0 - done_masks
        # Timesteps in 1..T-1 where at least one env finished: the hidden
        # state must be masked before the RNN processes these steps.
        has_zeros = ((not_dones.squeeze()[1:] == 0.0)
                     .any(dim=-1)
                     .nonzero()
                     .squeeze()
                     .cpu())

        # +1 to compensate for the [1:] slice above
        if has_zeros.dim() == 0:
            # a single hit comes back as a scalar tensor
            has_zeros = [has_zeros.item() + 1]
        else:
            has_zeros = (has_zeros + 1).numpy().tolist()

        # add t=0 and t=T so the segments cover the whole sequence
        has_zeros = [0] + has_zeros + [max_steps]

        out_batch = []
        for i in range(len(has_zeros) - 1):
            start_idx = has_zeros[i]
            end_idx = has_zeros[i + 1]
            # zero the hidden state for envs that were done at segment start
            not_done = not_dones[start_idx].float().unsqueeze(0)
            states = multiply_hidden(states, not_done)
            out, states = self.rnn(input[start_idx:end_idx], states)
            out_batch.append(out)
        return torch.cat(out_batch, dim=0), states
class LSTMWithDones(RnnWithDones):
    """``torch.nn.LSTM`` wrapped with done-mask aware unrolling."""

    def __init__(self, *args, **kwargs):
        RnnWithDones.__init__(self, torch.nn.LSTM(*args, **kwargs))
class GRUWithDones(RnnWithDones):
    """``torch.nn.GRU`` wrapped with done-mask aware unrolling."""

    def __init__(self, *args, **kwargs):
        RnnWithDones.__init__(self, torch.nn.GRU(*args, **kwargs))
# RL Games: High performance RL library
## Discord Channel Link
* https://discord.gg/hnYRq7DsQh
## Papers and related links
* Isaac Gym: High Performance GPU-Based Physics Simulation For Robot Learning: https://arxiv.org/abs/2108.10470
* DeXtreme: Transfer of Agile In-Hand Manipulation from Simulation to Reality: https://dextreme.org/ https://arxiv.org/abs/2210.13702
* Transferring Dexterous Manipulation from GPU Simulation to a Remote Real-World TriFinger: https://s2r2-ig.github.io/ https://arxiv.org/abs/2108.09779
* Is Independent Learning All You Need in the StarCraft Multi-Agent Challenge? <https://arxiv.org/abs/2011.09533>
* Superfast Adversarial Motion Priors (AMP) implementation: https://twitter.com/xbpeng4/status/1506317490766303235 https://github.com/NVIDIA-Omniverse/IsaacGymEnvs
* OSCAR: Data-Driven Operational Space Control for Adaptive and Robust Robot Manipulation: https://cremebrule.github.io/oscar-web/ https://arxiv.org/abs/2110.00704
* EnvPool: A Highly Parallel Reinforcement Learning Environment Execution Engine: https://arxiv.org/abs/2206.10558 and https://github.com/sail-sg/envpool
* TimeChamber: A Massively Parallel Large Scale Self-Play Framework: https://github.com/inspirai/TimeChamber
## Some results on the different environments
* [NVIDIA Isaac Gym](docs/ISAAC_GYM.md)




* [Dextreme](https://dextreme.org/)

* [Starcraft 2 Multi Agents](docs/SMAC.md)
* [BRAX](docs/BRAX.md)
* [Mujoco Envpool](docs/MUJOCO_ENVPOOL.md)
* [Atari Envpool](docs/ATARI_ENVPOOL.md)
* [Random Envs](docs/OTHER.md)
Implemented in Pytorch:
* PPO with the support of asymmetric actor-critic variant
* Support of end-to-end GPU accelerated training pipeline with Isaac Gym and Brax
* Masked actions support
* Multi-agent training, decentralized and centralized critic variants
* Self-play
Implemented in Tensorflow 1.x (was removed in this version):
* Rainbow DQN
* A2C
* PPO
## Quickstart: Colab in the Cloud
Explore RL Games quick and easily in colab notebooks:
* [Mujoco training](https://colab.research.google.com/github/Denys88/rl_games/blob/master/notebooks/mujoco_envpool_training.ipynb) Mujoco envpool training example.
* [Brax training](https://colab.research.google.com/github/Denys88/rl_games/blob/master/notebooks/brax_training.ipynb) Brax training example, with keeping all the observations and actions on GPU.
* [Onnx discrete space export example with Cartpole](https://colab.research.google.com/github/Denys88/rl_games/blob/master/notebooks/train_and_export_onnx_example_discrete.ipynb) envpool training example.
* [Onnx continuous space export example with Pendulum](https://colab.research.google.com/github/Denys88/rl_games/blob/master/notebooks/train_and_export_onnx_example_continuous.ipynb) envpool training example.
## Installation
For maximum training performance a preliminary installation of Pytorch 1.9+ with CUDA 11.1+ is highly recommended:
```conda install pytorch torchvision cudatoolkit=11.3 -c pytorch -c nvidia``` or:
```pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html```
Then:
```pip install rl-games```
To run CPU-based environments either Ray or envpool are required ```pip install envpool``` or ```pip install ray```
To run Mujoco, Atari games or Box2d based environments training they need to be additionally installed with ```pip install gym[mujoco]```, ```pip install gym[atari]``` or ```pip install gym[box2d]``` respectively.
To run Atari also ```pip install opencv-python``` is required. In addition, installation of envpool for maximum simulation and training performance of Mujoco and Atari environments is highly recommended: ```pip install envpool```
## Citing
If you use rl-games in your research please use the following citation:
```bibtex
@misc{rl-games2021,
title = {rl-games: A High-performance Framework for Reinforcement Learning},
author = {Makoviichuk, Denys and Makoviychuk, Viktor},
month = {May},
year = {2021},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/Denys88/rl_games}},
}
```
## Development setup
```bash
poetry install
# install cuda related dependencies
poetry run pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
```
## Training
**NVIDIA Isaac Gym**
Download and follow the installation instructions of Isaac Gym: https://developer.nvidia.com/isaac-gym
And IsaacGymEnvs: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs
*Ant*
```python train.py task=Ant headless=True```
```python train.py task=Ant test=True checkpoint=nn/Ant.pth num_envs=100```
*Humanoid*
```python train.py task=Humanoid headless=True```
```python train.py task=Humanoid test=True checkpoint=nn/Humanoid.pth num_envs=100```
*Shadow Hand block orientation task*
```python train.py task=ShadowHand headless=True```
```python train.py task=ShadowHand test=True checkpoint=nn/ShadowHand.pth num_envs=100```
**Other**
*Atari Pong*
```bash
poetry install -E atari
poetry run python runner.py --train --file rl_games/configs/atari/ppo_pong.yaml
poetry run python runner.py --play --file rl_games/configs/atari/ppo_pong.yaml --checkpoint nn/PongNoFrameskip.pth
```
*Brax Ant*
```bash
poetry install -E brax
poetry run pip install --upgrade "jax[cuda]==0.3.13" -f https://storage.googleapis.com/jax-releases/jax_releases.html
poetry run python runner.py --train --file rl_games/configs/brax/ppo_ant.yaml
poetry run python runner.py --play --file rl_games/configs/brax/ppo_ant.yaml --checkpoint runs/Ant_brax/nn/Ant_brax.pth
```
## Experiment tracking
rl_games support experiment tracking with [Weights and Biases](https://wandb.ai).
```bash
poetry install -E atari
poetry run python runner.py --train --file rl_games/configs/atari/ppo_breakout_torch.yaml --track
WANDB_API_KEY=xxxx poetry run python runner.py --train --file rl_games/configs/atari/ppo_breakout_torch.yaml --track
poetry run python runner.py --train --file rl_games/configs/atari/ppo_breakout_torch.yaml --wandb-project-name rl-games-special-test --track
poetry run python runner.py --train --file rl_games/configs/atari/ppo_breakout_torch.yaml --wandb-project-name rl-games-special-test --wandb-entity openrlbenchmark --track
```
## Multi GPU
We use `torchrun` to orchestrate any multi-gpu runs.
```bash
torchrun --standalone --nnodes=1 --nproc_per_node=2 runner.py --train --file rl_games/configs/ppo_cartpole.yaml
```
## Config Parameters
| Field | Example Value | Default | Description |
| ---------------------- | ------------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| seed | 8 | None | Seed for pytorch, numpy etc. |
| algo | | | Algorithm block. |
| name | a2c_continuous | None | Algorithm name. Possible values are: sac, a2c_discrete, a2c_continuous |
| model | | | Model block. |
| name | continuous_a2c_logstd | None | Possible values: continuous_a2c ( expects sigma to be (0, +inf), continuous_a2c_logstd ( expects sigma to be (-inf, +inf), a2c_discrete, a2c_multi_discrete |
| network | | | Network description. |
| name | actor_critic | | Possible values: actor_critic or soft_actor_critic. |
| separate | False | | Whether or not to use a separate network with the same architecture for the critic. In almost all cases if you normalize value it is better to have it False |
| space | | | Network space |
| continuous | | | continuous or discrete |
| mu_activation | None | | Activation for mu. In almost all cases None works the best, but we may try tanh. |
| sigma_activation | None | | Activation for sigma. Will be treated as log(sigma) or sigma depending on model. |
| mu_init | | | Initializer for mu. |
| name | default | | |
| sigma_init | | | Initializer for sigma. if you are using logstd model good value is 0. |
| name | const_initializer | | |
| val | 0 | | |
| fixed_sigma | True | | If true then sigma vector doesn't depend on input. |
| cnn | | | Convolution block. |
| type | conv2d | | Type: right now two types supported: conv2d or conv1d |
| activation | elu | | activation between conv layers. |
| initializer | | | Initializer. I took some names from the tensorflow. |
| name | glorot_normal_initializer | | Initializer name |
| gain | 1.4142 | | Additional parameter. |
| convs | | | Convolution layers. Same parameters as we have in torch. |
| filters | 32 | | Number of filters. |
| kernel_size | 8 | | Kernel size. |
| strides | 4 | | Strides |
| padding | 0 | | Padding |
| filters | 64 | | Next convolution layer info. |
| kernel_size | 4 | | |
| strides | 2 | | |
| padding | 0 | | |
| filters | 64 | | |
| kernel_size | 3 | | |
| strides | 1 | | |
| padding | 0 | |
| mlp | | | MLP Block. Convolution is supported too. See other config examples. |
| units | | | Array of sizes of the MLP layers, for example: [512, 256, 128] |
| d2rl | False | | Use d2rl architecture from https://arxiv.org/abs/2010.09163. |
| activation | elu | | Activations between dense layers. |
| initializer | | | Initializer. |
| name | default | | Initializer name. |
| rnn | | | RNN block. |
| name | lstm | | RNN Layer name. lstm and gru are supported. |
| units | 256 | | Number of units. |
| layers | 1 | | Number of layers |
| before_mlp | False | False | Apply rnn before mlp block or not. |
| config | | | RL Config block. |
| reward_shaper | | | Reward Shaper. Can apply simple transformations. |
| min_val | -1 | | You can apply min_val, max_val, scale and shift. |
| scale_value | 0.1 | 1 | |
| normalize_advantage | True | True | Normalize Advantage. |
| gamma | 0.995 | | Reward Discount |
| tau | 0.95 | | Lambda for GAE. Called tau by mistake long time ago because lambda is keyword in python :( |
| learning_rate | 3e-4 | | Learning rate. |
| name | walker | | Name which will be used in tensorboard. |
| save_best_after | 10 | | How many epochs to wait before start saving checkpoint with best score. |
| score_to_win | 300 | | If score is >=value then this value training will stop. |
| grad_norm | 1.5 | | Grad norm. Applied if truncate_grads is True. Good value is in (1.0, 10.0) |
| entropy_coef | 0 | | Entropy coefficient. Good value for continuous space is 0. For discrete is 0.02 |
| truncate_grads | True | | Apply truncate grads or not. It stabilizes training. |
| env_name | BipedalWalker-v3 | | Environment name. |
| e_clip | 0.2 | | clip parameter for ppo loss. |
| clip_value | False | | Apply clip to the value loss. If you are using normalize_value you don't need it. |
| num_actors | 16 | | Number of running actors/environments. |
| horizon_length | 4096 | | Horizon length per each actor. Total number of steps will be num_actors*horizon_length * num_agents (if env is not MA num_agents==1). |
| minibatch_size | 8192 | | Minibatch size. The total number of steps must be divisible by the minibatch size. |
| minibatch_size_per_env | 8 | | Minibatch size per env. If specified, will overwrite the default minibatch size with the minibatch_size_per_env * num_envs value. |
| mini_epochs | 4 | | Number of miniepochs. Good value is in [1,10] |
| critic_coef | 2 | | Critic coef. by default critic_loss = critic_coef * 1/2 * MSE. |
| lr_schedule | adaptive | None | Scheduler type. Could be None, linear or adaptive. Adaptive is the best for continuous control tasks. Learning rate is changed every miniepoch |
| kl_threshold | 0.008 | | KL threshold for the adaptive schedule. If KL < kl_threshold/2 then lr = lr * 1.5, and the opposite. |
| normalize_input | True | | Apply running mean std for input. |
| bounds_loss_coef | 0.0 | | Coefficient to the auxiliary loss for continuous space. |
| max_epochs | 10000 | | Maximum number of epochs to run. |
| max_frames | 5000000 | | Maximum number of frames (env steps) to run. |
| normalize_value | True | | Use value running mean std normalization. |
| use_diagnostics | True | | Adds more information into the tensorboard. |
| value_bootstrap | True | | Bootstraping value when episode is finished. Very useful for different locomotion envs. |
| bound_loss_type | regularisation | None | Adds aux loss for continuous case. 'regularisation' is the sum of squared actions. 'bound' is the sum of actions higher than 1.1. |
| bounds_loss_coef | 0.0005 | 0 | Regularisation coefficient |
| use_smooth_clamp | False | | Use smooth clamp instead of regular for cliping |
| zero_rnn_on_done | False | True | If False, RNN internal state is not reset (set to 0) when an environment is reset. Could improve training in some cases, for example when domain randomization is on |
| player | | | Player configuration block. |
| render | True | False | Render environment |
| deterministic | True | True | Use deterministic policy ( argmax or mu) or stochastic. |
| use_vecenv | True | False | Use vecenv to create environment for player |
| games_num | 200 | | Number of games to run in the player mode. |
| env_config | | | Env configuration block. It goes directly to the environment. This example was take for my atari wrapper. |
| skip | 4 | | Number of frames to skip |
| name | BreakoutNoFrameskip-v4 | | The exact name of an (atari) gym env. An example, depends on the training env this parameters can be different. |
## Custom network example:
[simple test network](rl_games/envs/test_network.py)
This network takes dictionary observation.
To register it you can add code in your __init__.py
```
from rl_games.envs.test_network import TestNetBuilder
from rl_games.algos_torch import model_builder
model_builder.register_network('testnet', TestNetBuilder)
```
[simple test environment](rl_games/envs/test/rnn_env.py)
[example environment](rl_games/envs/test/example_env.py)
Additional environment supported properties and functions
| Field | Default Value | Description |
| -------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| use_central_value | False | If true than returned obs is expected to be dict with 'obs' and 'state' |
| value_size | 1 | Shape of the returned rewards. Network will support multihead value automatically. |
| concat_infos | False | Should default vecenv convert list of dicts to the dicts of lists. Very useful if you want to use value bootstrapping. In this case you need to always return 'time_outs' : True or False, from the env. |
| get_number_of_agents(self) | 1 | Returns number of agents in the environment |
| has_action_mask(self) | False | Returns True if environment has invalid actions mask. |
| get_action_mask(self) | None | Returns action masks if has_action_mask is true. Good example is [SMAC Env](rl_games/envs/test/smac_env.py) |
## Release Notes
1.6.0
* Added ONNX export colab example for discrete and continuous action spaces. For the continuous case an LSTM policy example is provided as well.
* Improved RNNs training in continuous space, added option `zero_rnn_on_done`.
* Added NVIDIA CuLE support: https://github.com/NVlabs/cule
* Added player config override. Vecenv is used for inference.
* Fixed multi-gpu training with central value.
* Fixed max_frames termination condition, and it's interaction with the linear learning rate: https://github.com/Denys88/rl_games/issues/212
* Fixed "deterministic" misspelling issue.
* Fixed Mujoco and Brax SAC configs.
* Fixed multiagent envs statistics reporting. Fixed Starcraft2 SMAC environments.
1.5.2
* Added observation normalization to the SAC.
* Returned back adaptive KL legacy mode.
1.5.1
* Fixed build package issue.
1.5.0
* Added wandb support.
* Added poetry support.
* Fixed various bugs.
* Fixed cnn input was not divided by 255 in case of the dictionary obs.
* Added more envpool mujoco and atari training examples. Some of the results: 15 min Mujoco humanoid training, 2 min atari pong.
* Added Brax and Mujoco colab training examples.
* Added 'seed' command line parameter. Will override seed in config in case it's > 0.
* Deprecated `horovod` in favor of `torch.distributed` ([#171](https://github.com/Denys88/rl_games/pull/171)).
1.4.0
* Added discord channel https://discord.gg/hnYRq7DsQh :)
* Added envpool support with a few atari examples. Works 3-4x time faster than ray.
* Added mujoco results. Much better than openai spinning up ppo results.
* Added tcnn(https://github.com/NVlabs/tiny-cuda-nn) support. Reduces 5-10% of training time in the IsaacGym envs.
* Various fixes and improvements.
1.3.2
* Added 'sigma' command line parameter. Will override sigma for continuous space in case if fixed_sigma is True.
1.3.1
* Fixed SAC not working
1.3.0
* Simplified rnn implementation. Works a little bit slower but much more stable.
* Now central value can be non-rnn if policy is rnn.
* Removed load_checkpoint from the yaml file. now --checkpoint works for both train and play.
1.2.0
* Added Swish (SILU) and GELU activations, it can improve Isaac Gym results for some of the envs.
* Removed tensorflow and made initial cleanup of the old/unused code.
* Simplified runner.
* Now networks are created in the algos with load_network method.
1.1.4
* Fixed crash in a play (test) mode in player, when simulation and rl_devices are not the same.
* Fixed various multi gpu errors.
1.1.3
* Fixed crash when running single Isaac Gym environment in a play (test) mode.
* Added config parameter ```clip_actions``` for switching off internal action clipping and rescaling
1.1.0
* Added to pypi: ```pip install rl-games```
* Added reporting env (sim) step fps, without policy inference. Improved naming.
* Renames in yaml config for better readability: steps_num to horizon_length and lr_threshold to kl_threshold
## Troubleshooting
* Some of the supported envs are not installed with setup.py, you need to manually install them
* Starting from rl-games 1.1.0 old yaml configs won't be compatible with the new version:
* ```steps_num``` should be changed to ```horizon_length``` and ```lr_threshold``` to ```kl_threshold```
## Known issues
* Running a single environment with Isaac Gym can cause crash, if it happens switch to at least 2 environments simulated in parallel
| /rl-games-1.6.0.tar.gz/rl-games-1.6.0/README.md | 0.932415 | 0.959687 | README.md | pypi |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class NoisyLinear(nn.Linear):
    """Linear layer with independent Gaussian noise on weights and bias.

    Implements the "independent noise" NoisyNet layer from Fortunato et al.,
    "Noisy Networks for Exploration" (https://arxiv.org/abs/1706.10295):
    learnable scales ``sigma_weight`` / ``sigma_bias`` multiply freshly
    sampled standard-normal noise on every forward pass.
    """

    def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):
        super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)
        self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
        self.register_buffer("epsilon_weight", torch.zeros(out_features, in_features))
        if bias:
            self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))
            self.register_buffer("epsilon_bias", torch.zeros(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Also called by nn.Linear.__init__ before the sigma parameters
        # exist, so only (re)initialize the base weight/bias here.
        std = math.sqrt(3 / self.in_features)
        self.weight.data.uniform_(-std, std)
        if self.bias is not None:  # fixed: bias=False used to crash here
            self.bias.data.uniform_(-std, std)

    def forward(self, input):
        # Resample noise on every call (training-time exploration).
        self.epsilon_weight.normal_()
        bias = self.bias
        if bias is not None:
            self.epsilon_bias.normal_()
            bias = bias + self.sigma_bias * self.epsilon_bias.data
        return F.linear(input, self.weight + self.sigma_weight * self.epsilon_weight.data, bias)
class NoisyFactorizedLinear(nn.Linear):
    """NoisyNet layer with factorized Gaussian noise (Fortunato et al., 2017).

    The (out x in) weight noise is the outer product of an input-sized and an
    output-sized noise vector, each transformed by f(x) = sign(x)*sqrt(|x|),
    requiring only ``in + out`` noise samples per forward pass.
    """

    def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
        super(NoisyFactorizedLinear, self).__init__(in_features, out_features, bias=bias)
        sigma_init = sigma_zero / math.sqrt(in_features)
        self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
        self.register_buffer("epsilon_input", torch.zeros(1, in_features))
        self.register_buffer("epsilon_output", torch.zeros(out_features, 1))
        if bias:
            self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))

    def forward(self, input):
        self.epsilon_input.normal_()  # fixed: was misspelled `epsison_input` (AttributeError)
        self.epsilon_output.normal_()

        func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))
        eps_in = func(self.epsilon_input.data)
        eps_out = func(self.epsilon_output.data)

        bias = self.bias
        if bias is not None:
            bias = bias + self.sigma_bias * eps_out.t()
        # outer product via broadcasting: (1, in) * (out, 1) -> (out, in)
        noise_v = torch.mul(eps_in, eps_out)
        return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)
class LSTMWithDones(nn.Module):
    """A hand-rolled LSTM cell unrolled over time with per-step done masks.

    At every timestep the hidden and cell states are multiplied by
    ``(1 - done)``, zeroing them for environments whose episode just ended,
    before the usual LSTM gate computations.
    """

    def __init__(self, input_sz: int, hidden_sz: int):
        super().__init__()
        self.input_sz = input_sz
        self.hidden_size = hidden_sz
        # the four gates are packed as [input, forget, cell, output]
        self.weight_ih = nn.Parameter(torch.Tensor(input_sz, hidden_sz * 4))
        self.weight_hh = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz * 4))
        self.bias = nn.Parameter(torch.Tensor(hidden_sz * 4))
        self.init_weights()

    def init_weights(self):
        for p in self.parameters():
            if p.data.ndimension() >= 2:
                nn.init.xavier_uniform_(p.data)
            else:
                nn.init.zeros_(p.data)

    def forward(self, x, dones, init_states):
        """Assumes x is of shape (batch, sequence, feature).

        ``dones[:, t]`` must broadcast against (batch, hidden), e.g. dones of
        shape (batch, sequence, 1).  ``init_states`` is a required
        ``(h_0, c_0)`` pair of (batch, hidden) tensors.
        """
        bs, seq_sz, _ = x.size()
        hidden_seq = []
        assert(init_states)
        h_t, c_t = init_states
        HS = self.hidden_size
        for t in range(seq_sz):
            d = dones[:, t]
            # reset state for environments that were done at this step
            h_t = h_t * (1 - d)
            c_t = c_t * (1 - d)
            x_t = x[:, t, :]
            # batch the four gate projections into a single matrix multiplication
            gates = x_t @ self.weight_ih + h_t @ self.weight_hh + self.bias
            i_t, f_t, g_t, o_t = (
                torch.sigmoid(gates[:, :HS]),         # input gate
                torch.sigmoid(gates[:, HS:HS*2]),     # forget gate
                torch.tanh(gates[:, HS*2:HS*3]),      # candidate cell
                torch.sigmoid(gates[:, HS*3:]),       # output gate
            )
            c_t = f_t * c_t + i_t * g_t
            h_t = o_t * torch.tanh(c_t)
            hidden_seq.append(h_t.unsqueeze(0))
        # fixed: stack the timesteps along dim 0 (was dim=1, which produced a
        # (1, batch*seq, hidden) tensor instead of (seq, batch, hidden))
        hidden_seq = torch.cat(hidden_seq, dim=0)
        # reshape from (sequence, batch, feature) to (batch, sequence, feature)
        hidden_seq = hidden_seq.transpose(1, 0).contiguous()
        return hidden_seq, (h_t, c_t)
from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from torch import optim
import torch
from torch import nn
import numpy as np
class DiscreteA2CAgent(a2c_common.DiscreteA2CBase):
    """PPO agent for discrete and multi-discrete action spaces.

    Extends the generic ``DiscreteA2CBase`` training loop with the concrete
    actor-critic model, the Adam optimizer, an optional central value network
    (asymmetric actor-critic) and the per-minibatch PPO gradient computation.
    """

    def __init__(self, base_name, params):
        """Build the model, optimizer, dataset and (optionally) central value net.

        :param base_name: run name used by the base class (logging, checkpoints)
        :param params: full parsed config for the run
        """
        a2c_common.DiscreteA2CBase.__init__(self, base_name, params)
        obs_shape = self.obs_shape

        # Parameters forwarded to the network builder.
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_actors * self.num_agents,
            'value_size': self.env_info.get('value_size', 1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }

        self.model = self.network.build(config)
        self.model.to(self.ppo_device)
        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)
        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)

        if self.has_central_value:
            # Asymmetric actor-critic: a separate value network is trained on
            # the (possibly privileged) env state instead of the observation.
            cv_config = {
                'state_shape' : self.state_shape,
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'horizon_length' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'normalize_value' : self.normalize_value,
                'network' : self.central_value_config['network'],
                'config' : self.central_value_config,
                'writter' : self.writer,
                'max_epochs' : self.max_epochs,
                'multi_gpu' : self.multi_gpu,
                'zero_rnn_on_done' : self.zero_rnn_on_done
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)

        self.use_experimental_cv = self.config.get('use_experimental_cv', False)
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        if self.normalize_value:
            # Share the value-normalization stats with whichever network
            # actually produces the value estimates.
            self.value_mean_std = self.central_value_net.model.value_mean_std if self.has_central_value else self.model.value_mean_std

        # The policy head trains its own value loss unless the value function
        # is owned by the central value net / phasic policy gradients.
        self.has_value_loss = (self.has_central_value and self.use_experimental_cv) \
            or (not self.has_phasic_policy_gradients and not self.has_central_value)
        self.algo_observer.after_init(self)

    def update_epoch(self):
        """Advance and return the 1-based epoch counter."""
        self.epoch_num += 1
        return self.epoch_num

    def save(self, fn):
        """Serialize the full training state (weights, optimizer, stats) to *fn*."""
        state = self.get_full_state_weights()
        torch_ext.save_checkpoint(fn, state)

    def restore(self, fn):
        """Load a checkpoint previously produced by :meth:`save`."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.set_full_state_weights(checkpoint)

    def get_masked_action_values(self, obs, action_masks):
        """Run inference with invalid-action masking.

        :param obs: dict with 'obs' (and 'states' when a central value net is used)
        :param action_masks: boolean mask of valid actions, True = allowed
        :return: model output dict (actions, neglogp, values, masks, ...)
        """
        processed_obs = self._preproc_obs(obs['obs'])
        action_masks = torch.BoolTensor(action_masks).to(self.ppo_device)
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : processed_obs,
            'action_masks' : action_masks,
            'rnn_states' : self.rnn_states
        }

        with torch.no_grad():
            res_dict = self.model(input_dict)
            if self.has_central_value:
                # Replace the policy-head value with the central value estimate.
                input_dict = {
                    'is_train': False,
                    'states' : obs['states'],
                }
                value = self.get_central_value(input_dict)
                res_dict['values'] = value
        if self.is_multi_discrete:
            # NOTE(review): for multi-discrete spaces the masks appear to be a
            # sequence of per-dimension masks, flattened here for storage —
            # confirm against the env's get_action_mask().
            action_masks = torch.cat(action_masks, dim=-1)
        res_dict['action_masks'] = action_masks
        return res_dict

    def train_actor_critic(self, input_dict):
        """Run one optimization step on a minibatch and return the train stats."""
        self.set_train()
        self.calc_gradients(input_dict)
        # Push a learning rate possibly updated by the scheduler into the optimizer.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.last_lr
        return self.train_result

    def calc_gradients(self, input_dict):
        """Compute PPO losses on one minibatch, backprop, and step the optimizer.

        Stores ``(a_loss, c_loss, entropy, kl_dist, last_lr, lr_mul)`` in
        ``self.train_result`` for the training loop to consume.
        """
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        obs_batch = input_dict['obs']
        obs_batch = self._preproc_obs(obs_batch)

        lr_mul = 1.0
        curr_e_clip = lr_mul * self.e_clip

        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch,
        }
        if self.use_action_masks:
            batch_dict['action_masks'] = input_dict['action_masks']

        rnn_masks = None
        if self.is_rnn:
            # rnn_masks mark valid (non-padded) steps inside fixed-length sequences.
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len
            batch_dict['bptt_len'] = self.bptt_len
            if self.zero_rnn_on_done:
                batch_dict['dones'] = input_dict['dones']

        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['values']
            entropy = res_dict['entropy']
            a_loss = self.actor_loss_func(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
            if self.has_value_loss:
                c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            else:
                # value loss handled elsewhere (central value net / PPG)
                c_loss = torch.zeros(1, device=self.ppo_device)

            # Mask-aware mean of each loss (a no-op when rnn_masks is None).
            losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1)], rnn_masks)
            a_loss, c_loss, entropy = losses[0], losses[1], losses[2]
            loss = a_loss + 0.5 *c_loss * self.critic_coef - entropy * self.entropy_coef
            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                for param in self.model.parameters():
                    param.grad = None

        self.scaler.scale(loss).backward()
        # NOTE(review): method name is misspelled upstream ("trancate"); it
        # clips gradients (if enabled) and steps the optimizer via the scaler.
        self.trancate_gradients_and_step()

        with torch.no_grad():
            # Quadratic approximation of the KL between old and new policy.
            kl_dist = 0.5 * ((old_action_log_probs_batch - action_log_probs)**2)
            if rnn_masks is not None:
                # NOTE(review): divides by rnn_masks.numel(), not sum_mask —
                # appears deliberate (original inline comment kept below).
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() # / sum_mask
            else:
                kl_dist = kl_dist.mean()

        self.diagnostics.mini_batch(self,
        {
            'values' : value_preds_batch,
            'returns' : return_batch,
            'new_neglogp' : action_log_probs,
            'old_neglogp' : old_action_log_probs_batch,
            'masks' : rnn_masks
        }, curr_e_clip, 0)

        self.train_result = (a_loss, c_loss, entropy, kl_dist,self.last_lr, lr_mul)
from rl_games.common import object_factory
import rl_games.algos_torch
from rl_games.algos_torch import network_builder
from rl_games.algos_torch import models
# Global registries for user-provided networks / models, keyed by name.
NETWORK_REGISTRY = {}
MODEL_REGISTRY = {}


def register_network(name, target_class):
    """Expose *target_class* to the network factory under *name*."""
    NETWORK_REGISTRY[name] = lambda **kwargs: target_class()


def register_model(name, target_class):
    """Expose *target_class* to the model factory under *name*."""
    MODEL_REGISTRY[name] = lambda network, **kwargs: target_class(network)
class NetworkBuilder:
    """Factory wrapper that creates network builders by configured name."""

    def __init__(self):
        self.network_factory = object_factory.ObjectFactory()
        # Seed with user-registered networks, then add the built-ins.
        self.network_factory.set_builders(NETWORK_REGISTRY)
        builtins = {
            'actor_critic': lambda **kwargs: network_builder.A2CBuilder(),
            'resnet_actor_critic': lambda **kwargs: network_builder.A2CResnetBuilder(),
            'rnd_curiosity': lambda **kwargs: network_builder.RNDCuriosityBuilder(),
            'soft_actor_critic': lambda **kwargs: network_builder.SACBuilder(),
        }
        for net_name, builder in builtins.items():
            self.network_factory.register_builder(net_name, builder)

    def load(self, params):
        """Instantiate the network named in params['name'] and load its config.

        :param params: network section of the training config.
        :return: the configured network builder instance.
        """
        net = self.network_factory.create(params['name'])
        net.load(params)
        return net
class ModelBuilder:
    """Factory wrapper that creates models (which wrap a network) by name."""

    def __init__(self):
        self.model_factory = object_factory.ObjectFactory()
        # Seed with user-registered models, then add the built-ins.
        self.model_factory.set_builders(MODEL_REGISTRY)
        builtins = {
            'discrete_a2c': models.ModelA2C,
            'multi_discrete_a2c': models.ModelA2CMultiDiscrete,
            'continuous_a2c': models.ModelA2CContinuous,
            'continuous_a2c_logstd': models.ModelA2CContinuousLogStd,
            'soft_actor_critic': models.ModelSACContinuous,
            'central_value': models.ModelCentralValue,
        }
        for model_name, model_class in builtins.items():
            # Bind model_class as a default arg to avoid the late-binding
            # closure pitfall inside the loop.
            self.model_factory.register_builder(
                model_name,
                lambda network, model_class=model_class, **kwargs: model_class(network))
        self.network_builder = NetworkBuilder()

    def get_network_builder(self):
        """Return the nested NetworkBuilder used to construct networks."""
        return self.network_builder

    def load(self, params):
        """Build the network from params['network'], then wrap it in the
        model named by params['model']['name'].

        :param params: full training config.
        :return: the constructed model instance.
        """
        network = self.network_builder.load(params['network'])
        return self.model_factory.create(params['model']['name'], network=network)
from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class A2CAgent(a2c_common.ContinuousA2CBase):
    """PPO/A2C agent for continuous action spaces (PyTorch backend).

    Builds the actor-critic model, its Adam optimizer and (optionally) a
    separate central-value network, and implements the per-minibatch
    gradient computation used by the base-class training loop.
    """

    def __init__(self, base_name, params):
        """Construct the agent.

        :param base_name: run/experiment name forwarded to the base class.
        :param params: full training config dict (network, PPO hyperparameters, ...).
        """
        a2c_common.ContinuousA2CBase.__init__(self, base_name, params)
        obs_shape = self.obs_shape
        # Config handed to the network factory to build the actor-critic model.
        build_config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            # One RNN sequence per (actor, agent) pair.
            'num_seqs' : self.num_actors * self.num_agents,
            'value_size': self.env_info.get('value_size',1),
            'normalize_value' : self.normalize_value,
            'normalize_input': self.normalize_input,
        }

        self.model = self.network.build(build_config)
        self.model.to(self.ppo_device)
        self.states = None
        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)
        self.bound_loss_type = self.config.get('bound_loss_type', 'bound') # 'regularisation' or 'bound'
        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)

        if self.has_central_value:
            # Separate critic trained on the (possibly asymmetric) full state.
            cv_config = {
                'state_shape' : self.state_shape,
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'horizon_length' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'normalize_value' : self.normalize_value,
                'network' : self.central_value_config['network'],
                'config' : self.central_value_config,
                'writter' : self.writer,
                'max_epochs' : self.max_epochs,
                'multi_gpu' : self.multi_gpu,
                'zero_rnn_on_done' : self.zero_rnn_on_done
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)

        self.use_experimental_cv = self.config.get('use_experimental_cv', True)
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        if self.normalize_value:
            # Share the value normalizer with the central-value net when present.
            self.value_mean_std = self.central_value_net.model.value_mean_std if self.has_central_value else self.model.value_mean_std

        # The policy-side critic loss is skipped when the central value net
        # (or phasic policy gradients) owns value training.
        self.has_value_loss = (self.has_central_value and self.use_experimental_cv) \
                                or (not self.has_phasic_policy_gradients and not self.has_central_value)
        self.algo_observer.after_init(self)

    def update_epoch(self):
        """Advance and return the current epoch counter."""
        self.epoch_num += 1
        return self.epoch_num

    def save(self, fn):
        """Serialize full training state (weights, optimizer, ...) to checkpoint *fn*."""
        state = self.get_full_state_weights()
        torch_ext.save_checkpoint(fn, state)

    def restore(self, fn):
        """Load full training state from checkpoint *fn*."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.set_full_state_weights(checkpoint)

    def get_masked_action_values(self, obs, action_masks):
        # Action masking is only meaningful for discrete action spaces.
        assert False

    def calc_gradients(self, input_dict):
        """Run one PPO minibatch: forward pass, losses, backward, optimizer step.

        Stores the per-minibatch statistics tuple in ``self.train_result``
        (losses, KL, lr info, detached mu/sigma) for the training loop.

        :param input_dict: minibatch tensors produced by the dataset
            (old values/logprobs/mu/sigma, advantages, returns, actions, obs,
            plus RNN states/masks when recurrent).
        """
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        old_mu_batch = input_dict['mu']
        old_sigma_batch = input_dict['sigma']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        obs_batch = input_dict['obs']
        obs_batch = self._preproc_obs(obs_batch)

        lr_mul = 1.0
        curr_e_clip = self.e_clip

        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch,
        }

        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len

            if self.zero_rnn_on_done:
                batch_dict['dones'] = input_dict['dones']

        # Forward pass and loss computation under AMP autocast (no-op when
        # mixed precision is disabled).
        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['values']
            entropy = res_dict['entropy']
            mu = res_dict['mus']
            sigma = res_dict['sigmas']

            # Clipped PPO surrogate objective.
            a_loss = self.actor_loss_func(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)

            if self.has_value_loss:
                c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            else:
                # Value training handled elsewhere (central value net / PPG).
                c_loss = torch.zeros(1, device=self.ppo_device)
            # Penalty keeping the action mean near the valid action range.
            if self.bound_loss_type == 'regularisation':
                b_loss = self.reg_loss(mu)
            elif self.bound_loss_type == 'bound':
                b_loss = self.bound_loss(mu)
            else:
                b_loss = torch.zeros(1, device=self.ppo_device)
            # Average each loss over valid (unmasked) timesteps only.
            losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss , entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks)
            a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]

            loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * self.entropy_coef + b_loss * self.bounds_loss_coef

            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                # Setting grads to None instead of zeroing skips a memset and
                # lets the first backward write grads directly.
                for param in self.model.parameters():
                    param.grad = None

        self.scaler.scale(loss).backward()
        # Gradient clipping + optimizer/scaler step live in the base class
        # (note: method name typo is part of the base-class API).
        self.trancate_gradients_and_step()

        with torch.no_grad():
            reduce_kl = rnn_masks is None
            kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
            if rnn_masks is not None:
                # NOTE: divides by total elements, not by the number of valid
                # steps (sum_mask) — slightly underestimates KL on padded
                # sequences; kept as-is intentionally.
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask

        self.diagnostics.mini_batch(self,
                {
                    'values' : value_preds_batch,
                    'returns' : return_batch,
                    'new_neglogp' : action_log_probs,
                    'old_neglogp' : old_action_log_probs_batch,
                    'masks' : rnn_masks
                }, curr_e_clip, 0)

        self.train_result = (a_loss, c_loss, entropy, \
            kl_dist, self.last_lr, lr_mul, \
            mu.detach(), sigma.detach(), b_loss)

    def train_actor_critic(self, input_dict):
        """Run one gradient step and return the training statistics tuple."""
        self.calc_gradients(input_dict)
        return self.train_result

    def reg_loss(self, mu):
        """L2 penalty on the action mean ('regularisation' bound-loss mode).

        Returns 0 (int) when bounds_loss_coef is unset; callers only use the
        result when the coefficient is configured.
        """
        if self.bounds_loss_coef is not None:
            reg_loss = (mu*mu).sum(axis=-1)
        else:
            reg_loss = 0
        return reg_loss

    def bound_loss(self, mu):
        """Quadratic penalty on action means outside [-soft_bound, soft_bound].

        soft_bound = 1.1 — presumably actions are normalized to [-1, 1], so a
        small margin is allowed before the penalty kicks in (TODO confirm).
        Returns 0 (int) when bounds_loss_coef is unset.
        """
        if self.bounds_loss_coef is not None:
            soft_bound = 1.1
            mu_loss_high = torch.clamp_min(mu - soft_bound, 0.0)**2
            mu_loss_low = torch.clamp_max(mu + soft_bound, 0.0)**2
            b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
        else:
            b_loss = 0
        return b_loss
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.