code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def _update_network_parameters(self):
    """Update parameters in actor network and critic networks."""
    # Polyak-average each target network toward its live counterpart,
    # in the same order as before: qf_1, qf_2, then the policy.
    updates = ((self._target_qf_1, self._qf_1),
               (self._target_qf_2, self._qf_2),
               (self._target_policy, self.policy))
    for target, source in updates:
        soft_update_model(target, source, self._tau)
Update parameters in actor network and critic networks.
_update_network_parameters
python
rlworkgroup/garage
src/garage/torch/algos/td3.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/td3.py
MIT
def _log_statistics(self): """Output training statistics to dowel such as losses and returns.""" tabular.record('Policy/AveragePolicyLoss', np.mean(self._episode_policy_losses)) tabular.record('QFunction/AverageQFunctionLoss', np.mean(self._episode_q...
Output training statistics to dowel such as losses and returns.
_log_statistics
python
rlworkgroup/garage
src/garage/torch/algos/td3.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/td3.py
MIT
def _train_policy(self, obs, actions, rewards, advantages): r"""Train the policy. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, O*)`. actions (torch.Tensor): Actions fed to the environment with shape :math:`(N, A...
Train the policy. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, O*)`. actions (torch.Tensor): Actions fed to the environment with shape :math:`(N, A*)`. rewards (torch.Tensor): Acquired rewards ...
_train_policy
python
rlworkgroup/garage
src/garage/torch/algos/trpo.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/trpo.py
MIT
def _train_once(self, itr, eps): """Train the algorithm once. Args: itr (int): Iteration number. eps (EpisodeBatch): A batch of collected paths. Returns: numpy.float64: Calculated mean value of undiscounted returns. """ obs = np_to_torch(eps...
Train the algorithm once. Args: itr (int): Iteration number. eps (EpisodeBatch): A batch of collected paths. Returns: numpy.float64: Calculated mean value of undiscounted returns.
_train_once
python
rlworkgroup/garage
src/garage/torch/algos/vpg.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/vpg.py
MIT
def _train(self, obs, actions, rewards, returns, advs): r"""Train the policy and value function with minibatch. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, O*)`. actions (torch.Tensor): Actions fed to the environment with shap...
Train the policy and value function with minibatch. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, O*)`. actions (torch.Tensor): Actions fed to the environment with shape :math:`(N, A*)`. rewards (torch.Tensor...
_train
python
rlworkgroup/garage
src/garage/torch/algos/vpg.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/vpg.py
MIT
def _train_value_function(self, obs, returns): r"""Train the value function. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, O*)`. returns (torch.Tensor): Acquired returns with shape :math:`(N, )`. Returns...
Train the value function. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, O*)`. returns (torch.Tensor): Acquired returns with shape :math:`(N, )`. Returns: torch.Tensor: Calculated mean scalar value of...
_train_value_function
python
rlworkgroup/garage
src/garage/torch/algos/vpg.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/vpg.py
MIT
def _compute_loss(self, obs, actions, rewards, valids, baselines): r"""Compute mean value of loss. Notes: P is the maximum episode length (self.max_episode_length) Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, P, O*)`. ...
Compute mean value of loss. Notes: P is the maximum episode length (self.max_episode_length) Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, P, O*)`. actions (torch.Tensor): Actions fed to the environment with sha...
_compute_loss
python
rlworkgroup/garage
src/garage/torch/algos/vpg.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/vpg.py
MIT
def _compute_loss_with_adv(self, obs, actions, rewards, advantages): r"""Compute mean value of loss. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N \dot [T], O*)`. actions (torch.Tensor): Actions fed to the environment ...
Compute mean value of loss. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N \dot [T], O*)`. actions (torch.Tensor): Actions fed to the environment with shape :math:`(N \dot [T], A*)`. rewards (torch.Tensor): Acq...
_compute_loss_with_adv
python
rlworkgroup/garage
src/garage/torch/algos/vpg.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/vpg.py
MIT
def _compute_advantage(self, rewards, valids, baselines): r"""Compute mean value of loss. Notes: P is the maximum episode length (self.max_episode_length) Args: rewards (torch.Tensor): Acquired rewards with shape :math:`(N, P)`. valids (list[int]): Numbe...
Compute mean value of loss. Notes: P is the maximum episode length (self.max_episode_length) Args: rewards (torch.Tensor): Acquired rewards with shape :math:`(N, P)`. valids (list[int]): Numbers of valid steps in each episode baselines (torch.Tensor)...
_compute_advantage
python
rlworkgroup/garage
src/garage/torch/algos/vpg.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/vpg.py
MIT
def _compute_kl_constraint(self, obs): r"""Compute KL divergence. Compute the KL divergence between the old policy distribution and current policy distribution. Notes: P is the maximum episode length (self.max_episode_length) Args: obs (torch.Tensor): Observation f...
Compute KL divergence. Compute the KL divergence between the old policy distribution and current policy distribution. Notes: P is the maximum episode length (self.max_episode_length) Args: obs (torch.Tensor): Observation from the environment with shape :mat...
_compute_kl_constraint
python
rlworkgroup/garage
src/garage/torch/algos/vpg.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/vpg.py
MIT
def _compute_policy_entropy(self, obs): r"""Compute entropy value of probability distribution. Notes: P is the maximum episode length (self.max_episode_length) Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, P, O*)`. Returns...
Compute entropy value of probability distribution. Notes: P is the maximum episode length (self.max_episode_length) Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N, P, O*)`. Returns: torch.Tensor: Calculated entropy value...
_compute_policy_entropy
python
rlworkgroup/garage
src/garage/torch/algos/vpg.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/algos/vpg.py
MIT
def log_prob(self, value, pre_tanh_value=None, epsilon=1e-6): """The log likelihood of a sample on the this Tanh Distribution. Args: value (torch.Tensor): The sample whose loglikelihood is being computed. pre_tanh_value (torch.Tensor): The value prior to having t...
The log likelihood of a sample on the this Tanh Distribution. Args: value (torch.Tensor): The sample whose loglikelihood is being computed. pre_tanh_value (torch.Tensor): The value prior to having the tanh function applied to it but after it has been samp...
log_prob
python
rlworkgroup/garage
src/garage/torch/distributions/tanh_normal.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/distributions/tanh_normal.py
MIT
def rsample_with_pre_tanh_value(self, sample_shape=torch.Size()): """Return a sample, sampled from this TanhNormal distribution. Returns the sampled value before the tanh transform is applied and the sampled value with the tanh transform applied to it. Args: sample_shape (l...
Return a sample, sampled from this TanhNormal distribution. Returns the sampled value before the tanh transform is applied and the sampled value with the tanh transform applied to it. Args: sample_shape (list): shape of the return. Note: Gradients pass through ...
rsample_with_pre_tanh_value
python
rlworkgroup/garage
src/garage/torch/distributions/tanh_normal.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/distributions/tanh_normal.py
MIT
def _from_distribution(cls, new_normal): """Construct a new TanhNormal distribution from a normal distribution. Args: new_normal (Independent(Normal)): underlying normal dist for the new TanhNormal distribution. Returns: TanhNormal: A new distribution wh...
Construct a new TanhNormal distribution from a normal distribution. Args: new_normal (Independent(Normal)): underlying normal dist for the new TanhNormal distribution. Returns: TanhNormal: A new distribution whose underlying normal dist is new_no...
_from_distribution
python
rlworkgroup/garage
src/garage/torch/distributions/tanh_normal.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/distributions/tanh_normal.py
MIT
def expand(self, batch_shape, _instance=None): """Returns a new TanhNormal distribution. (or populates an existing instance provided by a derived class) with batch dimensions expanded to `batch_shape`. This method calls :class:`~torch.Tensor.expand` on the distribution's parameters. As ...
Returns a new TanhNormal distribution. (or populates an existing instance provided by a derived class) with batch dimensions expanded to `batch_shape`. This method calls :class:`~torch.Tensor.expand` on the distribution's parameters. As such, this does not allocate new memory for the ex...
expand
python
rlworkgroup/garage
src/garage/torch/distributions/tanh_normal.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/distributions/tanh_normal.py
MIT
def _clip_but_pass_gradient(x, lower=0., upper=1.): """Clipping function that allows for gradients to flow through. Args: x (torch.Tensor): value to be clipped lower (float): lower bound of clipping upper (float): upper bound of clipping Returns: ...
Clipping function that allows for gradients to flow through. Args: x (torch.Tensor): value to be clipped lower (float): lower bound of clipping upper (float): upper bound of clipping Returns: torch.Tensor: x clipped between lower and upper.
_clip_but_pass_gradient
python
rlworkgroup/garage
src/garage/torch/distributions/tanh_normal.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/distributions/tanh_normal.py
MIT
def spec(self):
    """garage.InOutSpec: Input and output space."""
    # Both spaces are unbounded boxes sized by the encoder's dimensions.
    unbounded = (-np.inf, np.inf)
    in_space = akro.Box(*unbounded, self._input_dim)
    out_space = akro.Box(*unbounded, self._output_dim)
    return InOutSpec(in_space, out_space)
garage.InOutSpec: Input and output space.
spec
python
rlworkgroup/garage
src/garage/torch/embeddings/mlp_encoder.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/embeddings/mlp_encoder.py
MIT
def forward(self, x): """Forward method. Args: x (torch.Tensor): Input values. Should match image_format specified at construction (either NCHW or NCWH). Returns: List[torch.Tensor]: Output values """ # Transform single values into batch...
Forward method. Args: x (torch.Tensor): Input values. Should match image_format specified at construction (either NCHW or NCWH). Returns: List[torch.Tensor]: Output values
forward
python
rlworkgroup/garage
src/garage/torch/modules/cnn_module.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/modules/cnn_module.py
MIT
def _check_spec(spec, image_format): """Check that an InOutSpec is suitable for a CNNModule. Args: spec (garage.InOutSpec): Specification of inputs and outputs. The input should be in 'NCHW' format: [batch_size, channel, height, width]. Will print a warning if the channel size...
Check that an InOutSpec is suitable for a CNNModule. Args: spec (garage.InOutSpec): Specification of inputs and outputs. The input should be in 'NCHW' format: [batch_size, channel, height, width]. Will print a warning if the channel size is not 1 or 3. If output_space ...
_check_spec
python
rlworkgroup/garage
src/garage/torch/modules/cnn_module.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/modules/cnn_module.py
MIT
def to(self, *args, **kwargs): """Move the module to the specified device. Args: *args: args to pytorch to function. **kwargs: keyword args to pytorch to function. """ super().to(*args, **kwargs) buffers = dict(self.named_buffers()) if not isinst...
Move the module to the specified device. Args: *args: args to pytorch to function. **kwargs: keyword args to pytorch to function.
to
python
rlworkgroup/garage
src/garage/torch/modules/gaussian_mlp_module.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/modules/gaussian_mlp_module.py
MIT
def forward(self, *inputs): """Forward method. Args: *inputs: Input to the module. Returns: torch.distributions.independent.Independent: Independent distribution. """ mean, log_std_uncentered = self._get_mean_and_log_std(*inputs) ...
Forward method. Args: *inputs: Input to the module. Returns: torch.distributions.independent.Independent: Independent distribution.
forward
python
rlworkgroup/garage
src/garage/torch/modules/gaussian_mlp_module.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/modules/gaussian_mlp_module.py
MIT
def _get_mean_and_log_std(self, x): """Get mean and std of Gaussian distribution given inputs. Args: x: Input to the module. Returns: torch.Tensor: The mean of Gaussian distribution. torch.Tensor: The variance of Gaussian distribution. """ m...
Get mean and std of Gaussian distribution given inputs. Args: x: Input to the module. Returns: torch.Tensor: The mean of Gaussian distribution. torch.Tensor: The variance of Gaussian distribution.
_get_mean_and_log_std
python
rlworkgroup/garage
src/garage/torch/modules/gaussian_mlp_module.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/modules/gaussian_mlp_module.py
MIT
def _check_parameter_for_output_layer(cls, var_name, var, n_heads): """Check input parameters for output layer are valid. Args: var_name (str): variable name var (any): variable to be checked n_heads (int): number of head Returns: list: list of v...
Check input parameters for output layer are valid. Args: var_name (str): variable name var (any): variable to be checked n_heads (int): number of head Returns: list: list of variables (length of n_heads) Raises: ValueError: if the va...
_check_parameter_for_output_layer
python
rlworkgroup/garage
src/garage/torch/modules/multi_headed_mlp_module.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/modules/multi_headed_mlp_module.py
MIT
def forward(self, input_val): """Forward method. Args: input_val (torch.Tensor): Input values with (N, *, input_dim) shape. Returns: List[torch.Tensor]: Output values """ x = input_val for layer in self._layers: x = l...
Forward method. Args: input_val (torch.Tensor): Input values with (N, *, input_dim) shape. Returns: List[torch.Tensor]: Output values
forward
python
rlworkgroup/garage
src/garage/torch/modules/multi_headed_mlp_module.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/modules/multi_headed_mlp_module.py
MIT
def _build_hessian_vector_product(func, params, reg_coeff=1e-5): """Computes Hessian-vector product using Pearlmutter's algorithm. `Pearlmutter, Barak A. "Fast exact multiplication by the Hessian." Neural computation 6.1 (1994): 147-160.` Args: func (callable): A function that returns a torch....
Computes Hessian-vector product using Pearlmutter's algorithm. `Pearlmutter, Barak A. "Fast exact multiplication by the Hessian." Neural computation 6.1 (1994): 147-160.` Args: func (callable): A function that returns a torch.Tensor. Hessian of the return value will be computed. ...
_build_hessian_vector_product
python
rlworkgroup/garage
src/garage/torch/optimizers/conjugate_gradient_optimizer.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/conjugate_gradient_optimizer.py
MIT
def _eval(vector): """The evaluation function. Args: vector (torch.Tensor): The vector to be multiplied with Hessian. Returns: torch.Tensor: The product of Hessian of function f and v. """ unflatten_vector = unflatten_tensors(vector, par...
The evaluation function. Args: vector (torch.Tensor): The vector to be multiplied with Hessian. Returns: torch.Tensor: The product of Hessian of function f and v.
_eval
python
rlworkgroup/garage
src/garage/torch/optimizers/conjugate_gradient_optimizer.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/conjugate_gradient_optimizer.py
MIT
def _conjugate_gradient(f_Ax, b, cg_iters, residual_tol=1e-10): """Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312. Args: f_Ax (callable): A function to compute Hessian vector product. b (torch.Tensor): Right hand side of the equation to solve. cg_iters (int): Number of i...
Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312. Args: f_Ax (callable): A function to compute Hessian vector product. b (torch.Tensor): Right hand side of the equation to solve. cg_iters (int): Number of iterations to run conjugate gradient algorithm. resi...
_conjugate_gradient
python
rlworkgroup/garage
src/garage/torch/optimizers/conjugate_gradient_optimizer.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/conjugate_gradient_optimizer.py
MIT
def step(self, f_loss, f_constraint): # pylint: disable=arguments-differ """Take an optimization step. Args: f_loss (callable): Function to compute the loss. f_constraint (callable): Function to compute the constraint value. """ # Collect trainable parameters a...
Take an optimization step. Args: f_loss (callable): Function to compute the loss. f_constraint (callable): Function to compute the constraint value.
step
python
rlworkgroup/garage
src/garage/torch/optimizers/conjugate_gradient_optimizer.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/conjugate_gradient_optimizer.py
MIT
def state(self): """dict: The hyper-parameters of the optimizer.""" return { 'max_constraint_value': self._max_constraint_value, 'cg_iters': self._cg_iters, 'max_backtracks': self._max_backtracks, 'backtrack_ratio': self._backtrack_ratio, 'hvp_...
dict: The hyper-parameters of the optimizer.
state
python
rlworkgroup/garage
src/garage/torch/optimizers/conjugate_gradient_optimizer.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/conjugate_gradient_optimizer.py
MIT
def __setstate__(self, state): """Restore the optimizer state. Args: state (dict): State dictionary. """ if 'hvp_reg_coeff' not in state['state']: warnings.warn( 'Resuming ConjugateGradientOptimizer with lost state. ' 'This behavi...
Restore the optimizer state. Args: state (dict): State dictionary.
__setstate__
python
rlworkgroup/garage
src/garage/torch/optimizers/conjugate_gradient_optimizer.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/conjugate_gradient_optimizer.py
MIT
def zero_grad(self):
    """Sets gradients of all model parameters to zero."""
    # Skip parameters that never received a gradient; detach the rest
    # from the graph before zeroing in place.
    with_grads = (p for p in self.module.parameters() if p.grad is not None)
    for param in with_grads:
        param.grad.detach_()
        param.grad.zero_()
Sets gradients of all model parameters to zero.
zero_grad
python
rlworkgroup/garage
src/garage/torch/optimizers/differentiable_sgd.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/differentiable_sgd.py
MIT
def set_grads_none(self):
    """Sets gradients for all model parameters to None.

    This is an alternative to `zero_grad` which sets gradients
    to zero.
    """
    # Dropping the tensor entirely (rather than zeroing) lets the
    # allocator reclaim the gradient storage.
    for param in (p for p in self.module.parameters()
                  if p.grad is not None):
        param.grad = None
Sets gradients for all model parameters to None. This is an alternative to `zero_grad` which sets gradients to zero.
set_grads_none
python
rlworkgroup/garage
src/garage/torch/optimizers/differentiable_sgd.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/differentiable_sgd.py
MIT
def get_minibatch(self, *inputs): r"""Yields a batch of inputs. Notes: P is the size of minibatch (self._minibatch_size) Args: *inputs (list[torch.Tensor]): A list of inputs. Each input has shape :math:`(N \dot [T], *)`. Yields: list[torch.Tenso...
Yields a batch of inputs. Notes: P is the size of minibatch (self._minibatch_size) Args: *inputs (list[torch.Tensor]): A list of inputs. Each input has shape :math:`(N \dot [T], *)`. Yields: list[torch.Tensor]: A list batch of inputs. Each batch has sha...
get_minibatch
python
rlworkgroup/garage
src/garage/torch/optimizers/optimizer_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/optimizers/optimizer_wrapper.py
MIT
def forward(self, observations): """Compute the action distributions from the observations. Args: observations (torch.Tensor): Observations to act on. Returns: torch.distributions.Distribution: Batch distribution of actions. dict[str, torch.Tensor]: Addition...
Compute the action distributions from the observations. Args: observations (torch.Tensor): Observations to act on. Returns: torch.distributions.Distribution: Batch distribution of actions. dict[str, torch.Tensor]: Additional agent_info, as torch Tensors. ...
forward
python
rlworkgroup/garage
src/garage/torch/policies/categorical_cnn_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/categorical_cnn_policy.py
MIT
def reset_belief(self, num_tasks=1): r"""Reset :math:`q(z \| c)` to the prior and sample a new z from the prior. Args: num_tasks (int): Number of tasks. """ # reset distribution over z to the prior mu = torch.zeros(num_tasks, self._latent_dim).to(global_device()) ...
Reset :math:`q(z \| c)` to the prior and sample a new z from the prior. Args: num_tasks (int): Number of tasks.
reset_belief
python
rlworkgroup/garage
src/garage/torch/policies/context_conditioned_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/context_conditioned_policy.py
MIT
def sample_from_belief(self): """Sample z using distributions from current means and variances.""" if self._use_information_bottleneck: posteriors = [ torch.distributions.Normal(m, torch.sqrt(s)) for m, s in zip( torch.unbind(self.z_means), torch.unbind(se...
Sample z using distributions from current means and variances.
sample_from_belief
python
rlworkgroup/garage
src/garage/torch/policies/context_conditioned_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/context_conditioned_policy.py
MIT
def update_context(self, timestep): """Append single transition to the current context. Args: timestep (garage._dtypes.TimeStep): Timestep containing transition information to be added to context. """ o = torch.as_tensor(timestep.observation[None, None, ...]...
Append single transition to the current context. Args: timestep (garage._dtypes.TimeStep): Timestep containing transition information to be added to context.
update_context
python
rlworkgroup/garage
src/garage/torch/policies/context_conditioned_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/context_conditioned_policy.py
MIT
def infer_posterior(self, context): r"""Compute :math:`q(z \| c)` as a function of input context and sample new z. Args: context (torch.Tensor): Context values, with shape :math:`(X, N, C)`. X is the number of tasks. N is batch size. C is the combined size of...
Compute :math:`q(z \| c)` as a function of input context and sample new z. Args: context (torch.Tensor): Context values, with shape :math:`(X, N, C)`. X is the number of tasks. N is batch size. C is the combined size of observation, action, reward, and next ...
infer_posterior
python
rlworkgroup/garage
src/garage/torch/policies/context_conditioned_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/context_conditioned_policy.py
MIT
def forward(self, obs, context): """Given observations and context, get actions and probs from policy. Args: obs (torch.Tensor): Observation values, with shape :math:`(X, N, O)`. X is the number of tasks. N is batch size. O is the size of the flattened obser...
Given observations and context, get actions and probs from policy. Args: obs (torch.Tensor): Observation values, with shape :math:`(X, N, O)`. X is the number of tasks. N is batch size. O is the size of the flattened observation space. context (torch.Ten...
forward
python
rlworkgroup/garage
src/garage/torch/policies/context_conditioned_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/context_conditioned_policy.py
MIT
def get_action(self, obs): """Sample action from the policy, conditioned on the task embedding. Args: obs (torch.Tensor): Observation values, with shape :math:`(1, O)`. O is the size of the flattened observation space. Returns: torch.Tensor: Output actio...
Sample action from the policy, conditioned on the task embedding. Args: obs (torch.Tensor): Observation values, with shape :math:`(1, O)`. O is the size of the flattened observation space. Returns: torch.Tensor: Output action value, with shape :math:`(1, A)`. ...
get_action
python
rlworkgroup/garage
src/garage/torch/policies/context_conditioned_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/context_conditioned_policy.py
MIT
def compute_kl_div(self): r"""Compute :math:`KL(q(z|c) \| p(z))`. Returns: float: :math:`KL(q(z|c) \| p(z))`. """ prior = torch.distributions.Normal( torch.zeros(self._latent_dim).to(global_device()), torch.ones(self._latent_dim).to(global_device()))...
Compute :math:`KL(q(z|c) \| p(z))`. Returns: float: :math:`KL(q(z|c) \| p(z))`.
compute_kl_div
python
rlworkgroup/garage
src/garage/torch/policies/context_conditioned_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/context_conditioned_policy.py
MIT
def __init__(self, env_spec, name='DeterministicMLPPolicy', **kwargs): """Initialize class with multiple attributes. Args: env_spec (EnvSpec): Environment specification. name (str): Policy name. **kwargs: Additional keyword arguments passed to the MLPModule. ...
Initialize class with multiple attributes. Args: env_spec (EnvSpec): Environment specification. name (str): Policy name. **kwargs: Additional keyword arguments passed to the MLPModule.
__init__
python
rlworkgroup/garage
src/garage/torch/policies/deterministic_mlp_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/deterministic_mlp_policy.py
MIT
def get_action(self, observation): """Get a single action given an observation. Args: observation (np.ndarray): Observation from the environment. Returns: tuple: * np.ndarray: Predicted action. * dict: * np.ndarray[flo...
Get a single action given an observation. Args: observation (np.ndarray): Observation from the environment. Returns: tuple: * np.ndarray: Predicted action. * dict: * np.ndarray[float]: Mean of the distribution ...
get_action
python
rlworkgroup/garage
src/garage/torch/policies/deterministic_mlp_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/deterministic_mlp_policy.py
MIT
def get_actions(self, observations): """Get actions given observations. Args: observations (np.ndarray): Observations from the environment. Returns: tuple: * np.ndarray: Predicted actions. * dict: * np.ndarray[float]: ...
Get actions given observations. Args: observations (np.ndarray): Observations from the environment. Returns: tuple: * np.ndarray: Predicted actions. * dict: * np.ndarray[float]: Mean of the distribution * n...
get_actions
python
rlworkgroup/garage
src/garage/torch/policies/deterministic_mlp_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/deterministic_mlp_policy.py
MIT
def forward(self, observations): """Compute the action distributions from the observations. Args: observations(torch.Tensor): Batch of observations of shape :math:`(N, O)`. Observations should be flattened even if they are images as the underlying Q network h...
Compute the action distributions from the observations. Args: observations(torch.Tensor): Batch of observations of shape :math:`(N, O)`. Observations should be flattened even if they are images as the underlying Q network handles unflattening. ...
forward
python
rlworkgroup/garage
src/garage/torch/policies/discrete_cnn_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/discrete_cnn_policy.py
MIT
def forward(self, observations): """Get actions corresponding to a batch of observations. Args: observations(torch.Tensor): Batch of observations of shape :math:`(N, O)`. Observations should be flattened even if they are images as the underlying Q network han...
Get actions corresponding to a batch of observations. Args: observations(torch.Tensor): Batch of observations of shape :math:`(N, O)`. Observations should be flattened even if they are images as the underlying Q network handles unflattening. ...
forward
python
rlworkgroup/garage
src/garage/torch/policies/discrete_qf_argmax_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/discrete_qf_argmax_policy.py
MIT
def get_action(self, observation): """Get a single action given an observation. Args: observation (np.ndarray): Observation with shape :math:`(O, )`. Returns: torch.Tensor: Predicted action with shape :math:`(A, )`. dict: Empty since this policy does not pro...
Get a single action given an observation. Args: observation (np.ndarray): Observation with shape :math:`(O, )`. Returns: torch.Tensor: Predicted action with shape :math:`(A, )`. dict: Empty since this policy does not produce a distribution.
get_action
python
rlworkgroup/garage
src/garage/torch/policies/discrete_qf_argmax_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/discrete_qf_argmax_policy.py
MIT
def get_actions(self, observations): """Get actions given observations. Args: observations (np.ndarray): Batch of observations, should have shape :math:`(N, O)`. Returns: torch.Tensor: Predicted actions. Tensor has shape :math:`(N, A)`. dict:...
Get actions given observations. Args: observations (np.ndarray): Batch of observations, should have shape :math:`(N, O)`. Returns: torch.Tensor: Predicted actions. Tensor has shape :math:`(N, A)`. dict: Empty since this policy does not produce a dist...
get_actions
python
rlworkgroup/garage
src/garage/torch/policies/discrete_qf_argmax_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/discrete_qf_argmax_policy.py
MIT
def forward(self, observations): """Compute the action distributions from the observations. Args: observations (torch.Tensor): Batch of observations on default torch device. Returns: torch.distributions.Distribution: Batch distribution of actions. ...
Compute the action distributions from the observations. Args: observations (torch.Tensor): Batch of observations on default torch device. Returns: torch.distributions.Distribution: Batch distribution of actions. dict[str, torch.Tensor]: Additional ag...
forward
python
rlworkgroup/garage
src/garage/torch/policies/gaussian_mlp_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/gaussian_mlp_policy.py
MIT
def get_action(self, observation): r"""Get a single action given an observation. Args: observation (np.ndarray): Observation from the environment. Shape is :math:`env_spec.observation_space`. Returns: tuple: * np.ndarray: Predicted action...
Get a single action given an observation. Args: observation (np.ndarray): Observation from the environment. Shape is :math:`env_spec.observation_space`. Returns: tuple: * np.ndarray: Predicted action. Shape is :math:`env_spec....
get_action
python
rlworkgroup/garage
src/garage/torch/policies/stochastic_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/stochastic_policy.py
MIT
def get_actions(self, observations): r"""Get actions given observations. Args: observations (np.ndarray): Observations from the environment. Shape is :math:`batch_dim \bullet env_spec.observation_space`. Returns: tuple: * np.ndarray: Pred...
Get actions given observations. Args: observations (np.ndarray): Observations from the environment. Shape is :math:`batch_dim \bullet env_spec.observation_space`. Returns: tuple: * np.ndarray: Predicted actions. :math:`batch_d...
get_actions
python
rlworkgroup/garage
src/garage/torch/policies/stochastic_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/stochastic_policy.py
MIT
def forward(self, observations): """Compute the action distributions from the observations. Args: observations (torch.Tensor): Batch of observations on default torch device. Returns: torch.distributions.Distribution: Batch distribution of actions. ...
Compute the action distributions from the observations. Args: observations (torch.Tensor): Batch of observations on default torch device. Returns: torch.distributions.Distribution: Batch distribution of actions. dict[str, torch.Tensor]: Additional ag...
forward
python
rlworkgroup/garage
src/garage/torch/policies/stochastic_policy.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/policies/stochastic_policy.py
MIT
def __init__(self, env_spec, **kwargs): """Initialize class with multiple attributes. Args: env_spec (EnvSpec): Environment specification. **kwargs: Keyword arguments. """ self._env_spec = env_spec self._obs_dim = env_spec.observation_space.flat_dim ...
Initialize class with multiple attributes. Args: env_spec (EnvSpec): Environment specification. **kwargs: Keyword arguments.
__init__
python
rlworkgroup/garage
src/garage/torch/q_functions/continuous_mlp_q_function.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/q_functions/continuous_mlp_q_function.py
MIT
def forward(self, observations): """Return Q-value(s). Args: observations (np.ndarray): observations of shape :math: `(N, O*)`. Returns: torch.Tensor: Output value """ # We're given flattened observations. observations = observations.reshape( ...
Return Q-value(s). Args: observations (np.ndarray): observations of shape :math: `(N, O*)`. Returns: torch.Tensor: Output value
forward
python
rlworkgroup/garage
src/garage/torch/q_functions/discrete_cnn_q_function.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/q_functions/discrete_cnn_q_function.py
MIT
def compute_loss(self, obs, returns): r"""Compute mean value of loss. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N \dot [T], O*)`. returns (torch.Tensor): Acquired returns with shape :math:`(N, )`. Returns: ...
Compute mean value of loss. Args: obs (torch.Tensor): Observation from the environment with shape :math:`(N \dot [T], O*)`. returns (torch.Tensor): Acquired returns with shape :math:`(N, )`. Returns: torch.Tensor: Calculated negative mean scalar valu...
compute_loss
python
rlworkgroup/garage
src/garage/torch/value_functions/gaussian_mlp_value_function.py
https://github.com/rlworkgroup/garage/blob/master/src/garage/torch/value_functions/gaussian_mlp_value_function.py
MIT
def step_env(env, n=10, visualize=True): """Step env helper. Args: env (Environment): Input environment. n (int): Steps. visualize (bool): Whether visualize the environment. """ env.reset() if visualize and issubclass(type(env), Environment): env.visualize() for...
Step env helper. Args: env (Environment): Input environment. n (int): Steps. visualize (bool): Whether visualize the environment.
step_env
python
rlworkgroup/garage
tests/helpers.py
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
MIT
def step_env_with_gym_quirks(env, spec, n=10, visualize=True, serialize_env=False): """Step env helper. Args: env (Environment): Input environment. spec (EnvSpec): The environment...
Step env helper. Args: env (Environment): Input environment. spec (EnvSpec): The environment specification. n (int): Steps. visualize (bool): Whether to visualize the environment. serialize_env (bool): Whether to serialize the environment.
step_env_with_gym_quirks
python
rlworkgroup/garage
tests/helpers.py
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
MIT
def convolve(_input, filter_weights, filter_bias, strides, filters, in_channels, hidden_nonlinearity): """Helper function for performing convolution. Args: _input (tf.Tensor): Input tf.Tensor to the CNN. filter_weights (tuple(tf.Tensor)): The weights of the filters. filter_...
Helper function for performing convolution. Args: _input (tf.Tensor): Input tf.Tensor to the CNN. filter_weights (tuple(tf.Tensor)): The weights of the filters. filter_bias (tuple(tf.Tensor)): The bias of the filters. strides (tuple[int]): The stride of the sliding window. For examp...
convolve
python
rlworkgroup/garage
tests/helpers.py
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
MIT
def recurrent_step_lstm(input_val, num_units, step_hidden, step_cell, w_x_init, w_h_init, b_init, nonlinearity, gate_nonlinearit...
Helper function for performing feedforward of a lstm cell. Args: input_val (tf.Tensor): Input placeholder. num_units (int): Hidden dimension for LSTM cell. step_hidden (tf.Tensor): Place holder for step hidden state. step_cell (tf.Tensor): Place holder for step cell state. n...
recurrent_step_lstm
python
rlworkgroup/garage
tests/helpers.py
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
MIT
def recurrent_step_gru(input_val, num_units, step_hidden, w_x_init, w_h_init, b_init, nonlinearity, gate_nonlinearity, forget_bias=1.0):...
Helper function for performing feedforward of a GRU cell. Args: input_val (tf.Tensor): Input placeholder. num_units (int): Hidden dimension for GRU cell. step_hidden (tf.Tensor): Place holder for step hidden state. nonlinearity (callable): Activation function for intermediate ...
recurrent_step_gru
python
rlworkgroup/garage
tests/helpers.py
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
MIT
def _construct_image_vector(_input, batch, w, h, filter_width, filter_height, in_shape): """Get sliding window of input image. Args: _input (tf.Tensor): Input tf.Tensor to the CNN. batch (int): Batch index. w (int): Width index. h (int): Height index....
Get sliding window of input image. Args: _input (tf.Tensor): Input tf.Tensor to the CNN. batch (int): Batch index. w (int): Width index. h (int): Height index. filter_width (int): Width of the filter. filter_height (int): Height of the filter. in_shape (int):...
_construct_image_vector
python
rlworkgroup/garage
tests/helpers.py
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
MIT
def max_pooling(_input, pool_shape, pool_stride, padding='VALID'): """Helper function for performing max pooling. Args: _input (tf.Tensor): Input tf.Tensor to the CNN. pool_shape (int): Dimension of the pooling layer. pool_stride (int): The stride of the pooling layer. padding (...
Helper function for performing max pooling. Args: _input (tf.Tensor): Input tf.Tensor to the CNN. pool_shape (int): Dimension of the pooling layer. pool_stride (int): The stride of the pooling layer. padding (str): The type of padding algorithm to use, either 'SAME' or '...
max_pooling
python
rlworkgroup/garage
tests/helpers.py
https://github.com/rlworkgroup/garage/blob/master/tests/helpers.py
MIT
def __init__(self, env=None, env_name='', max_episode_length=100): """Create an AutoStepEnv. Args: env (gym.Env): Environment to be wrapped. env_name (str): Name of the environment. max_episode_length (int): Maximum length of the episode. """ if env_n...
Create an AutoStepEnv. Args: env (gym.Env): Environment to be wrapped. env_name (str): Name of the environment. max_episode_length (int): Maximum length of the episode.
__init__
python
rlworkgroup/garage
tests/wrappers.py
https://github.com/rlworkgroup/garage/blob/master/tests/wrappers.py
MIT
def step(self, action): """Step the wrapped environment. Args: action (np.ndarray): the action. Returns: np.ndarray: Next observation float: Reward bool: Termination signal dict: Environment information """ self._episo...
Step the wrapped environment. Args: action (np.ndarray): the action. Returns: np.ndarray: Next observation float: Reward bool: Termination signal dict: Environment information
step
python
rlworkgroup/garage
tests/wrappers.py
https://github.com/rlworkgroup/garage/blob/master/tests/wrappers.py
MIT
def setup_method(self): """Setup the Session and default Graph.""" self.graph = tf.Graph() for c in self.graph.collections: self.graph.clear_collection(c) self.graph_manager = self.graph.as_default() self.graph_manager.__enter__() self.sess = tf.compat.v1.Sess...
Setup the Session and default Graph.
setup_method
python
rlworkgroup/garage
tests/fixtures/fixtures.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/fixtures.py
MIT
def teardown_method(self): """Teardown the Session and default Graph.""" logger.remove_all() self.sess.__exit__(None, None, None) self.sess_manager.__exit__(None, None, None) self.graph_manager.__exit__(None, None, None) self.sess.close() # These del are crucial ...
Teardown the Session and default Graph.
teardown_method
python
rlworkgroup/garage
tests/fixtures/fixtures.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/fixtures.py
MIT
def train(self, trainer): """Obtain samplers and start actual training for each epoch. See garage.np.algos.RLAlgorithm train(). Args: trainer (Trainer): Trainer is passed to give algorithm the access to trainer.step_epochs(), which provides services ...
Obtain samplers and start actual training for each epoch. See garage.np.algos.RLAlgorithm train(). Args: trainer (Trainer): Trainer is passed to give algorithm the access to trainer.step_epochs(), which provides services such as snapshotting and sampler cont...
train
python
rlworkgroup/garage
tests/fixtures/algos/dummy_algo.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/algos/dummy_algo.py
MIT
def init_opt(self): """Initialize the optimization procedure. If using tensorflow, this may include declaring all the variables and compiling functions. """
Initialize the optimization procedure. If using tensorflow, this may include declaring all the variables and compiling functions.
init_opt
python
rlworkgroup/garage
tests/fixtures/algos/dummy_tf_algo.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/algos/dummy_tf_algo.py
MIT
def optimize_policy(self, samples_data): """Optimize the policy using the samples. Args: samples_data (dict): Processed sample data. """
Optimize the policy using the samples. Args: samples_data (dict): Processed sample data.
optimize_policy
python
rlworkgroup/garage
tests/fixtures/algos/dummy_tf_algo.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/algos/dummy_tf_algo.py
MIT
def render(self, mode='human'): """Render. Args: mode (str): Render mode. """
Render. Args: mode (str): Render mode.
render
python
rlworkgroup/garage
tests/fixtures/envs/dummy/base.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/base.py
MIT
def action_space(self): """Return an action space. Returns: gym.spaces: The action space of the environment. """ return akro.Box(low=-5.0, high=5.0, shape=self._action_dim, dtype=np.float32)
Return an action space. Returns: gym.spaces: The action space of the environment.
action_space
python
rlworkgroup/garage
tests/fixtures/envs/dummy/dummy_box_env.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_box_env.py
MIT
def observation_space(self): """Return the observation space. Returns: akro.Dict: Observation space. """ if self.obs_space_type == 'box': return gym.spaces.Dict({ 'achieved_goal': gym.spaces.Box(low=-200., ...
Return the observation space. Returns: akro.Dict: Observation space.
observation_space
python
rlworkgroup/garage
tests/fixtures/envs/dummy/dummy_dict_env.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_dict_env.py
MIT
def action_space(self): """Return the action space. Returns: akro.Box: Action space. """ if self.act_space_type == 'box': return akro.Box(low=-5.0, high=5.0, shape=(1, ), dtype=np.float32) else: return akro.Discrete(5)
Return the action space. Returns: akro.Box: Action space.
action_space
python
rlworkgroup/garage
tests/fixtures/envs/dummy/dummy_dict_env.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_dict_env.py
MIT
def compute_reward(self, achieved_goal, goal, info): """Function to compute new reward. Args: achieved_goal (numpy.ndarray): Achieved goal. goal (numpy.ndarray): Original desired goal. info (dict): Extra information. Returns: float: New computed ...
Function to compute new reward. Args: achieved_goal (numpy.ndarray): Achieved goal. goal (numpy.ndarray): Original desired goal. info (dict): Extra information. Returns: float: New computed reward.
compute_reward
python
rlworkgroup/garage
tests/fixtures/envs/dummy/dummy_dict_env.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_dict_env.py
MIT
def reset(self): """Reset the environment. Returns: np.ndarray: Environment state. """ self.state = np.ones(self._obs_dim, dtype=np.uint8) self._lives = 5 self.step_called = 0 return self.state
Reset the environment. Returns: np.ndarray: Environment state.
reset
python
rlworkgroup/garage
tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
MIT
def step(self, action): """Step the environment. Before gym fixed overflow issue for sample() in np.uint8 environment, we will handle the sampling here. We need high=256 since np.random.uniform sample from [low, high) (includes low, but excludes high). Args: ...
Step the environment. Before gym fixed overflow issue for sample() in np.uint8 environment, we will handle the sampling here. We need high=256 since np.random.uniform sample from [low, high) (includes low, but excludes high). Args: action (int): Action. Ret...
step
python
rlworkgroup/garage
tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
MIT
def __init__(self, frames): """ LazyFrames class from baselines. Openai baselines use this class for FrameStack environment wrapper. It is used for testing garage.envs.wrappers.AtariEnv. garge.envs.wrapper.AtariEnv is used when algorithms are trained using baselines wrap...
LazyFrames class from baselines. Openai baselines use this class for FrameStack environment wrapper. It is used for testing garage.envs.wrappers.AtariEnv. garge.envs.wrapper.AtariEnv is used when algorithms are trained using baselines wrappers, e.g. during benchmarking. ...
__init__
python
rlworkgroup/garage
tests/fixtures/envs/dummy/dummy_discrete_pixel_env_baselines.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/envs/dummy/dummy_discrete_pixel_env_baselines.py
MIT
def fixture_exp(snapshot_config, sess): """Dummy fixture experiment function. Args: snapshot_config (garage.experiment.SnapshotConfig): The snapshot configuration used by Trainer to create the snapshotter. If None, it will create one with default settings. sess (tf.Sessi...
Dummy fixture experiment function. Args: snapshot_config (garage.experiment.SnapshotConfig): The snapshot configuration used by Trainer to create the snapshotter. If None, it will create one with default settings. sess (tf.Session): An optional TensorFlow session. ...
fixture_exp
python
rlworkgroup/garage
tests/fixtures/experiment/fixture_experiment.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/experiment/fixture_experiment.py
MIT
def _build(self, obs_input, step_obs_input, step_hidden, name=None): """Build model. Args: obs_input (tf.Tensor): Entire time-series observation input. step_obs_input (tf.Tensor): Single timestep observation input. step_hidden (tf.Tensor): Hidden state for step. ...
Build model. Args: obs_input (tf.Tensor): Entire time-series observation input. step_obs_input (tf.Tensor): Single timestep observation input. step_hidden (tf.Tensor): Hidden state for step. name (str): Name of the model, also the name scope. Returns: ...
_build
python
rlworkgroup/garage
tests/fixtures/models/simple_categorical_gru_model.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_categorical_gru_model.py
MIT
def _build(self, obs_input, step_obs_input, step_hidden, step_cell, name=None): """Build model. Args: obs_input (tf.Tensor): Entire time-series observation input. step_obs_input (tf.Tensor): Single timest...
Build model. Args: obs_input (tf.Tensor): Entire time-series observation input. step_obs_input (tf.Tensor): Single timestep observation input. step_hidden (tf.Tensor): Hidden state for step. step_cell (tf.Tensor): Cell state for step. name (str): Name...
_build
python
rlworkgroup/garage
tests/fixtures/models/simple_categorical_lstm_model.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_categorical_lstm_model.py
MIT
def _build(self, obs_input, name=None): """Build model. Args: obs_input (tf.Tensor): Observation inputs. name (str): Name of the model, also the name scope. Returns: tf.Tensor: Network outputs. tfp.distributions.OneHotCategorical: Distribution. ...
Build model. Args: obs_input (tf.Tensor): Observation inputs. name (str): Name of the model, also the name scope. Returns: tf.Tensor: Network outputs. tfp.distributions.OneHotCategorical: Distribution.
_build
python
rlworkgroup/garage
tests/fixtures/models/simple_categorical_mlp_model.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_categorical_mlp_model.py
MIT
def _build(self, obs_input, name=None): """Build model given input placeholder(s). Args: obs_input (tf.Tensor): Tensor input for state. name (str): Inner model name, also the variable scope of the inner model, if exist. One example is garage.tf.mo...
Build model given input placeholder(s). Args: obs_input (tf.Tensor): Tensor input for state. name (str): Inner model name, also the variable scope of the inner model, if exist. One example is garage.tf.models.Sequential. Return: tf.Te...
_build
python
rlworkgroup/garage
tests/fixtures/models/simple_cnn_model.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_cnn_model.py
MIT
def _build(self, obs_input, step_obs_input, step_hidden, name=None): """Build model given input placeholder(s). Args: obs_input (tf.Tensor): Place holder for entire time-series inputs. step_obs_input (tf.Tensor): Place holder for step inputs. step_hid...
Build model given input placeholder(s). Args: obs_input (tf.Tensor): Place holder for entire time-series inputs. step_obs_input (tf.Tensor): Place holder for step inputs. step_hidden (tf.Tensor): Place holder for step hidden state. name (str): Inn...
_build
python
rlworkgroup/garage
tests/fixtures/models/simple_gru_model.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_gru_model.py
MIT
def _build(self, obs_input, step_obs_input, step_hidden, step_cell, name=None): """Build model given input placeholder(s). Args: obs_input (tf.Tensor): Place holder for entire time-series inputs. ...
Build model given input placeholder(s). Args: obs_input (tf.Tensor): Place holder for entire time-series inputs. step_obs_input (tf.Tensor): Place holder for step inputs. step_hidden (tf.Tensor): Place holder for step hidden state. step_cell (tf.T...
_build
python
rlworkgroup/garage
tests/fixtures/models/simple_lstm_model.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_lstm_model.py
MIT
def _build(self, obs_input, act_input, name=None): """Build model given input placeholder(s). Args: obs_input (tf.Tensor): Tensor input for state. act_input (tf.Tensor): Tensor input for action. name (str): Inner model name, also the variable scope of the ...
Build model given input placeholder(s). Args: obs_input (tf.Tensor): Tensor input for state. act_input (tf.Tensor): Tensor input for action. name (str): Inner model name, also the variable scope of the inner model, if exist. One example is gar...
_build
python
rlworkgroup/garage
tests/fixtures/models/simple_mlp_merge_model.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/models/simple_mlp_merge_model.py
MIT
def get_actions(self, observations): """Get multiple actions from this policy for the input observations. Args: observations (numpy.ndarray): Observations from environment. Returns: numpy.ndarray: Predicted actions. dict: Distribution parameters. ""...
Get multiple actions from this policy for the input observations. Args: observations (numpy.ndarray): Observations from environment. Returns: numpy.ndarray: Predicted actions. dict: Distribution parameters.
get_actions
python
rlworkgroup/garage
tests/fixtures/policies/dummy_policy.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/policies/dummy_policy.py
MIT
def ray_local_session_fixture(): """Initializes Ray and shuts down Ray in local mode. Yields: None: Yield is for purposes of pytest module style. All statements before the yield are apart of module setup, and all statements after the yield are apart of module teardown. """ ...
Initializes Ray and shuts down Ray in local mode. Yields: None: Yield is for purposes of pytest module style. All statements before the yield are apart of module setup, and all statements after the yield are apart of module teardown.
ray_local_session_fixture
python
rlworkgroup/garage
tests/fixtures/sampler/ray_fixtures.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/sampler/ray_fixtures.py
MIT
def ray_session_fixture(): """Initializes Ray and shuts down Ray. Yields: None: Yield is for purposes of pytest module style. All statements before the yield are apart of module setup, and all statements after the yield are apart of module teardown. """ if not ray.is_in...
Initializes Ray and shuts down Ray. Yields: None: Yield is for purposes of pytest module style. All statements before the yield are apart of module setup, and all statements after the yield are apart of module teardown.
ray_session_fixture
python
rlworkgroup/garage
tests/fixtures/sampler/ray_fixtures.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/sampler/ray_fixtures.py
MIT
def train_once(self, itr, paths): """Perform one step of policy optimization given one batch of samples. Args: itr (int): Iteration number. paths (list[dict]): A list of collected paths. """
Perform one step of policy optimization given one batch of samples. Args: itr (int): Iteration number. paths (list[dict]): A list of collected paths.
train_once
python
rlworkgroup/garage
tests/fixtures/tf/algos/dummy_off_policy_algo.py
https://github.com/rlworkgroup/garage/blob/master/tests/fixtures/tf/algos/dummy_off_policy_algo.py
MIT
def test_tf_make_optimizer_with_type(self): """Test make_optimizer function with type as first argument.""" optimizer_type = tf.compat.v1.train.AdamOptimizer lr = 0.123 optimizer = make_optimizer(optimizer_type, learning_rate=lr, ...
Test make_optimizer function with type as first argument.
test_tf_make_optimizer_with_type
python
rlworkgroup/garage
tests/garage/test_functions.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/test_functions.py
MIT
def test_tf_make_optimizer_with_tuple(self): """Test make_optimizer function with tuple as first argument.""" lr = 0.123 optimizer_type = (tf.compat.v1.train.AdamOptimizer, { 'learning_rate': lr }) optimizer = make_optimizer(optimizer_type) # pylint: disable=i...
Test make_optimizer function with tuple as first argument.
test_tf_make_optimizer_with_tuple
python
rlworkgroup/garage
tests/garage/test_functions.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/test_functions.py
MIT
def _init_multi_env_wrapper(self, env_names, sample_strategy=uniform_random_strategy): """helper function to initialize multi_env_wrapper Args: env_names (list(str)): List of Environment names. sample_strategy (func...
helper function to initialize multi_env_wrapper Args: env_names (list(str)): List of Environment names. sample_strategy (func): A sampling strategy. Returns: garage.envs.multi_env_wrapper: Multi env wrapper.
_init_multi_env_wrapper
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT
def test_tasks_from_same_env(self): """test init with multiple tasks from same env""" envs = ['CartPole-v0', 'CartPole-v0'] mt_env = self._init_multi_env_wrapper(envs) assert mt_env.num_tasks == 2
test init with multiple tasks from same env
test_tasks_from_same_env
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT
def test_tasks_from_different_envs(self): """test init with multiple tasks from different env""" envs = ['CartPole-v0', 'CartPole-v1'] mt_env = self._init_multi_env_wrapper(envs) assert mt_env.num_tasks == 2
test init with multiple tasks from different env
test_tasks_from_different_envs
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT
def test_raise_exception_when_different_obs_space(self): """test if exception is raised when using tasks with different obs space""" # noqa: E501 envs = ['CartPole-v0', 'Blackjack-v0'] with pytest.raises(ValueError): _ = self._init_multi_env_wrapper(envs)
test if exception is raised when using tasks with different obs space
test_raise_exception_when_different_obs_space
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT
def test_raise_exception_when_different_action_space(self): """test if exception is raised when using tasks with different action space""" # noqa: E501 envs = ['LunarLander-v2', 'LunarLanderContinuous-v2'] with pytest.raises(ValueError): _ = self._init_multi_env_wrapper(envs)
test if exception is raised when using tasks with different action space
test_raise_exception_when_different_action_space
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT
def test_default_active_task_is_none(self): """test if default active task is none""" envs = ['CartPole-v0', 'CartPole-v1'] mt_env = self._init_multi_env_wrapper( envs, sample_strategy=round_robin_strategy) assert mt_env._active_task_index is None
test if default active task is none
test_default_active_task_is_none
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT
def test_one_hot_observation_space(self): """test one hot representation of observation space""" envs = ['CartPole-v0', 'CartPole-v1'] mt_env = self._init_multi_env_wrapper(envs) cartpole = GymEnv('CartPole-v0') cartpole_lb, cartpole_ub = cartpole.observation_space.bounds ...
test one hot representation of observation space
test_one_hot_observation_space
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT
def test_task_remains_same_between_multiple_step_calls(self): """test if active_task remains same between multiple step calls""" envs = ['CartPole-v0', 'CartPole-v1'] mt_env = self._init_multi_env_wrapper( envs, sample_strategy=round_robin_strategy) mt_env.reset() tas...
test if active_task remains same between multiple step calls
test_task_remains_same_between_multiple_step_calls
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT
def test_one_hot_observation(self): """test if output of step function is correct""" envs = ['CartPole-v0', 'CartPole-v0'] mt_env = self._init_multi_env_wrapper( envs, sample_strategy=round_robin_strategy) obs, _ = mt_env.reset() assert (obs[-2:] == np.array([1., 0.]...
test if output of step function is correct
test_one_hot_observation
python
rlworkgroup/garage
tests/garage/envs/test_multi_env_wrapper.py
https://github.com/rlworkgroup/garage/blob/master/tests/garage/envs/test_multi_env_wrapper.py
MIT