diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6689075838f36f96f346794dae846542cac03a23 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/action_dist.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/action_dist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d62c5642106d870f9d2a110675e45b1fc300c2d3 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/action_dist.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/catalog.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/catalog.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57633371ec9d88a05bddb44e18c8a56a6f0821ba Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/catalog.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/distributions.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3336354c4e3475cde8b8fa888ec2464469b5d840 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/distributions.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/modelv2.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/modelv2.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..03d4e72f839fe9ffd473642b9a1c5de9e2c5bb31 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/modelv2.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/preprocessors.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/preprocessors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2da25d2cc2ce4a481431e0cd5a40286c0c8a66c3 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/preprocessors.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/repeated_values.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/repeated_values.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b5411075cc8d58e944f1e99825adf488b822343 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/repeated_values.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/utils.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f606b860085afe95636f0677ac8672ed1c4d7df Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/utils.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..86d33b39d455bcd6f43da151586444df41711b1c --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__init__.py @@ -0,0 +1,11 @@ +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from 
ray.rllib.models.tf.fcnet import FullyConnectedNetwork +from ray.rllib.models.tf.recurrent_net import RecurrentNetwork +from ray.rllib.models.tf.visionnet import VisionNetwork + +__all__ = [ + "FullyConnectedNetwork", + "RecurrentNetwork", + "TFModelV2", + "VisionNetwork", +] diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..637b1b55cde9dc6f4ce59060a3b96834ae3c63dc Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/attention_net.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/attention_net.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eca3ffd9477a259dd4b414d6d5091c460c2c1bb8 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/attention_net.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/complex_input_net.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/complex_input_net.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0759ec994b2f473725adadc5b5203a0a7fee0506 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/complex_input_net.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/fcnet.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/fcnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58c9e365dabba4b24fa6a612856576a2a309e0bc Binary files /dev/null and 
b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/fcnet.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/misc.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66679f4cfedd408ced573ee0c71d4aa743aa9f96 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/misc.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/noop.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/noop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a31f967a0481b53c18a61be8a0be78d1c86e8cfd Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/noop.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/recurrent_net.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/recurrent_net.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a1438e1871ccd056a6406c750a12825b77e3be2 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/recurrent_net.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_action_dist.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_action_dist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b7ca1233bbc0963b0e4fadd41eb76fb448f0b53 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_action_dist.cpython-310.pyc differ diff --git 
a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_distributions.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..675b315602578b1057ebed95d2b9d3f9bbb48910 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_distributions.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_modelv2.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_modelv2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33251d23584b2cc5c994407e522de4e13c609070 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_modelv2.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/visionnet.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/visionnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5068292091014ddf791f306ce7955be6e368cc0 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/visionnet.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/attention_net.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/attention_net.py new file mode 100644 index 0000000000000000000000000000000000000000..886580fce177a0e075eb2d252ef869e181f5ae1b --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/attention_net.py @@ -0,0 +1,573 @@ +""" +[1] - Attention Is All You Need - Vaswani, Jones, Shazeer, Parmar, + Uszkoreit, Gomez, Kaiser - Google Brain/Research, U Toronto - 2017. + https://arxiv.org/pdf/1706.03762.pdf +[2] - Stabilizing Transformers for Reinforcement Learning - E. 
Parisotto + et al. - DeepMind - 2019. https://arxiv.org/pdf/1910.06764.pdf +[3] - Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context. + Z. Dai, Z. Yang, et al. - Carnegie Mellon U - 2019. + https://www.aclweb.org/anthology/P19-1285.pdf +""" +import gymnasium as gym +from gymnasium.spaces import Box, Discrete, MultiDiscrete +import numpy as np +import tree # pip install dm_tree +from typing import Any, Dict, Optional, Union + +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.tf.layers import ( + GRUGate, + RelativeMultiHeadAttention, + SkipConnection, +) +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from ray.rllib.models.tf.recurrent_net import RecurrentNetwork +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.view_requirement import ViewRequirement +from ray.rllib.utils.annotations import OldAPIStack, override +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space +from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor, one_hot +from ray.rllib.utils.typing import ModelConfigDict, TensorType, List +from ray.rllib.utils.deprecation import deprecation_warning +from ray.util import log_once + +tf1, tf, tfv = try_import_tf() + + +@OldAPIStack +class PositionwiseFeedforward(tf.keras.layers.Layer if tf else object): + """A 2x linear layer with ReLU activation in between described in [1]. + + Each timestep coming from the attention head will be passed through this + layer separately. 
+ """ + + def __init__( + self, + out_dim: int, + hidden_dim: int, + output_activation: Optional[Any] = None, + **kwargs, + ): + super().__init__(**kwargs) + + self._hidden_layer = tf.keras.layers.Dense( + hidden_dim, + activation=tf.nn.relu, + ) + + self._output_layer = tf.keras.layers.Dense( + out_dim, activation=output_activation + ) + if log_once("positionwise_feedforward_tf"): + deprecation_warning( + old="rllib.models.tf.attention_net.PositionwiseFeedforward", + ) + + def call(self, inputs: TensorType, **kwargs) -> TensorType: + del kwargs + output = self._hidden_layer(inputs) + return self._output_layer(output) + + +@OldAPIStack +class TrXLNet(RecurrentNetwork): + """A TrXL net Model described in [1].""" + + def __init__( + self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + num_outputs: int, + model_config: ModelConfigDict, + name: str, + num_transformer_units: int, + attention_dim: int, + num_heads: int, + head_dim: int, + position_wise_mlp_dim: int, + ): + """Initializes a TrXLNet object. + + Args: + num_transformer_units: The number of Transformer repeats to + use (denoted L in [2]). + attention_dim: The input and output dimensions of one + Transformer unit. + num_heads: The number of attention heads to use in parallel. + Denoted as `H` in [3]. + head_dim: The dimension of a single(!) attention head within + a multi-head attention unit. Denoted as `d` in [3]. + position_wise_mlp_dim: The dimension of the hidden layer + within the position-wise MLP (after the multi-head attention + block within one Transformer unit). This is the size of the + first of the two layers within the PositionwiseFeedforward. The + second layer always has size=`attention_dim`. 
+ """ + if log_once("trxl_net_tf"): + deprecation_warning( + old="rllib.models.tf.attention_net.TrXLNet", + ) + super().__init__( + observation_space, action_space, num_outputs, model_config, name + ) + + self.num_transformer_units = num_transformer_units + self.attention_dim = attention_dim + self.num_heads = num_heads + self.head_dim = head_dim + self.max_seq_len = model_config["max_seq_len"] + self.obs_dim = observation_space.shape[0] + + inputs = tf.keras.layers.Input( + shape=(self.max_seq_len, self.obs_dim), name="inputs" + ) + E_out = tf.keras.layers.Dense(attention_dim)(inputs) + + for _ in range(self.num_transformer_units): + MHA_out = SkipConnection( + RelativeMultiHeadAttention( + out_dim=attention_dim, + num_heads=num_heads, + head_dim=head_dim, + input_layernorm=False, + output_activation=None, + ), + fan_in_layer=None, + )(E_out) + E_out = SkipConnection( + PositionwiseFeedforward(attention_dim, position_wise_mlp_dim) + )(MHA_out) + E_out = tf.keras.layers.LayerNormalization(axis=-1)(E_out) + + # Postprocess TrXL output with another hidden layer and compute values. + logits = tf.keras.layers.Dense( + self.num_outputs, activation=tf.keras.activations.linear, name="logits" + )(E_out) + + self.base_model = tf.keras.models.Model([inputs], [logits]) + + @override(RecurrentNetwork) + def forward_rnn( + self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType + ) -> (TensorType, List[TensorType]): + # To make Attention work with current RLlib's ModelV2 API: + # We assume `state` is the history of L recent observations (all + # concatenated into one tensor) and append the current inputs to the + # end and only keep the most recent (up to `max_seq_len`). This allows + # us to deal with timestep-wise inference and full sequence training + # within the same logic. 
+ observations = state[0] + observations = tf.concat((observations, inputs), axis=1)[:, -self.max_seq_len :] + logits = self.base_model([observations]) + T = tf.shape(inputs)[1] # Length of input segment (time). + logits = logits[:, -T:] + + return logits, [observations] + + @override(RecurrentNetwork) + def get_initial_state(self) -> List[np.ndarray]: + # State is the T last observations concat'd together into one Tensor. + # Plus all Transformer blocks' E(l) outputs concat'd together (up to + # tau timesteps). + return [np.zeros((self.max_seq_len, self.obs_dim), np.float32)] + + +class GTrXLNet(RecurrentNetwork): + """A GTrXL net Model described in [2]. + + This is still in an experimental phase. + Can be used as a drop-in replacement for LSTMs in PPO and IMPALA. + + To use this network as a replacement for an RNN, configure your Algorithm + as follows: + + Examples: + >> config["model"]["custom_model"] = GTrXLNet + >> config["model"]["max_seq_len"] = 10 + >> config["model"]["custom_model_config"] = { + >> num_transformer_units=1, + >> attention_dim=32, + >> num_heads=2, + >> memory_inference=100, + >> memory_training=50, + >> etc.. + >> } + """ + + def __init__( + self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + num_outputs: Optional[int], + model_config: ModelConfigDict, + name: str, + *, + num_transformer_units: int = 1, + attention_dim: int = 64, + num_heads: int = 2, + memory_inference: int = 50, + memory_training: int = 50, + head_dim: int = 32, + position_wise_mlp_dim: int = 32, + init_gru_gate_bias: float = 2.0, + ): + """Initializes a GTrXLNet instance. + + Args: + num_transformer_units: The number of Transformer repeats to + use (denoted L in [2]). + attention_dim: The input and output dimensions of one + Transformer unit. + num_heads: The number of attention heads to use in parallel. + Denoted as `H` in [3]. 
+ memory_inference: The number of timesteps to concat (time + axis) and feed into the next transformer unit as inference + input. The first transformer unit will receive this number of + past observations (plus the current one), instead. + memory_training: The number of timesteps to concat (time + axis) and feed into the next transformer unit as training + input (plus the actual input sequence of len=max_seq_len). + The first transformer unit will receive this number of + past observations (plus the input sequence), instead. + head_dim: The dimension of a single(!) attention head within + a multi-head attention unit. Denoted as `d` in [3]. + position_wise_mlp_dim: The dimension of the hidden layer + within the position-wise MLP (after the multi-head attention + block within one Transformer unit). This is the size of the + first of the two layers within the PositionwiseFeedforward. The + second layer always has size=`attention_dim`. + init_gru_gate_bias: Initial bias values for the GRU gates + (two GRUs per Transformer unit, one after the MHA, one after + the position-wise MLP). + """ + super().__init__( + observation_space, action_space, num_outputs, model_config, name + ) + + self.num_transformer_units = num_transformer_units + self.attention_dim = attention_dim + self.num_heads = num_heads + self.memory_inference = memory_inference + self.memory_training = memory_training + self.head_dim = head_dim + self.max_seq_len = model_config["max_seq_len"] + self.obs_dim = observation_space.shape[0] + + # Raw observation input (plus (None) time axis). + input_layer = tf.keras.layers.Input(shape=(None, self.obs_dim), name="inputs") + memory_ins = [ + tf.keras.layers.Input( + shape=(None, self.attention_dim), + dtype=tf.float32, + name="memory_in_{}".format(i), + ) + for i in range(self.num_transformer_units) + ] + + # Map observation dim to input/output transformer (attention) dim. 
+ E_out = tf.keras.layers.Dense(self.attention_dim)(input_layer) + # Output, collected and concat'd to build the internal, tau-len + # Memory units used for additional contextual information. + memory_outs = [E_out] + + # 2) Create L Transformer blocks according to [2]. + for i in range(self.num_transformer_units): + # RelativeMultiHeadAttention part. + MHA_out = SkipConnection( + RelativeMultiHeadAttention( + out_dim=self.attention_dim, + num_heads=num_heads, + head_dim=head_dim, + input_layernorm=True, + output_activation=tf.nn.relu, + ), + fan_in_layer=GRUGate(init_gru_gate_bias), + name="mha_{}".format(i + 1), + )(E_out, memory=memory_ins[i]) + # Position-wise MLP part. + E_out = SkipConnection( + tf.keras.Sequential( + ( + tf.keras.layers.LayerNormalization(axis=-1), + PositionwiseFeedforward( + out_dim=self.attention_dim, + hidden_dim=position_wise_mlp_dim, + output_activation=tf.nn.relu, + ), + ) + ), + fan_in_layer=GRUGate(init_gru_gate_bias), + name="pos_wise_mlp_{}".format(i + 1), + )(MHA_out) + # Output of position-wise MLP == E(l-1), which is concat'd + # to the current Mem block (M(l-1)) to yield E~(l-1), which is then + # used by the next transformer block. + memory_outs.append(E_out) + + self._logits = None + self._value_out = None + + # Postprocess TrXL output with another hidden layer and compute values. + if num_outputs is not None: + self._logits = tf.keras.layers.Dense( + self.num_outputs, activation=None, name="logits" + )(E_out) + values_out = tf.keras.layers.Dense(1, activation=None, name="values")(E_out) + outs = [self._logits, values_out] + else: + outs = [E_out] + self.num_outputs = self.attention_dim + + self.trxl_model = tf.keras.Model( + inputs=[input_layer] + memory_ins, outputs=outs + memory_outs[:-1] + ) + + self.trxl_model.summary() + + # __sphinx_doc_begin__ + # Setup trajectory views (`memory-inference` x past memory outs). 
+ for i in range(self.num_transformer_units): + space = Box(-1.0, 1.0, shape=(self.attention_dim,)) + self.view_requirements["state_in_{}".format(i)] = ViewRequirement( + "state_out_{}".format(i), + shift="-{}:-1".format(self.memory_inference), + # Repeat the incoming state every max-seq-len times. + batch_repeat_value=self.max_seq_len, + space=space, + ) + self.view_requirements["state_out_{}".format(i)] = ViewRequirement( + space=space, used_for_training=False + ) + # __sphinx_doc_end__ + + @override(ModelV2) + def forward( + self, input_dict, state: List[TensorType], seq_lens: TensorType + ) -> (TensorType, List[TensorType]): + assert seq_lens is not None + + # Add the time dim to observations. + B = tf.shape(seq_lens)[0] + observations = input_dict[SampleBatch.OBS] + + shape = tf.shape(observations) + T = shape[0] // B + observations = tf.reshape(observations, tf.concat([[-1, T], shape[1:]], axis=0)) + + all_out = self.trxl_model([observations] + state) + + if self._logits is not None: + out = tf.reshape(all_out[0], [-1, self.num_outputs]) + self._value_out = all_out[1] + memory_outs = all_out[2:] + else: + out = tf.reshape(all_out[0], [-1, self.attention_dim]) + memory_outs = all_out[1:] + + return out, [tf.reshape(m, [-1, self.attention_dim]) for m in memory_outs] + + @override(RecurrentNetwork) + def get_initial_state(self) -> List[np.ndarray]: + return [ + tf.zeros(self.view_requirements["state_in_{}".format(i)].space.shape) + for i in range(self.num_transformer_units) + ] + + @override(ModelV2) + def value_function(self) -> TensorType: + return tf.reshape(self._value_out, [-1]) + + +class AttentionWrapper(TFModelV2): + """GTrXL wrapper serving as interface for ModelV2s that set use_attention.""" + + def __init__( + self, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + num_outputs: int, + model_config: ModelConfigDict, + name: str, + ): + if log_once("attention_wrapper_tf_deprecation"): + deprecation_warning( + 
old="ray.rllib.models.tf.attention_net.AttentionWrapper" + ) + super().__init__(obs_space, action_space, None, model_config, name) + + self.use_n_prev_actions = model_config["attention_use_n_prev_actions"] + self.use_n_prev_rewards = model_config["attention_use_n_prev_rewards"] + + self.action_space_struct = get_base_struct_from_space(self.action_space) + self.action_dim = 0 + + for space in tree.flatten(self.action_space_struct): + if isinstance(space, Discrete): + self.action_dim += space.n + elif isinstance(space, MultiDiscrete): + self.action_dim += np.sum(space.nvec) + elif space.shape is not None: + self.action_dim += int(np.prod(space.shape)) + else: + self.action_dim += int(len(space)) + + # Add prev-action/reward nodes to input to LSTM. + if self.use_n_prev_actions: + self.num_outputs += self.use_n_prev_actions * self.action_dim + if self.use_n_prev_rewards: + self.num_outputs += self.use_n_prev_rewards + + cfg = model_config + + self.attention_dim = cfg["attention_dim"] + + if self.num_outputs is not None: + in_space = gym.spaces.Box( + float("-inf"), float("inf"), shape=(self.num_outputs,), dtype=np.float32 + ) + else: + in_space = obs_space + + # Construct GTrXL sub-module w/ num_outputs=None (so it does not + # create a logits/value output; we'll do this ourselves in this wrapper + # here). + self.gtrxl = GTrXLNet( + in_space, + action_space, + None, + model_config, + "gtrxl", + num_transformer_units=cfg["attention_num_transformer_units"], + attention_dim=self.attention_dim, + num_heads=cfg["attention_num_heads"], + head_dim=cfg["attention_head_dim"], + memory_inference=cfg["attention_memory_inference"], + memory_training=cfg["attention_memory_training"], + position_wise_mlp_dim=cfg["attention_position_wise_mlp_dim"], + init_gru_gate_bias=cfg["attention_init_gru_gate_bias"], + ) + + # `self.num_outputs` right now is the number of nodes coming from the + # attention net. 
+ input_ = tf.keras.layers.Input(shape=(self.gtrxl.num_outputs,)) + + # Set final num_outputs to correct value (depending on action space). + self.num_outputs = num_outputs + + # Postprocess GTrXL output with another hidden layer and compute + # values. + out = tf.keras.layers.Dense(self.num_outputs, activation=None)(input_) + self._logits_branch = tf.keras.models.Model([input_], [out]) + + out = tf.keras.layers.Dense(1, activation=None)(input_) + self._value_branch = tf.keras.models.Model([input_], [out]) + + self.view_requirements = self.gtrxl.view_requirements + self.view_requirements["obs"].space = self.obs_space + + # Add prev-a/r to this model's view, if required. + if self.use_n_prev_actions: + self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement( + SampleBatch.ACTIONS, + space=self.action_space, + shift="-{}:-1".format(self.use_n_prev_actions), + ) + if self.use_n_prev_rewards: + self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement( + SampleBatch.REWARDS, shift="-{}:-1".format(self.use_n_prev_rewards) + ) + + @override(RecurrentNetwork) + def forward( + self, + input_dict: Dict[str, TensorType], + state: List[TensorType], + seq_lens: TensorType, + ) -> (TensorType, List[TensorType]): + assert seq_lens is not None + # Push obs through "unwrapped" net's `forward()` first. + wrapped_out, _ = self._wrapped_forward(input_dict, [], None) + + # Concat. prev-action/reward if required. + prev_a_r = [] + + # Prev actions. + if self.use_n_prev_actions: + prev_n_actions = input_dict[SampleBatch.PREV_ACTIONS] + # If actions are not processed yet (in their original form as + # have been sent to environment): + # Flatten/one-hot into 1D array. + if self.model_config["_disable_action_flattening"]: + # Merge prev n actions into flat tensor. + flat = flatten_inputs_to_1d_tensor( + prev_n_actions, + spaces_struct=self.action_space_struct, + time_axis=True, + ) + # Fold time-axis into flattened data. 
+ flat = tf.reshape(flat, [tf.shape(flat)[0], -1]) + prev_a_r.append(flat) + # If actions are already flattened (but not one-hot'd yet!), + # one-hot discrete/multi-discrete actions here and concatenate the + # n most recent actions together. + else: + if isinstance(self.action_space, Discrete): + for i in range(self.use_n_prev_actions): + prev_a_r.append( + one_hot(prev_n_actions[:, i], self.action_space) + ) + elif isinstance(self.action_space, MultiDiscrete): + for i in range( + 0, self.use_n_prev_actions, self.action_space.shape[0] + ): + prev_a_r.append( + one_hot( + tf.cast( + prev_n_actions[ + :, i : i + self.action_space.shape[0] + ], + tf.float32, + ), + space=self.action_space, + ) + ) + else: + prev_a_r.append( + tf.reshape( + tf.cast(prev_n_actions, tf.float32), + [-1, self.use_n_prev_actions * self.action_dim], + ) + ) + # Prev rewards. + if self.use_n_prev_rewards: + prev_a_r.append( + tf.reshape( + tf.cast(input_dict[SampleBatch.PREV_REWARDS], tf.float32), + [-1, self.use_n_prev_rewards], + ) + ) + + # Concat prev. actions + rewards to the "main" input. + if prev_a_r: + wrapped_out = tf.concat([wrapped_out] + prev_a_r, axis=1) + + # Then through our GTrXL. + input_dict["obs_flat"] = input_dict["obs"] = wrapped_out + + self._features, memory_outs = self.gtrxl(input_dict, state, seq_lens) + model_out = self._logits_branch(self._features) + return model_out, memory_outs + + @override(ModelV2) + def value_function(self) -> TensorType: + assert self._features is not None, "Must call forward() first!" 
+ return tf.reshape(self._value_branch(self._features), [-1]) + + @override(ModelV2) + def get_initial_state(self) -> Union[List[np.ndarray], List[TensorType]]: + return [ + np.zeros(self.gtrxl.view_requirements["state_in_{}".format(i)].space.shape) + for i in range(self.gtrxl.num_transformer_units) + ] diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/complex_input_net.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/complex_input_net.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c41be4067a1473107a39a1f4f7ec94d8a99f27 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/complex_input_net.py @@ -0,0 +1,214 @@ +from gymnasium.spaces import Box, Discrete, MultiDiscrete +import numpy as np +import tree # pip install dm_tree + +from ray.rllib.models.catalog import ModelCatalog +from ray.rllib.models.modelv2 import ModelV2, restore_original_dimensions +from ray.rllib.models.tf.misc import normc_initializer +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from ray.rllib.models.utils import get_filter_config +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.utils.annotations import OldAPIStack, override +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.spaces.space_utils import flatten_space +from ray.rllib.utils.tf_utils import one_hot + +tf1, tf, tfv = try_import_tf() + + +# __sphinx_doc_begin__ +@OldAPIStack +class ComplexInputNetwork(TFModelV2): + """TFModelV2 concat'ing CNN outputs to flat input(s), followed by FC(s). + + Note: This model should be used for complex (Dict or Tuple) observation + spaces that have one or more image components. + + The data flow is as follows: + + `obs` (e.g. Tuple[img0, img1, discrete0]) -> `CNN0 + CNN1 + ONE-HOT` + `CNN0 + CNN1 + ONE-HOT` -> concat all flat outputs -> `out` + `out` -> (optional) FC-stack -> `out2` + `out2` -> action (logits) and vaulue heads. 
+ """ + + def __init__(self, obs_space, action_space, num_outputs, model_config, name): + + self.original_space = ( + obs_space.original_space + if hasattr(obs_space, "original_space") + else obs_space + ) + + self.processed_obs_space = ( + self.original_space + if model_config.get("_disable_preprocessor_api") + else obs_space + ) + super().__init__( + self.original_space, action_space, num_outputs, model_config, name + ) + + self.flattened_input_space = flatten_space(self.original_space) + + # Build the CNN(s) given obs_space's image components. + self.cnns = {} + self.one_hot = {} + self.flatten_dims = {} + self.flatten = {} + concat_size = 0 + for i, component in enumerate(self.flattened_input_space): + # Image space. + if len(component.shape) == 3 and isinstance(component, Box): + config = { + "conv_filters": model_config["conv_filters"] + if "conv_filters" in model_config + else get_filter_config(component.shape), + "conv_activation": model_config.get("conv_activation"), + "post_fcnet_hiddens": [], + } + self.cnns[i] = ModelCatalog.get_model_v2( + component, + action_space, + num_outputs=None, + model_config=config, + framework="tf", + name="cnn_{}".format(i), + ) + concat_size += int(self.cnns[i].num_outputs) + # Discrete|MultiDiscrete inputs -> One-hot encode. + elif isinstance(component, (Discrete, MultiDiscrete)): + if isinstance(component, Discrete): + size = component.n + else: + size = np.sum(component.nvec) + config = { + "fcnet_hiddens": model_config["fcnet_hiddens"], + "fcnet_activation": model_config.get("fcnet_activation"), + "post_fcnet_hiddens": [], + } + self.one_hot[i] = ModelCatalog.get_model_v2( + Box(-1.0, 1.0, (size,), np.float32), + action_space, + num_outputs=None, + model_config=config, + framework="tf", + name="one_hot_{}".format(i), + ) + concat_size += int(self.one_hot[i].num_outputs) + # Everything else (1D Box). 
+ else: + size = int(np.prod(component.shape)) + config = { + "fcnet_hiddens": model_config["fcnet_hiddens"], + "fcnet_activation": model_config.get("fcnet_activation"), + "post_fcnet_hiddens": [], + } + self.flatten[i] = ModelCatalog.get_model_v2( + Box(-1.0, 1.0, (size,), np.float32), + action_space, + num_outputs=None, + model_config=config, + framework="tf", + name="flatten_{}".format(i), + ) + self.flatten_dims[i] = size + concat_size += int(self.flatten[i].num_outputs) + + # Optional post-concat FC-stack. + post_fc_stack_config = { + "fcnet_hiddens": model_config.get("post_fcnet_hiddens", []), + "fcnet_activation": model_config.get("post_fcnet_activation", "relu"), + } + self.post_fc_stack = ModelCatalog.get_model_v2( + Box(float("-inf"), float("inf"), shape=(concat_size,), dtype=np.float32), + self.action_space, + None, + post_fc_stack_config, + framework="tf", + name="post_fc_stack", + ) + + # Actions and value heads. + self.logits_and_value_model = None + self._value_out = None + if num_outputs: + # Action-distribution head. + concat_layer = tf.keras.layers.Input((self.post_fc_stack.num_outputs,)) + logits_layer = tf.keras.layers.Dense( + num_outputs, + activation=None, + kernel_initializer=normc_initializer(0.01), + name="logits", + )(concat_layer) + + # Create the value branch model. + value_layer = tf.keras.layers.Dense( + 1, + activation=None, + kernel_initializer=normc_initializer(0.01), + name="value_out", + )(concat_layer) + self.logits_and_value_model = tf.keras.models.Model( + concat_layer, [logits_layer, value_layer] + ) + else: + self.num_outputs = self.post_fc_stack.num_outputs + + @override(ModelV2) + def forward(self, input_dict, state, seq_lens): + if SampleBatch.OBS in input_dict and "obs_flat" in input_dict: + orig_obs = input_dict[SampleBatch.OBS] + else: + orig_obs = restore_original_dimensions( + input_dict[SampleBatch.OBS], self.processed_obs_space, tensorlib="tf" + ) + # Push image observations through our CNNs. 
+ outs = [] + for i, component in enumerate(tree.flatten(orig_obs)): + if i in self.cnns: + cnn_out, _ = self.cnns[i](SampleBatch({SampleBatch.OBS: component})) + outs.append(cnn_out) + elif i in self.one_hot: + if "int" in component.dtype.name: + one_hot_in = { + SampleBatch.OBS: one_hot( + component, self.flattened_input_space[i] + ) + } + else: + one_hot_in = {SampleBatch.OBS: component} + one_hot_out, _ = self.one_hot[i](SampleBatch(one_hot_in)) + outs.append(one_hot_out) + else: + nn_out, _ = self.flatten[i]( + SampleBatch( + { + SampleBatch.OBS: tf.cast( + tf.reshape(component, [-1, self.flatten_dims[i]]), + tf.float32, + ) + } + ) + ) + outs.append(nn_out) + # Concat all outputs and the non-image inputs. + out = tf.concat(outs, axis=1) + # Push through (optional) FC-stack (this may be an empty stack). + out, _ = self.post_fc_stack(SampleBatch({SampleBatch.OBS: out})) + + # No logits/value branches. + if not self.logits_and_value_model: + return out, [] + + # Logits- and value branches. 
+ logits, values = self.logits_and_value_model(out) + self._value_out = tf.reshape(values, [-1]) + return logits, [] + + @override(ModelV2) + def value_function(self): + return self._value_out + + +# __sphinx_doc_end__ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/fcnet.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/fcnet.py new file mode 100644 index 0000000000000000000000000000000000000000..56a09de0361acf371f5d6b8ab60fc7d2790565c7 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/fcnet.py @@ -0,0 +1,148 @@ +import numpy as np +import gymnasium as gym +from typing import Dict + +from ray.rllib.models.tf.misc import normc_initializer +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from ray.rllib.models.utils import get_activation_fn +from ray.rllib.utils.annotations import OldAPIStack +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.typing import TensorType, List, ModelConfigDict + +tf1, tf, tfv = try_import_tf() + + +@OldAPIStack +class FullyConnectedNetwork(TFModelV2): + """Generic fully connected network implemented in ModelV2 API.""" + + def __init__( + self, + obs_space: gym.spaces.Space, + action_space: gym.spaces.Space, + num_outputs: int, + model_config: ModelConfigDict, + name: str, + ): + super(FullyConnectedNetwork, self).__init__( + obs_space, action_space, num_outputs, model_config, name + ) + + hiddens = list(model_config.get("fcnet_hiddens", [])) + list( + model_config.get("post_fcnet_hiddens", []) + ) + activation = model_config.get("fcnet_activation") + if not model_config.get("fcnet_hiddens", []): + activation = model_config.get("post_fcnet_activation") + activation = get_activation_fn(activation) + no_final_linear = model_config.get("no_final_linear") + vf_share_layers = model_config.get("vf_share_layers") + free_log_std = model_config.get("free_log_std") + + # Generate free-floating bias variables for the second half of + # the outputs. 
+ if free_log_std: + assert num_outputs % 2 == 0, ( + "num_outputs must be divisible by two", + num_outputs, + ) + num_outputs = num_outputs // 2 + self.log_std_var = tf.Variable( + [0.0] * num_outputs, dtype=tf.float32, name="log_std" + ) + + # We are using obs_flat, so take the flattened shape as input. + inputs = tf.keras.layers.Input( + shape=(int(np.prod(obs_space.shape)),), name="observations" + ) + # Last hidden layer output (before logits outputs). + last_layer = inputs + # The action distribution outputs. + logits_out = None + i = 1 + + # Create layers 0 to second-last. + for size in hiddens[:-1]: + last_layer = tf.keras.layers.Dense( + size, + name="fc_{}".format(i), + activation=activation, + kernel_initializer=normc_initializer(1.0), + )(last_layer) + i += 1 + + # The last layer is adjusted to be of size num_outputs, but it's a + # layer with activation. + if no_final_linear and num_outputs: + logits_out = tf.keras.layers.Dense( + num_outputs, + name="fc_out", + activation=activation, + kernel_initializer=normc_initializer(1.0), + )(last_layer) + # Finish the layers with the provided sizes (`hiddens`), plus - + # iff num_outputs > 0 - a last linear layer of size num_outputs. + else: + if len(hiddens) > 0: + last_layer = tf.keras.layers.Dense( + hiddens[-1], + name="fc_{}".format(i), + activation=activation, + kernel_initializer=normc_initializer(1.0), + )(last_layer) + if num_outputs: + logits_out = tf.keras.layers.Dense( + num_outputs, + name="fc_out", + activation=None, + kernel_initializer=normc_initializer(0.01), + )(last_layer) + # Adjust num_outputs to be the number of nodes in the last layer. + else: + self.num_outputs = ([int(np.prod(obs_space.shape))] + hiddens[-1:])[-1] + + # Concat the log std vars to the end of the state-dependent means. 
+ if free_log_std and logits_out is not None: + + def tiled_log_std(x): + return tf.tile(tf.expand_dims(self.log_std_var, 0), [tf.shape(x)[0], 1]) + + log_std_out = tf.keras.layers.Lambda(tiled_log_std)(inputs) + logits_out = tf.keras.layers.Concatenate(axis=1)([logits_out, log_std_out]) + + last_vf_layer = None + if not vf_share_layers: + # Build a parallel set of hidden layers for the value net. + last_vf_layer = inputs + i = 1 + for size in hiddens: + last_vf_layer = tf.keras.layers.Dense( + size, + name="fc_value_{}".format(i), + activation=activation, + kernel_initializer=normc_initializer(1.0), + )(last_vf_layer) + i += 1 + + value_out = tf.keras.layers.Dense( + 1, + name="value_out", + activation=None, + kernel_initializer=normc_initializer(0.01), + )(last_vf_layer if last_vf_layer is not None else last_layer) + + self.base_model = tf.keras.Model( + inputs, [(logits_out if logits_out is not None else last_layer), value_out] + ) + + def forward( + self, + input_dict: Dict[str, TensorType], + state: List[TensorType], + seq_lens: TensorType, + ) -> (TensorType, List[TensorType]): + model_out, self._value_out = self.base_model(input_dict["obs_flat"]) + return model_out, state + + def value_function(self) -> TensorType: + return tf.reshape(self._value_out, [-1]) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6b840c42b17eff05c8e0c7d440a0f2963c9ed35a --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__init__.py @@ -0,0 +1,17 @@ +from ray.rllib.models.tf.layers.gru_gate import GRUGate +from ray.rllib.models.tf.layers.noisy_layer import NoisyLayer +from ray.rllib.models.tf.layers.relative_multi_head_attention import ( + PositionalEmbedding, + RelativeMultiHeadAttention, +) +from ray.rllib.models.tf.layers.skip_connection import SkipConnection 
+from ray.rllib.models.tf.layers.multi_head_attention import MultiHeadAttention + +__all__ = [ + "GRUGate", + "MultiHeadAttention", + "NoisyLayer", + "PositionalEmbedding", + "RelativeMultiHeadAttention", + "SkipConnection", +] diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d5dbcde2ea698bf9f141963819f80bd58cb6bb7 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/gru_gate.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/gru_gate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..862b1d9a188f69887b631cfe501f9562ac514ae2 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/gru_gate.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/multi_head_attention.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/multi_head_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a91d7327d3cfbb808f026c5b5ae1e04750c49ad6 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/multi_head_attention.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/noisy_layer.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/noisy_layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d3c9f6957df5431868c25aa5d7119096e97de27 
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/noisy_layer.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/relative_multi_head_attention.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/relative_multi_head_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12e9ffad0199e5a7476578e1ce0dd812fd0c2b44 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/relative_multi_head_attention.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/skip_connection.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/skip_connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8955c3b47bfb12f14937646bed655d98002b7d5d Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/skip_connection.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/gru_gate.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/gru_gate.py new file mode 100644 index 0000000000000000000000000000000000000000..a41b23bbf534a15d0d3c71333bcba1bb0c0a6d3b --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/gru_gate.py @@ -0,0 +1,58 @@ +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.typing import TensorType, TensorShape +from ray.rllib.utils.deprecation import deprecation_warning +from ray.util import log_once + +tf1, tf, tfv = try_import_tf() + + +class GRUGate(tf.keras.layers.Layer if tf else object): + def __init__(self, init_bias: float = 0.0, **kwargs): + super().__init__(**kwargs) + self._init_bias = init_bias + if log_once("gru_gate"): + 
deprecation_warning( + old="rllib.models.tf.layers.GRUGate", + ) + + def build(self, input_shape: TensorShape): + h_shape, x_shape = input_shape + if x_shape[-1] != h_shape[-1]: + raise ValueError( + "Both inputs to GRUGate must have equal size in last axis!" + ) + + dim = int(h_shape[-1]) + self._w_r = self.add_weight(shape=(dim, dim)) + self._w_z = self.add_weight(shape=(dim, dim)) + self._w_h = self.add_weight(shape=(dim, dim)) + + self._u_r = self.add_weight(shape=(dim, dim)) + self._u_z = self.add_weight(shape=(dim, dim)) + self._u_h = self.add_weight(shape=(dim, dim)) + + def bias_initializer(shape, dtype): + return tf.fill(shape, tf.cast(self._init_bias, dtype=dtype)) + + self._bias_z = self.add_weight(shape=(dim,), initializer=bias_initializer) + + def call(self, inputs: TensorType, **kwargs) -> TensorType: + # Pass in internal state first. + h, X = inputs + + r = tf.tensordot(X, self._w_r, axes=1) + tf.tensordot(h, self._u_r, axes=1) + r = tf.nn.sigmoid(r) + + z = ( + tf.tensordot(X, self._w_z, axes=1) + + tf.tensordot(h, self._u_z, axes=1) + - self._bias_z + ) + z = tf.nn.sigmoid(z) + + h_next = tf.tensordot(X, self._w_h, axes=1) + tf.tensordot( + (h * r), self._u_h, axes=1 + ) + h_next = tf.nn.tanh(h_next) + + return (1 - z) * h + z * h_next diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/multi_head_attention.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/multi_head_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..595608989f0b7da66e640a041289ae646cb36ae4 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/multi_head_attention.py @@ -0,0 +1,61 @@ +""" +[1] - Attention Is All You Need - Vaswani, Jones, Shazeer, Parmar, + Uszkoreit, Gomez, Kaiser - Google Brain/Research, U Toronto - 2017. 
+ https://arxiv.org/pdf/1706.03762.pdf +""" +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.typing import TensorType +from ray.rllib.utils.deprecation import deprecation_warning +from ray.util import log_once + +tf1, tf, tfv = try_import_tf() + + +class MultiHeadAttention(tf.keras.layers.Layer if tf else object): + """A multi-head attention layer described in [1].""" + + def __init__(self, out_dim: int, num_heads: int, head_dim: int, **kwargs): + super().__init__(**kwargs) + + # No bias or non-linearity. + self._num_heads = num_heads + self._head_dim = head_dim + self._qkv_layer = tf.keras.layers.Dense( + 3 * num_heads * head_dim, use_bias=False + ) + self._linear_layer = tf.keras.layers.TimeDistributed( + tf.keras.layers.Dense(out_dim, use_bias=False) + ) + if log_once("multi_head_attention"): + deprecation_warning( + old="rllib.models.tf.layers.MultiHeadAttention", + ) + + def call(self, inputs: TensorType) -> TensorType: + L = tf.shape(inputs)[1] # length of segment + H = self._num_heads # number of attention heads + D = self._head_dim # attention head dimension + + qkv = self._qkv_layer(inputs) + + queries, keys, values = tf.split(qkv, 3, -1) + queries = queries[:, -L:] # only query based on the segment + + queries = tf.reshape(queries, [-1, L, H, D]) + keys = tf.reshape(keys, [-1, L, H, D]) + values = tf.reshape(values, [-1, L, H, D]) + + score = tf.einsum("bihd,bjhd->bijh", queries, keys) + score = score / D**0.5 + + # causal mask of the same length as the sequence + mask = tf.sequence_mask(tf.range(1, L + 1), dtype=score.dtype) + mask = mask[None, :, :, None] + + masked_score = score * mask + 1e30 * (mask - 1.0) + wmat = tf.nn.softmax(masked_score, axis=2) + + out = tf.einsum("bijh,bjhd->bihd", wmat, values) + shape = tf.concat([tf.shape(out)[:2], [H * D]], axis=0) + out = tf.reshape(out, shape) + return self._linear_layer(out) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/noisy_layer.py 
b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/noisy_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc149d5de13beee0e77fcda069e321850507633 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/noisy_layer.py @@ -0,0 +1,118 @@ +import numpy as np + +from ray.rllib.models.utils import get_activation_fn +from ray.rllib.utils.framework import ( + get_variable, + try_import_tf, + TensorType, + TensorShape, +) +from ray.rllib.utils.deprecation import deprecation_warning +from ray.util import log_once + +tf1, tf, tfv = try_import_tf() + + +class NoisyLayer(tf.keras.layers.Layer if tf else object): + r"""A Layer that adds learnable Noise to some previous layer's outputs. + + Consists of: + - a common dense layer: y = w^{T}x + b + - a noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x + + (b+\epsilon_b*\sigma_b) + , where \epsilon are random variables sampled from factorized normal + distributions and \sigma are trainable variables which are expected to + vanish along the training procedure. + """ + + def __init__( + self, prefix: str, out_size: int, sigma0: float, activation: str = "relu" + ): + """Initializes a NoisyLayer object. + + Args: + prefix: + out_size: Output size for Noisy Layer + sigma0: Initialization value for sigma_b (bias noise) + non_linear: Non-linear activation for Noisy Layer + """ + super().__init__() + self.prefix = prefix + self.out_size = out_size + # TF noise generation can be unreliable on GPU + # If generating the noise on the CPU, + # lowering sigma0 to 0.1 may be helpful + self.sigma0 = sigma0 # 0.5~GPU, 0.1~CPU + self.activation = activation + # Variables. + self.w = None # Weight matrix. + self.b = None # Biases. + self.sigma_w = None # Noise for weight matrix + self.sigma_b = None # Noise for biases. 
+ if log_once("noisy_layer"): + deprecation_warning( + old="rllib.models.tf.layers.NoisyLayer", + ) + + def build(self, input_shape: TensorShape): + in_size = int(input_shape[1]) + + self.sigma_w = get_variable( + value=tf.keras.initializers.RandomUniform( + minval=-1.0 / np.sqrt(float(in_size)), + maxval=1.0 / np.sqrt(float(in_size)), + ), + trainable=True, + tf_name=self.prefix + "_sigma_w", + shape=[in_size, self.out_size], + dtype=tf.float32, + ) + + self.sigma_b = get_variable( + value=tf.keras.initializers.Constant(self.sigma0 / np.sqrt(float(in_size))), + trainable=True, + tf_name=self.prefix + "_sigma_b", + shape=[self.out_size], + dtype=tf.float32, + ) + + self.w = get_variable( + value=tf.keras.initializers.GlorotUniform(), + tf_name=self.prefix + "_fc_w", + trainable=True, + shape=[in_size, self.out_size], + dtype=tf.float32, + ) + + self.b = get_variable( + value=tf.keras.initializers.Zeros(), + tf_name=self.prefix + "_fc_b", + trainable=True, + shape=[self.out_size], + dtype=tf.float32, + ) + + def call(self, inputs: TensorType) -> TensorType: + in_size = int(inputs.shape[1]) + epsilon_in = tf.random.normal(shape=[in_size]) + epsilon_out = tf.random.normal(shape=[self.out_size]) + epsilon_in = self._f_epsilon(epsilon_in) + epsilon_out = self._f_epsilon(epsilon_out) + epsilon_w = tf.matmul( + a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0) + ) + epsilon_b = epsilon_out + + action_activation = ( + tf.matmul(inputs, self.w + self.sigma_w * epsilon_w) + + self.b + + self.sigma_b * epsilon_b + ) + + fn = get_activation_fn(self.activation, framework="tf") + if fn is not None: + action_activation = fn(action_activation) + return action_activation + + def _f_epsilon(self, x: TensorType) -> TensorType: + return tf.math.sign(x) * tf.math.sqrt(tf.math.abs(x)) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/relative_multi_head_attention.py 
b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/relative_multi_head_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..f88486ff20516c19fcebdab3718fc829591215fb --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/relative_multi_head_attention.py @@ -0,0 +1,147 @@ +from typing import Optional + +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.typing import TensorType +from ray.rllib.utils.deprecation import deprecation_warning +from ray.util import log_once + +tf1, tf, tfv = try_import_tf() + + +class RelativeMultiHeadAttention(tf.keras.layers.Layer if tf else object): + """A RelativeMultiHeadAttention layer as described in [3]. + + Uses segment level recurrence with state reuse. + """ + + def __init__( + self, + out_dim: int, + num_heads: int, + head_dim: int, + input_layernorm: bool = False, + output_activation: Optional["tf.nn.activation"] = None, + **kwargs + ): + """Initializes a RelativeMultiHeadAttention keras Layer object. + + Args: + out_dim: The output dimensions of the multi-head attention + unit. + num_heads: The number of attention heads to use. + Denoted `H` in [2]. + head_dim: The dimension of a single(!) attention head within + a multi-head attention unit. Denoted as `d` in [3]. + input_layernorm: Whether to prepend a LayerNorm before + everything else. Should be True for building a GTrXL. + output_activation (Optional[tf.nn.activation]): Optional tf.nn + activation function. Should be relu for GTrXL. + **kwargs: + """ + if log_once("relative_multi_head_attention"): + deprecation_warning( + old="rllib.models.tf.layers.RelativeMultiHeadAttention", + ) + super().__init__(**kwargs) + + # No bias or non-linearity. + self._num_heads = num_heads + self._head_dim = head_dim + # 3=Query, key, and value inputs. 
+ self._qkv_layer = tf.keras.layers.Dense( + 3 * num_heads * head_dim, use_bias=False + ) + self._linear_layer = tf.keras.layers.TimeDistributed( + tf.keras.layers.Dense(out_dim, use_bias=False, activation=output_activation) + ) + + self._uvar = self.add_weight(shape=(num_heads, head_dim)) + self._vvar = self.add_weight(shape=(num_heads, head_dim)) + + # Constant (non-trainable) sinusoid rel pos encoding matrix, which + # depends on this incoming time dimension. + # For inference, we prepend the memory to the current timestep's + # input: Tau + 1. For training, we prepend the memory to the input + # sequence: Tau + T. + self._pos_embedding = PositionalEmbedding(out_dim) + self._pos_proj = tf.keras.layers.Dense(num_heads * head_dim, use_bias=False) + + self._input_layernorm = None + if input_layernorm: + self._input_layernorm = tf.keras.layers.LayerNormalization(axis=-1) + + def call( + self, inputs: TensorType, memory: Optional[TensorType] = None + ) -> TensorType: + T = tf.shape(inputs)[1] # length of segment (time) + H = self._num_heads # number of attention heads + d = self._head_dim # attention head dimension + + # Add previous memory chunk (as const, w/o gradient) to input. + # Tau (number of (prev) time slices in each memory chunk). + Tau = tf.shape(memory)[1] + inputs = tf.concat([tf.stop_gradient(memory), inputs], axis=1) + + # Apply the Layer-Norm. + if self._input_layernorm is not None: + inputs = self._input_layernorm(inputs) + + qkv = self._qkv_layer(inputs) + + queries, keys, values = tf.split(qkv, 3, -1) + # Cut out memory timesteps from query. + queries = queries[:, -T:] + + # Splitting up queries into per-head dims (d). 
+ queries = tf.reshape(queries, [-1, T, H, d]) + keys = tf.reshape(keys, [-1, Tau + T, H, d]) + values = tf.reshape(values, [-1, Tau + T, H, d]) + + R = self._pos_embedding(Tau + T) + R = self._pos_proj(R) + R = tf.reshape(R, [Tau + T, H, d]) + + # b=batch + # i and j=time indices (i=max-timesteps (inputs); j=Tau memory space) + # h=head + # d=head-dim (over which we will reduce-sum) + score = tf.einsum("bihd,bjhd->bijh", queries + self._uvar, keys) + pos_score = tf.einsum("bihd,jhd->bijh", queries + self._vvar, R) + score = score + self.rel_shift(pos_score) + score = score / d**0.5 + + # Causal mask of the same length as the sequence. + mask = tf.sequence_mask(tf.range(Tau + 1, Tau + T + 1), dtype=score.dtype) + mask = mask[None, :, :, None] + + masked_score = score * mask + 1e30 * (mask - 1.0) + wmat = tf.nn.softmax(masked_score, axis=2) + + out = tf.einsum("bijh,bjhd->bihd", wmat, values) + out = tf.reshape(out, tf.concat((tf.shape(out)[:2], [H * d]), axis=0)) + return self._linear_layer(out) + + @staticmethod + def rel_shift(x: TensorType) -> TensorType: + # Transposed version of the shift approach described in [3]. 
+ # https://github.com/kimiyoung/transformer-xl/blob/ + # 44781ed21dbaec88b280f74d9ae2877f52b492a5/tf/model.py#L31 + x_size = tf.shape(x) + + x = tf.pad(x, [[0, 0], [0, 0], [1, 0], [0, 0]]) + x = tf.reshape(x, [x_size[0], x_size[2] + 1, x_size[1], x_size[3]]) + x = x[:, 1:, :, :] + x = tf.reshape(x, x_size) + + return x + + +class PositionalEmbedding(tf.keras.layers.Layer if tf else object): + def __init__(self, out_dim, **kwargs): + super().__init__(**kwargs) + self.inverse_freq = 1 / (10000 ** (tf.range(0, out_dim, 2.0) / out_dim)) + + def call(self, seq_length): + pos_offsets = tf.cast(tf.range(seq_length - 1, -1, -1), tf.float32) + inputs = pos_offsets[:, None] * self.inverse_freq[None, :] + return tf.concat((tf.sin(inputs), tf.cos(inputs)), axis=-1) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/skip_connection.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/skip_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee1751caf36e4a760da3c60c08fe279400dcb12 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/skip_connection.py @@ -0,0 +1,46 @@ +from typing import Optional, Any + +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.typing import TensorType +from ray.rllib.utils.deprecation import deprecation_warning +from ray.util import log_once + +tf1, tf, tfv = try_import_tf() + + +class SkipConnection(tf.keras.layers.Layer if tf else object): + """Skip connection layer. + + Adds the original input to the output (regular residual layer) OR uses + input as hidden state input to a given fan_in_layer. + """ + + def __init__(self, layer: Any, fan_in_layer: Optional[Any] = None, **kwargs): + """Initializes a SkipConnection keras layer object. + + Args: + layer (tf.keras.layers.Layer): Any layer processing inputs. 
+ fan_in_layer (Optional[tf.keras.layers.Layer]): An optional + layer taking two inputs: The original input and the output + of `layer`. + """ + if log_once("skip_connection"): + deprecation_warning( + old="rllib.models.tf.layers.SkipConnection", + ) + super().__init__(**kwargs) + self._layer = layer + self._fan_in_layer = fan_in_layer + + def call(self, inputs: TensorType, **kwargs) -> TensorType: + # del kwargs + outputs = self._layer(inputs, **kwargs) + # Residual case, just add inputs to outputs. + if self._fan_in_layer is None: + outputs = outputs + inputs + # Fan-in e.g. RNN: Call fan-in with `inputs` and `outputs`. + else: + # NOTE: In the GRU case, `inputs` is the state input. + outputs = self._fan_in_layer((inputs, outputs)) + + return outputs diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/misc.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..7ea75e423c2d66756b9e899faa2c6487dd57cab4 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/misc.py @@ -0,0 +1,90 @@ +import numpy as np +from typing import Tuple, Any, Optional + +from ray.rllib.utils.annotations import DeveloperAPI +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.typing import TensorType + +tf1, tf, tfv = try_import_tf() + + +# TODO: (sven) obsolete this class. 
+@DeveloperAPI +def normc_initializer(std: float = 1.0) -> Any: + def _initializer(shape, dtype=None, partition_info=None): + out = np.random.randn(*shape).astype( + dtype.name if hasattr(dtype, "name") else dtype or np.float32 + ) + out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True)) + return tf.constant(out) + + return _initializer + + +@DeveloperAPI +def conv2d( + x: TensorType, + num_filters: int, + name: str, + filter_size: Tuple[int, int] = (3, 3), + stride: Tuple[int, int] = (1, 1), + pad: str = "SAME", + dtype: Optional[Any] = None, + collections: Optional[Any] = None, +) -> TensorType: + + if dtype is None: + dtype = tf.float32 + + with tf1.variable_scope(name): + stride_shape = [1, stride[0], stride[1], 1] + filter_shape = [ + filter_size[0], + filter_size[1], + int(x.get_shape()[3]), + num_filters, + ] + + # There are "num input feature maps * filter height * filter width" + # inputs to each hidden unit. + fan_in = np.prod(filter_shape[:3]) + # Each unit in the lower layer receives a gradient from: "num output + # feature maps * filter height * filter width" / pooling size. + fan_out = np.prod(filter_shape[:2]) * num_filters + # Initialize weights with random weights. 
+ w_bound = np.sqrt(6 / (fan_in + fan_out)) + + w = tf1.get_variable( + "W", + filter_shape, + dtype, + tf1.random_uniform_initializer(-w_bound, w_bound), + collections=collections, + ) + b = tf1.get_variable( + "b", + [1, 1, 1, num_filters], + initializer=tf1.constant_initializer(0.0), + collections=collections, + ) + return tf1.nn.conv2d(x, w, stride_shape, pad) + b + + +@DeveloperAPI +def linear( + x: TensorType, + size: int, + name: str, + initializer: Optional[Any] = None, + bias_init: float = 0.0, +) -> TensorType: + w = tf1.get_variable(name + "/w", [x.get_shape()[1], size], initializer=initializer) + b = tf1.get_variable( + name + "/b", [size], initializer=tf1.constant_initializer(bias_init) + ) + return tf.matmul(x, w) + b + + +@DeveloperAPI +def flatten(x: TensorType) -> TensorType: + return tf.reshape(x, [-1, np.prod(x.get_shape().as_list()[1:])]) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/noop.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/noop.py new file mode 100644 index 0000000000000000000000000000000000000000..30d91988e3f1dd0cb003f6325a2e30bb7af71373 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/noop.py @@ -0,0 +1,17 @@ +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from ray.rllib.utils.annotations import OldAPIStack, override +from ray.rllib.utils.framework import try_import_tf + +_, tf, _ = try_import_tf() + + +@OldAPIStack +class NoopModel(TFModelV2): + """Trivial model that just returns the obs flattened. 
@OldAPIStack
class RecurrentNetwork(TFModelV2):
    """Helper class to simplify implementing RNN models with TFModelV2.

    Instead of implementing forward(), you can implement forward_rnn() which
    takes batches with the time dimension added already.

    Here is an example implementation for a subclass
    ``MyRNNClass(RecurrentNetwork)``::

        def __init__(self, *args, **kwargs):
            super(MyModelClass, self).__init__(*args, **kwargs)
            cell_size = 256

            # Define input layers
            input_layer = tf.keras.layers.Input(
                shape=(None, obs_space.shape[0]))
            state_in_h = tf.keras.layers.Input(shape=(256, ))
            state_in_c = tf.keras.layers.Input(shape=(256, ))
            seq_in = tf.keras.layers.Input(shape=(), dtype=tf.int32)

            # Send to LSTM cell
            lstm_out, state_h, state_c = tf.keras.layers.LSTM(
                cell_size, return_sequences=True, return_state=True,
                name="lstm")(
                    inputs=input_layer,
                    mask=tf.sequence_mask(seq_in),
                    initial_state=[state_in_h, state_in_c])
            output_layer = tf.keras.layers.Dense(...)(lstm_out)

            # Create the RNN model
            self.rnn_model = tf.keras.Model(
                inputs=[input_layer, seq_in, state_in_h, state_in_c],
                outputs=[output_layer, state_h, state_c])
            self.rnn_model.summary()
    """

    @override(ModelV2)
    def forward(
        self,
        input_dict: Dict[str, TensorType],
        state: List[TensorType],
        seq_lens: TensorType,
    ) -> Tuple[TensorType, List[TensorType]]:
        """Adds time dimension to batch before sending inputs to forward_rnn().

        You should implement forward_rnn() in your subclass.

        Args:
            input_dict: Dict holding the flattened observations under
                "obs_flat" (shape [B*T, obs_size]).
            state: List of RNN state input tensors.
            seq_lens: 1D int tensor of sequence lengths; must not be None.

        Returns:
            Tuple of (outputs reshaped back to [B*T, num_outputs], list of
            new RNN state tensors).
        """
        # Creating a __init__ function that acts as a passthrough and adding the
        # warning there led to errors probably due to the multiple inheritance.
        # We encountered the same error if we add the Deprecated decorator. We
        # therefore add the deprecation warning here.
        if log_once("recurrent_network_tf"):
            deprecation_warning(
                old="ray.rllib.models.tf.recurrent_net.RecurrentNetwork"
            )
        assert seq_lens is not None
        flat_inputs = input_dict["obs_flat"]
        # Fold the flat [B*T, ...] batch into [B, T, ...] for the RNN pass.
        inputs = add_time_dimension(
            padded_inputs=flat_inputs, seq_lens=seq_lens, framework="tf"
        )
        output, new_state = self.forward_rnn(
            inputs,
            state,
            seq_lens,
        )
        # Collapse the time dimension again so downstream (non-RNN) consumers
        # see the usual [B*T, num_outputs] layout.
        return tf.reshape(output, [-1, self.num_outputs]), new_state

    def forward_rnn(
        self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
    ) -> Tuple[TensorType, List[TensorType]]:
        """Call the model with the given input tensors and state.

        Args:
            inputs: observation tensor with shape [B, T, obs_size].
            state: list of state tensors, each with shape [B, T, size].
            seq_lens: 1d tensor holding input sequence lengths.

        Returns:
            (outputs, new_state): The model output tensor of shape
            [B, T, num_outputs] and the list of new state tensors each with
            shape [B, size].

        Sample implementation for the ``MyRNNClass`` example::

            def forward_rnn(self, inputs, state, seq_lens):
                model_out, h, c = self.rnn_model([inputs, seq_lens] + state)
                return model_out, [h, c]
        """
        raise NotImplementedError("You must implement this for a RNN model")

    def get_initial_state(self) -> List[TensorType]:
        """Get the initial recurrent state values for the model.

        Returns:
            list of np.array objects, if any

        Sample implementation for the ``MyRNNClass`` example::

            def get_initial_state(self):
                return [
                    np.zeros(self.cell_size, np.float32),
                    np.zeros(self.cell_size, np.float32),
                ]
        """
        raise NotImplementedError("You must implement this for a RNN model")
@OldAPIStack
class LSTMWrapper(RecurrentNetwork):
    """An LSTM wrapper serving as an interface for ModelV2s that set use_lstm."""

    def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: int,
        model_config: ModelConfigDict,
        name: str,
    ):
        """Builds the keras LSTM head that wraps the underlying model.

        Args:
            obs_space: Observation space of the wrapped model.
            action_space: Action space (used to size prev-action one-hots).
            num_outputs: Number of output nodes of the final logits layer.
            model_config: Model config dict; reads `lstm_cell_size`,
                `lstm_use_prev_action` and `lstm_use_prev_reward`.
            name: Name of this model.
        """
        super(LSTMWrapper, self).__init__(
            obs_space, action_space, None, model_config, name
        )
        # At this point, self.num_outputs is the number of nodes coming
        # from the wrapped (underlying) model. In other words, self.num_outputs
        # is the input size for the LSTM layer.
        # If None, set it to the observation space.
        if self.num_outputs is None:
            self.num_outputs = int(np.prod(self.obs_space.shape))

        self.cell_size = model_config["lstm_cell_size"]
        self.use_prev_action = model_config["lstm_use_prev_action"]
        self.use_prev_reward = model_config["lstm_use_prev_reward"]

        self.action_space_struct = get_base_struct_from_space(self.action_space)
        # Total flattened size of one (possibly one-hot'd) action vector.
        self.action_dim = 0

        for space in tree.flatten(self.action_space_struct):
            if isinstance(space, Discrete):
                self.action_dim += space.n
            elif isinstance(space, MultiDiscrete):
                self.action_dim += np.sum(space.nvec)
            elif space.shape is not None:
                self.action_dim += int(np.prod(space.shape))
            else:
                self.action_dim += int(len(space))

        # Add prev-action/reward nodes to input to LSTM.
        if self.use_prev_action:
            self.num_outputs += self.action_dim
        if self.use_prev_reward:
            self.num_outputs += 1

        # Define input layers.
        input_layer = tf.keras.layers.Input(
            shape=(None, self.num_outputs), name="inputs"
        )

        # Set self.num_outputs to the number of output nodes desired by the
        # caller of this constructor.
        self.num_outputs = num_outputs

        state_in_h = tf.keras.layers.Input(shape=(self.cell_size,), name="h")
        state_in_c = tf.keras.layers.Input(shape=(self.cell_size,), name="c")
        seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)

        # Send the (padded) input sequence through the LSTM; `mask` hides the
        # padded timesteps beyond each sequence's real length.
        lstm_out, state_h, state_c = tf.keras.layers.LSTM(
            self.cell_size, return_sequences=True, return_state=True, name="lstm"
        )(
            inputs=input_layer,
            mask=tf.sequence_mask(seq_in),
            initial_state=[state_in_h, state_in_c],
        )

        # Postprocess LSTM output with two separate heads: logits and values.
        logits = tf.keras.layers.Dense(
            self.num_outputs, activation=tf.keras.activations.linear, name="logits"
        )(lstm_out)
        values = tf.keras.layers.Dense(1, activation=None, name="values")(lstm_out)

        # Create the RNN model
        self._rnn_model = tf.keras.Model(
            inputs=[input_layer, seq_in, state_in_h, state_in_c],
            outputs=[logits, values, state_h, state_c],
        )
        # Print out model summary in INFO logging mode.
        if logger.isEnabledFor(logging.INFO):
            self._rnn_model.summary()

        # Add prev-a/r to this model's view, if required.
        if model_config["lstm_use_prev_action"]:
            self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement(
                SampleBatch.ACTIONS, space=self.action_space, shift=-1
            )
        if model_config["lstm_use_prev_reward"]:
            self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement(
                SampleBatch.REWARDS, shift=-1
            )

    @override(RecurrentNetwork)
    def forward(
        self,
        input_dict: Dict[str, TensorType],
        state: List[TensorType],
        seq_lens: TensorType,
    ) -> Tuple[TensorType, List[TensorType]]:
        assert seq_lens is not None
        # Push obs through "unwrapped" net's `forward()` first.
        # NOTE(review): `self._wrapped_forward` is not defined in this class;
        # it is presumably attached externally when the model gets wrapped —
        # confirm against the model catalog code.
        wrapped_out, _ = self._wrapped_forward(input_dict, [], None)

        # Concat. prev-action/reward if required.
        prev_a_r = []

        # Prev actions.
        if self.model_config["lstm_use_prev_action"]:
            prev_a = input_dict[SampleBatch.PREV_ACTIONS]
            # If actions are not processed yet (in their original form as
            # have been sent to environment):
            # Flatten/one-hot into 1D array.
            if self.model_config["_disable_action_flattening"]:
                prev_a_r.append(
                    flatten_inputs_to_1d_tensor(
                        prev_a,
                        spaces_struct=self.action_space_struct,
                        time_axis=False,
                    )
                )
            # If actions are already flattened (but not one-hot'd yet!),
            # one-hot discrete/multi-discrete actions here.
            else:
                if isinstance(self.action_space, (Discrete, MultiDiscrete)):
                    prev_a = one_hot(prev_a, self.action_space)
                prev_a_r.append(
                    tf.reshape(tf.cast(prev_a, tf.float32), [-1, self.action_dim])
                )
        # Prev rewards.
        if self.model_config["lstm_use_prev_reward"]:
            prev_a_r.append(
                tf.reshape(
                    tf.cast(input_dict[SampleBatch.PREV_REWARDS], tf.float32), [-1, 1]
                )
            )

        # Concat prev. actions + rewards to the "main" input.
        if prev_a_r:
            wrapped_out = tf.concat([wrapped_out] + prev_a_r, axis=1)

        # Push everything through our LSTM (the parent class adds the time
        # dimension and calls forward_rnn()).
        input_dict["obs_flat"] = wrapped_out
        return super().forward(input_dict, state, seq_lens)

    @override(RecurrentNetwork)
    def forward_rnn(
        self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
    ) -> Tuple[TensorType, List[TensorType]]:
        # Run the keras LSTM model; cache the value branch output for
        # value_function().
        model_out, self._value_out, h, c = self._rnn_model([inputs, seq_lens] + state)
        return model_out, [h, c]

    @override(ModelV2)
    def get_initial_state(self) -> List[np.ndarray]:
        # Zero-filled h- and c-states for the LSTM cell.
        return [
            np.zeros(self.cell_size, np.float32),
            np.zeros(self.cell_size, np.float32),
        ]

    @override(ModelV2)
    def value_function(self) -> TensorType:
        # Flatten the [B, T, 1] value-branch output to [B*T].
        return tf.reshape(self._value_out, [-1])
@OldAPIStack
class TFActionDistribution(ActionDistribution):
    """TF-specific extensions for building action distributions."""

    @override(ActionDistribution)
    def __init__(self, inputs: List[TensorType], model: ModelV2):
        """Builds and caches the stochastic sample op and its log-prob.

        The sample op is built exactly once here so that `sample()` and
        `sampled_action_logp()` refer to the SAME random draw.
        """
        super().__init__(inputs, model)
        self.sample_op = self._build_sample_op()
        self.sampled_action_logp_op = self.logp(self.sample_op)

    def _build_sample_op(self) -> TensorType:
        """Implement this instead of sample(), to enable op reuse.

        This is needed since the sample op is non-deterministic and is shared
        between sample() and sampled_action_logp().
        """
        raise NotImplementedError

    @override(ActionDistribution)
    def sample(self) -> TensorType:
        """Draw a sample from the action distribution."""
        return self.sample_op

    @override(ActionDistribution)
    def sampled_action_logp(self) -> TensorType:
        """Returns the log probability of the sampled action."""
        return self.sampled_action_logp_op
@OldAPIStack
class Categorical(TFActionDistribution):
    """Categorical distribution for discrete action spaces."""

    def __init__(
        self, inputs: List[TensorType], model: ModelV2 = None, temperature: float = 1.0
    ):
        """Initializes a Categorical over the given logits.

        Args:
            inputs: Logit tensor of shape [B, num_categories].
            model: Optional ModelV2 that produced `inputs`.
            temperature: Softmax temperature (must be > 0.0). Logits are
                divided by this value, so lower temperatures sharpen the
                resulting distribution.
        """
        assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
        # Dividing logits by the temperature implements
        # softmax(logits / temperature).
        super().__init__(inputs / temperature, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Greedy action: index of the largest logit in each batch row.
        return tf.math.argmax(self.inputs, axis=1)

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Negative cross-entropy of the int labels vs our logits == the
        # log-likelihood of those labels.
        labels = tf.cast(x, tf.int32)
        return -tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.inputs, labels=labels
        )

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        # Numerically stable softmax entropy: shift logits by their row max
        # before exponentiating.
        shifted = self.inputs - tf.reduce_max(self.inputs, axis=1, keepdims=True)
        exp_shifted = tf.exp(shifted)
        normalizer = tf.reduce_sum(exp_shifted, axis=1, keepdims=True)
        probs = exp_shifted / normalizer
        return tf.reduce_sum(probs * (tf.math.log(normalizer) - shifted), axis=1)

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        # Numerically stable KL(self || other), computed from max-shifted
        # logits of both distributions.
        shifted = self.inputs - tf.reduce_max(self.inputs, axis=1, keepdims=True)
        other_shifted = other.inputs - tf.reduce_max(
            other.inputs, axis=1, keepdims=True
        )
        exp_shifted = tf.exp(shifted)
        exp_other = tf.exp(other_shifted)
        normalizer = tf.reduce_sum(exp_shifted, axis=1, keepdims=True)
        other_normalizer = tf.reduce_sum(exp_other, axis=1, keepdims=True)
        probs = exp_shifted / normalizer
        return tf.reduce_sum(
            probs
            * (
                shifted
                - tf.math.log(normalizer)
                - other_shifted
                + tf.math.log(other_normalizer)
            ),
            axis=1,
        )

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # tf.random.categorical yields shape [B, 1]; squeeze to [B].
        return tf.squeeze(tf.random.categorical(self.inputs, 1), axis=1)

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # One logit per discrete action.
        return action_space.n


@OldAPIStack
def get_categorical_class_with_temperature(t: float):
    """Categorical distribution class that has customized default temperature."""

    class CategoricalWithTemperature(Categorical):
        def __init__(self, inputs, model=None, temperature=t):
            super().__init__(inputs, model, temperature)

    return CategoricalWithTemperature
@OldAPIStack
class MultiCategorical(TFActionDistribution):
    """MultiCategorical distribution for MultiDiscrete action spaces."""

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2,
        input_lens: Union[List[int], np.ndarray, Tuple[int, ...]],
        action_space=None,
    ):
        """Splits `inputs` per `input_lens` into one Categorical per component.

        Args:
            inputs: Concatenated logits for all sub-distributions, [B, sum(input_lens)].
            model: The ModelV2 that produced `inputs`.
            input_lens: Number of logits per sub-distribution.
            action_space: Optional MultiDiscrete (or int Box) space; inferred
                from the splits if not given.
        """
        # skip TFActionDistribution init (the sample op is built below, AFTER
        # self.cats and self.action_space exist)
        ActionDistribution.__init__(self, inputs, model)
        self.cats = [
            Categorical(input_, model)
            for input_ in tf.split(inputs, input_lens, axis=1)
        ]
        self.action_space = action_space
        if self.action_space is None:
            self.action_space = gym.spaces.MultiDiscrete(
                [c.inputs.shape[1] for c in self.cats]
            )
        self.sample_op = self._build_sample_op()
        self.sampled_action_logp_op = self.logp(self.sample_op)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        sample_ = tf.stack([cat.deterministic_sample() for cat in self.cats], axis=1)
        # Int Box spaces get reshaped/cast back to the Box's shape and dtype.
        if isinstance(self.action_space, gym.spaces.Box):
            return tf.cast(
                tf.reshape(sample_, [-1] + list(self.action_space.shape)),
                self.action_space.dtype,
            )
        return sample_

    @override(ActionDistribution)
    def logp(self, actions: TensorType) -> TensorType:
        # If tensor is provided, unstack it into list (one int tensor per
        # sub-distribution); list input is used as-is.
        if isinstance(actions, tf.Tensor):
            if isinstance(self.action_space, gym.spaces.Box):
                actions = tf.reshape(
                    actions, [-1, int(np.prod(self.action_space.shape))]
                )
            elif isinstance(self.action_space, gym.spaces.MultiDiscrete):
                actions.set_shape((None, len(self.cats)))
            actions = tf.unstack(tf.cast(actions, tf.int32), axis=1)
        # Sum of per-component log-likelihoods.
        logps = tf.stack([cat.logp(act) for cat, act in zip(self.cats, actions)])
        return tf.reduce_sum(logps, axis=0)

    @override(ActionDistribution)
    def multi_entropy(self) -> TensorType:
        """Per-component entropies, stacked to [B, num_components]."""
        return tf.stack([cat.entropy() for cat in self.cats], axis=1)

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        return tf.reduce_sum(self.multi_entropy(), axis=1)

    @override(ActionDistribution)
    def multi_kl(self, other: ActionDistribution) -> TensorType:
        """Per-component KLs vs `other`, stacked to [B, num_components]."""
        return tf.stack(
            [cat.kl(oth_cat) for cat, oth_cat in zip(self.cats, other.cats)], axis=1
        )

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        return tf.reduce_sum(self.multi_kl(other), axis=1)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        sample_op = tf.stack([cat.sample() for cat in self.cats], axis=1)
        if isinstance(self.action_space, gym.spaces.Box):
            return tf.cast(
                tf.reshape(sample_op, [-1] + list(self.action_space.shape)),
                dtype=self.action_space.dtype,
            )
        return sample_op

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # Int Box: requires uniform bounds; one categorical of size
        # (high - low + 1) per flattened dimension.
        if isinstance(action_space, gym.spaces.Box):
            assert action_space.dtype.name.startswith("int")
            low_ = np.min(action_space.low)
            high_ = np.max(action_space.high)
            assert np.all(action_space.low == low_)
            assert np.all(action_space.high == high_)
            return np.prod(action_space.shape, dtype=np.int32) * (high_ - low_ + 1)
        # MultiDiscrete space.
        else:
            # nvec is already integer, so no casting needed.
            return np.sum(action_space.nvec)
@OldAPIStack
class SlateMultiCategorical(Categorical):
    """MultiCategorical distribution for MultiDiscrete action spaces.

    The action space must be uniform, meaning all nvec items have the same size, e.g.
    MultiDiscrete([10, 10, 10]), where 10 is the number of candidates to pick from
    and 3 is the slate size (pick 3 out of 10). When picking candidates, no candidate
    must be picked more than once.
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2 = None,
        temperature: float = 1.0,
        action_space: Optional[gym.spaces.MultiDiscrete] = None,
        all_slates=None,
    ):
        assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
        # Allow softmax formula w/ temperature != 1.0:
        # Divide inputs by temperature.
        super().__init__(inputs / temperature, model)
        self.action_space = action_space
        # Assert uniformness of the action space (all discrete buckets have the
        # same size).
        assert isinstance(self.action_space, gym.spaces.MultiDiscrete) and all(
            n == self.action_space.nvec[0] for n in self.action_space.nvec
        )
        # Table of all valid slates; indexed by the underlying Categorical's
        # integer samples in deterministic_sample().
        self.all_slates = all_slates

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Get a sample from the underlying Categorical (batch of ints).
        sample = super().deterministic_sample()
        # Use the sampled ints to pick the actual slates.
        return tf.gather(self.all_slates, sample)

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # TODO: Implement.
        # NOTE(review): placeholder only — returns a constant 1.0 per batch
        # item, NOT an actual log-likelihood of `x`.
        return tf.ones_like(self.inputs[:, 0])
@OldAPIStack
class GumbelSoftmax(TFActionDistribution):
    """GumbelSoftmax distr. (for differentiable sampling in discr. actions).

    The Gumbel Softmax distribution [1] (also known as the Concrete [2]
    distribution) is a close cousin of the relaxed one-hot categorical
    distribution, whose tfp implementation we will use here plus
    adjusted `sample_...` and `log_prob` methods. See discussion at [0].

    [0] https://stackoverflow.com/questions/56226133/
    soft-actor-critic-with-discrete-action-space

    [1] Categorical Reparametrization with Gumbel-Softmax (Jang et al, 2017):
    https://arxiv.org/abs/1611.01144
    [2] The Concrete Distribution: A Continuous Relaxation of Discrete Random
    Variables (Maddison et al, 2017) https://arxiv.org/abs/1611.00712
    """

    def __init__(
        self, inputs: List[TensorType], model: ModelV2 = None, temperature: float = 1.0
    ):
        """Initializes a GumbelSoftmax distribution.

        Args:
            temperature: Temperature parameter. For low temperatures,
                the expected value approaches a categorical random variable.
                For high temperatures, the expected value approaches a uniform
                distribution.
        """
        assert temperature >= 0.0
        self.dist = tfp.distributions.RelaxedOneHotCategorical(
            temperature=temperature, logits=inputs
        )
        # Softmax probs of the underlying categorical (reaches into tfp's
        # private `_distribution` attribute).
        self.probs = tf.nn.softmax(self.dist._distribution.logits)
        super().__init__(inputs, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Return the dist object's prob values.
        return self.probs

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Override since the implementation of tfp.RelaxedOneHotCategorical
        # yields positive values.
        if x.shape != self.dist.logits.shape:
            values = tf.one_hot(
                x, self.dist.logits.shape.as_list()[-1], dtype=tf.float32
            )
            assert values.shape == self.dist.logits.shape, (
                values.shape,
                self.dist.logits.shape,
            )
        # NOTE(review): `values` computed above is never used — the return
        # below always uses `x` directly. Looks like a latent bug; confirm
        # against upstream before relying on the int-label code path.

        # [0]'s implementation (see line below) seems to be an approximation
        # to the actual Gumbel Softmax density.
        return -tf.reduce_sum(
            -x * tf.nn.log_softmax(self.dist.logits, axis=-1), axis=-1
        )

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        return self.dist.sample()

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # One logit per discrete action.
        return action_space.n
@OldAPIStack
class DiagGaussian(TFActionDistribution):
    """Action distribution where each vector element is a gaussian.

    The first half of the input vector defines the gaussian means, and the
    second half the gaussian standard deviations (as log-stds).
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2,
        *,
        action_space: Optional[gym.spaces.Space] = None
    ):
        """Splits `inputs` into mean- and log-std halves along axis 1."""
        self.mean, self.log_std = tf.split(inputs, 2, axis=1)
        self.std = tf.exp(self.log_std)
        # Remember to squeeze action samples in case action space is Box(shape=()).
        self.zero_action_dim = action_space and action_space.shape == ()
        super().__init__(inputs, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # The mode of a Gaussian is its mean.
        return self.mean

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Cover case where action space is Box(shape=()): add a dim axis.
        if int(tf.shape(x).shape[0]) == 1:
            x = tf.expand_dims(x, axis=1)
        # Diagonal-Gaussian log-density, summed over action dims.
        normalized = (tf.cast(x, tf.float32) - self.mean) / self.std
        quadratic = -0.5 * tf.reduce_sum(tf.math.square(normalized), axis=1)
        log_normalizer = 0.5 * np.log(2.0 * np.pi) * tf.cast(
            tf.shape(x)[1], tf.float32
        )
        return quadratic - log_normalizer - tf.reduce_sum(self.log_std, axis=1)

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        assert isinstance(other, DiagGaussian)
        # Closed-form KL between two diagonal Gaussians, summed over dims.
        variance_term = (
            tf.math.square(self.std) + tf.math.square(self.mean - other.mean)
        ) / (2.0 * tf.math.square(other.std))
        return tf.reduce_sum(
            other.log_std - self.log_std + variance_term - 0.5, axis=1
        )

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        # Entropy of a diagonal Gaussian: sum(log_std + 0.5 * log(2*pi*e)).
        return tf.reduce_sum(self.log_std + 0.5 * np.log(2.0 * np.pi * np.e), axis=1)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # Reparameterized draw: mean + std * N(0, 1).
        noise = tf.random.normal(tf.shape(self.mean))
        sample = self.mean + self.std * noise
        if self.zero_action_dim:
            return tf.squeeze(sample, axis=-1)
        return sample

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # Mean and log-std per action dimension.
        return np.prod(action_space.shape, dtype=np.int32) * 2
@OldAPIStack
class SquashedGaussian(TFActionDistribution):
    """A tanh-squashed Gaussian distribution defined by: mean, std, low, high.

    The distribution will never return low or high exactly, but
    `low`+SMALL_NUMBER or `high`-SMALL_NUMBER respectively.
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2,
        low: float = -1.0,
        high: float = 1.0,
    ):
        """Parameterizes the distribution via `inputs`.

        Args:
            low: The lowest possible sampling value
                (excluding this value).
            high: The highest possible sampling value
                (excluding this value).
        """
        assert tfp is not None
        mean, log_std = tf.split(inputs, 2, axis=-1)
        # Clip `scale` values (coming from NN) to reasonable values.
        log_std = tf.clip_by_value(log_std, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT)
        std = tf.exp(log_std)
        self.distr = tfp.distributions.Normal(loc=mean, scale=std)
        assert np.all(np.less(low, high))
        self.low = low
        self.high = high
        super().__init__(inputs, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Squash the Gaussian mean into [low, high].
        mean = self.distr.mean()
        return self._squash(mean)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        return self._squash(self.distr.sample())

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Unsquash values (from [low,high] to ]-inf,inf[)
        unsquashed_values = tf.cast(self._unsquash(x), self.inputs.dtype)
        # Get log prob of unsquashed values from our Normal.
        log_prob_gaussian = self.distr.log_prob(unsquashed_values)
        # For safety reasons, clamp somehow, only then sum up.
        log_prob_gaussian = tf.clip_by_value(log_prob_gaussian, -100, 100)
        log_prob_gaussian = tf.reduce_sum(log_prob_gaussian, axis=-1)
        # Get log-prob for squashed Gaussian: subtract the log-det of the
        # tanh Jacobian, sum(log(1 - tanh(u)^2)), per action dim.
        unsquashed_values_tanhd = tf.math.tanh(unsquashed_values)
        log_prob = log_prob_gaussian - tf.reduce_sum(
            tf.math.log(1 - unsquashed_values_tanhd**2 + SMALL_NUMBER), axis=-1
        )
        return log_prob

    def sample_logp(self):
        """Draws a sample and returns it with its squash-corrected log-prob.

        NOTE(review): the Jacobian term below uses the squashed `actions`
        directly (`1 - actions^2`), which equals the tanh correction only for
        the default bounds low=-1.0, high=1.0 — confirm for other bounds.
        """
        z = self.distr.sample()
        actions = self._squash(z)
        return actions, tf.reduce_sum(
            self.distr.log_prob(z) - tf.math.log(1 - actions * actions + SMALL_NUMBER),
            axis=-1,
        )

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        raise ValueError("Entropy not defined for SquashedGaussian!")

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        raise ValueError("KL not defined for SquashedGaussian!")

    def _squash(self, raw_values: TensorType) -> TensorType:
        # Returned values are within [low, high] (including `low` and `high`).
        squashed = ((tf.math.tanh(raw_values) + 1.0) / 2.0) * (
            self.high - self.low
        ) + self.low
        return tf.clip_by_value(squashed, self.low, self.high)

    def _unsquash(self, values: TensorType) -> TensorType:
        normed_values = (values - self.low) / (self.high - self.low) * 2.0 - 1.0
        # Stabilize input to atanh.
        save_normed_values = tf.clip_by_value(
            normed_values, -1.0 + SMALL_NUMBER, 1.0 - SMALL_NUMBER
        )
        unsquashed = tf.math.atanh(save_normed_values)
        return unsquashed

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # Mean and log-std per action dimension.
        return np.prod(action_space.shape, dtype=np.int32) * 2
@OldAPIStack
class Beta(TFActionDistribution):
    """
    A Beta distribution is defined on the interval [0, 1] and parameterized by
    shape parameters alpha and beta (also called concentration parameters).

    PDF(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
        with Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
        and Gamma(n) = (n - 1)!

    Samples are affinely rescaled from [0, 1] into [low, high].
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2,
        low: float = 0.0,
        high: float = 1.0,
    ):
        """Builds the tfp Beta distribution from raw NN outputs.

        Args:
            inputs: Raw NN outputs; split in half along the last axis into
                alpha- and beta-parameters.
            model: The ModelV2 that produced `inputs`.
            low: Lower bound of the (rescaled) sampling interval.
            high: Upper bound of the (rescaled) sampling interval.
        """
        # Clamp the raw NN outputs to a numerically safe window, then map
        # through softplus(x) + 1 so both concentrations end up > 1.
        clipped = tf.clip_by_value(inputs, log(SMALL_NUMBER), -log(SMALL_NUMBER))
        inputs = tf.math.log(tf.math.exp(clipped) + 1.0) + 1.0
        self.low = low
        self.high = high
        alpha, beta = tf.split(inputs, 2, axis=-1)
        # Careful: in tfp, concentration1 == alpha and concentration0 == beta (!)
        self.dist = tfp.distributions.Beta(concentration1=alpha, concentration0=beta)
        super().__init__(inputs, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Deterministic "sample" = distribution mean, rescaled to [low, high].
        mean = self.dist.mean()
        return self._squash(mean)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # Stochastic draw, rescaled to [low, high].
        return self._squash(self.dist.sample())

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Map actions back into [0, 1] before evaluating the Beta density.
        unsquashed = self._unsquash(x)
        return tf.math.reduce_sum(self.dist.log_prob(unsquashed), axis=-1)

    def _squash(self, raw_values: TensorType) -> TensorType:
        # Affine map [0, 1] -> [low, high].
        return raw_values * (self.high - self.low) + self.low

    def _unsquash(self, values: TensorType) -> TensorType:
        # Affine map [low, high] -> [0, 1].
        return (values - self.low) / (self.high - self.low)

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # Two outputs (raw alpha, raw beta) per action dimension.
        return np.prod(action_space.shape, dtype=np.int32) * 2
@OldAPIStack
class Deterministic(TFActionDistribution):
    """Action distribution that returns the input values directly.

    This is similar to DiagGaussian with standard deviation zero (thus only
    requiring the "mean" values as NN output).
    """

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # A point mass on the inputs themselves.
        return self.inputs

    @override(TFActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Log-prob of any action is defined as 0.0 for this point mass.
        return tf.zeros_like(self.inputs)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # Sampling is deterministic: just pass through the inputs.
        return self.inputs

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # One output per action dimension (the "mean" only; no stds).
        return np.prod(action_space.shape, dtype=np.int32)
@OldAPIStack
class MultiActionDistribution(TFActionDistribution):
    """Action distribution that operates on a set of actions.

    Args:
        inputs (Tensor list): A list of tensors from which to compute samples.
    """

    def __init__(
        self, inputs, model, *, child_distributions, input_lens, action_space, **kwargs
    ):
        # Deliberately skip TFActionDistribution.__init__: sampling is
        # delegated to the child distributions, so no shared sample op is
        # built here.
        ActionDistribution.__init__(self, inputs, model)

        self.action_space_struct = get_base_struct_from_space(action_space)

        self.input_lens = np.array(input_lens, dtype=np.int32)
        split_inputs = tf.split(inputs, self.input_lens, axis=1)
        # Instantiate one child distribution per flattened action component.
        self.flat_child_distributions = tree.map_structure(
            lambda dist, input_: dist(input_, model, **kwargs),
            child_distributions,
            split_inputs,
        )

    @override(ActionDistribution)
    def logp(self, x):
        # Single tensor input (all merged): figure out per-child column
        # widths, then split `x` accordingly.
        if isinstance(x, (tf.Tensor, np.ndarray)):
            split_indices = []
            for dist in self.flat_child_distributions:
                if isinstance(dist, Categorical):
                    split_indices.append(1)
                elif (
                    isinstance(dist, MultiCategorical) and dist.action_space is not None
                ):
                    split_indices.append(np.prod(dist.action_space.shape))
                else:
                    sample = dist.sample()
                    # Cover Box(shape=()) case.
                    if len(sample.shape) == 1:
                        split_indices.append(1)
                    else:
                        split_indices.append(tf.shape(sample)[1])
            split_x = tf.split(x, split_indices, axis=1)
        # Structured or flattened (by single action component) input.
        else:
            split_x = tree.flatten(x)

        def map_(val, dist):
            # Remove extra categorical dimension.
            if isinstance(dist, Categorical):
                val = tf.cast(
                    tf.squeeze(val, axis=-1) if len(val.shape) > 1 else val, tf.int32
                )
            return dist.logp(val)

        # Remove extra categorical dimension and take the logp of each
        # component.
        flat_logps = tree.map_structure(map_, split_x, self.flat_child_distributions)

        # Joint log-prob = sum of the component log-probs.
        return functools.reduce(lambda a, b: a + b, flat_logps)

    @override(ActionDistribution)
    def kl(self, other):
        # Sum of per-component KLs.
        kl_list = [
            d.kl(o)
            for d, o in zip(
                self.flat_child_distributions, other.flat_child_distributions
            )
        ]
        return functools.reduce(lambda a, b: a + b, kl_list)

    @override(ActionDistribution)
    def entropy(self):
        # Sum of per-component entropies.
        entropy_list = [d.entropy() for d in self.flat_child_distributions]
        return functools.reduce(lambda a, b: a + b, entropy_list)

    @override(ActionDistribution)
    def sample(self):
        # Return samples in the original (nested) action-space structure.
        child_distributions = tree.unflatten_as(
            self.action_space_struct, self.flat_child_distributions
        )
        return tree.map_structure(lambda s: s.sample(), child_distributions)

    @override(ActionDistribution)
    def deterministic_sample(self):
        # Return greedy samples in the original (nested) structure.
        child_distributions = tree.unflatten_as(
            self.action_space_struct, self.flat_child_distributions
        )
        return tree.map_structure(
            lambda s: s.deterministic_sample(), child_distributions
        )

    @override(TFActionDistribution)
    def sampled_action_logp(self):
        # Sum of the children's sampled-action log-probs.
        p = self.flat_child_distributions[0].sampled_action_logp()
        for c in self.flat_child_distributions[1:]:
            p += c.sampled_action_logp()
        return p

    @override(ActionDistribution)
    def required_model_output_shape(self, action_space, model_config):
        # NOTE(review): unlike the sibling classes, this is an instance
        # method (reads self.input_lens), not a @staticmethod.
        return np.sum(self.input_lens, dtype=np.int32)
@OldAPIStack
class Dirichlet(TFActionDistribution):
    """Dirichlet distribution for continuous actions that are between
    [0,1] and sum to 1.

    e.g. actions that represent resource allocation."""

    def __init__(self, inputs: List[TensorType], model: ModelV2):
        """Input is a tensor of logits. The exponential of logits is used to
        parametrize the Dirichlet distribution as all parameters need to be
        positive. An arbitrary small epsilon is added to the concentration
        parameters so that none of them is (numerically) zero.

        See issue #4440 for more details.
        """
        self.epsilon = 1e-7
        # exp() guarantees strictly positive concentration parameters.
        concentration = tf.exp(inputs) + self.epsilon
        # Built via tf1.distributions (not tfp).
        self.dist = tf1.distributions.Dirichlet(
            concentration=concentration,
            validate_args=True,
            allow_nan_stats=False,
        )
        super().__init__(concentration, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Softmax of the concentrations: a point on the simplex.
        return tf.nn.softmax(self.dist.concentration)

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Support of Dirichlet are positive real numbers. x is already
        # an array of positive numbers, but we clip to avoid zeros due to
        # numerical errors.
        x = tf.maximum(x, self.epsilon)
        # Re-normalize so the clipped values still sum to 1 (stay on the
        # simplex, as required by validate_args=True above).
        x = x / tf.reduce_sum(x, axis=-1, keepdims=True)
        return self.dist.log_prob(x)

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        return self.dist.entropy()

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        return self.dist.kl_divergence(other.dist)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        return self.dist.sample()

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # One concentration logit per action dimension.
        return np.prod(action_space.shape, dtype=np.int32)
@DeveloperAPI
class TfDistribution(Distribution, abc.ABC):
    """Wrapper class for tfp.distributions.

    Subclasses construct the concrete tfp distribution in
    `_get_tf_distribution()`; this base class then forwards the common
    `Distribution` API (logp / entropy / kl / sample) to the wrapped object.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self._dist = self._get_tf_distribution(*args, **kwargs)

    @abc.abstractmethod
    def _get_tf_distribution(self, *args, **kwargs) -> "tfp.distributions.Distribution":
        """Returns the tfp.distributions.Distribution object to use."""

    @override(Distribution)
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        """Returns the log-likelihood of `value` under the wrapped dist."""
        return self._dist.log_prob(value, **kwargs)

    @override(Distribution)
    def entropy(self) -> TensorType:
        return self._dist.entropy()

    @override(Distribution)
    def kl(self, other: "Distribution") -> TensorType:
        return self._dist.kl_divergence(other._dist)

    @override(Distribution)
    def sample(
        self, *, sample_shape=()
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        sample = self._dist.sample(sample_shape)
        return sample

    @override(Distribution)
    def rsample(
        self, *, sample_shape=()
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        # Reparameterized sampling is not supported generically; subclasses
        # that support it override this method.
        raise NotImplementedError


@DeveloperAPI
class TfCategorical(TfDistribution):
    """Wrapper class for Categorical distribution.

    Creates a categorical distribution parameterized by either `probs` or
    `logits` (but not both).

    Samples are integers from {0, ..., K-1} where K is ``probs.size(-1)``.

    If `probs` is 1-dimensional with length-K, each element is the relative
    probability of sampling the class at that index.

    If `probs` is N-dimensional, the first N-1 dimensions are treated as a
    batch of relative probability vectors.

    Args:
        probs: The probabilities of each event.
        logits: Event log probabilities (unnormalized).
    """

    @override(TfDistribution)
    def __init__(
        self,
        probs: "tf.Tensor" = None,
        logits: "tf.Tensor" = None,
    ) -> None:
        # Exactly one of probs/logits must be given; `to_deterministic()`
        # relies on this invariant.
        assert (probs is None) != (
            logits is None
        ), "Exactly one out of `probs` and `logits` must be set!"

        self.probs = probs
        self.logits = logits
        # One-hot variant used for the straight-through `rsample()` trick.
        self.one_hot = tfp.distributions.OneHotCategorical(logits=logits, probs=probs)
        super().__init__(logits=logits, probs=probs)

    @override(Distribution)
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        # Use sparse softmax cross-entropy directly: this prevents an error
        # in which float values at the boundaries of the range of the
        # distribution are passed to this function.
        # Bug fix: `tf.log` does not exist in TF2 -> use `tf.math.log`.
        return -tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits
            if self.logits is not None
            else tf.math.log(self.probs),
            labels=tf.cast(value, tf.int32),
        )

    @override(TfDistribution)
    def _get_tf_distribution(
        self,
        probs: "tf.Tensor" = None,
        logits: "tf.Tensor" = None,
    ) -> "tfp.distributions.Distribution":
        return tfp.distributions.Categorical(probs=probs, logits=logits)

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        assert isinstance(space, gym.spaces.Discrete)
        return int(space.n)

    @override(Distribution)
    def rsample(self, sample_shape=()):
        # Straight-through gradient estimator: forward pass yields the
        # one-hot sample; the backward pass flows through `self.probs`.
        # Bug fix: the TF API is `tf.stop_gradient` (singular), not
        # `tf.stop_gradients`, which raised AttributeError at runtime.
        # NOTE(review): when constructed from `logits`, `self.probs` is None
        # here — confirm callers always construct via `probs` before rsample.
        one_hot_sample = self.one_hot.sample(sample_shape)
        return tf.stop_gradient(one_hot_sample - self.probs) + self.probs

    @classmethod
    @override(Distribution)
    def from_logits(cls, logits: TensorType, **kwargs) -> "TfCategorical":
        return TfCategorical(logits=logits, **kwargs)

    def to_deterministic(self) -> "TfDeterministic":
        # argmax over whichever parameterization was provided.
        if self.probs is not None:
            probs_or_logits = self.probs
        else:
            probs_or_logits = self.logits

        return TfDeterministic(loc=tf.math.argmax(probs_or_logits, axis=-1))
@DeveloperAPI
class TfDiagGaussian(TfDistribution):
    """Wrapper class for a diagonal-covariance Normal distribution.

    Parameterized by `loc` (mean) and `scale` (stddev). For multi-dimensional
    inputs the covariance is assumed diagonal, so `logp`, `entropy` and `kl`
    sum the per-dimension values over the last axis.

    Args:
        loc: Mean of the distribution (often referred to as mu). If scale is
            None, the second half of `loc` will be used as the log of scale.
        scale: Standard deviation of the distribution (often referred to as
            sigma). Has to be positive.
    """

    @override(TfDistribution)
    def __init__(
        self,
        loc: Union[float, TensorType],
        scale: Optional[Union[float, TensorType]] = None,
    ):
        self.loc = loc
        super().__init__(loc=loc, scale=scale)

    @override(TfDistribution)
    def _get_tf_distribution(self, loc, scale) -> "tfp.distributions.Distribution":
        return tfp.distributions.Normal(loc=loc, scale=scale)

    @override(TfDistribution)
    def logp(self, value: TensorType) -> TensorType:
        # Independent dims -> joint logp is the sum over the last axis.
        per_dim_logp = super().logp(value)
        return tf.math.reduce_sum(per_dim_logp, axis=-1)

    @override(TfDistribution)
    def entropy(self) -> TensorType:
        per_dim_entropy = super().entropy()
        return tf.math.reduce_sum(per_dim_entropy, axis=-1)

    @override(TfDistribution)
    def kl(self, other: "TfDistribution") -> TensorType:
        per_dim_kl = super().kl(other)
        return tf.math.reduce_sum(per_dim_kl, axis=-1)

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        # One mean output plus one log-std output per action dimension.
        assert isinstance(space, gym.spaces.Box)
        return int(np.prod(space.shape, dtype=np.int32) * 2)

    @override(Distribution)
    def rsample(self, sample_shape=()):
        # Reparameterization trick: loc + eps * scale.
        # NOTE(review): the noise is drawn with shape `sample_shape` only —
        # verify it broadcasts against loc/scale at all call sites.
        noise = tf.random.normal(sample_shape)
        return self._dist.loc + noise * self._dist.scale

    @classmethod
    @override(Distribution)
    def from_logits(cls, logits: TensorType, **kwargs) -> "TfDiagGaussian":
        # First half of the model output is the mean, second half the log-std.
        loc, log_std = tf.split(logits, num_or_size_splits=2, axis=-1)
        scale = tf.math.exp(log_std)
        return TfDiagGaussian(loc=loc, scale=scale)

    def to_deterministic(self) -> "TfDeterministic":
        return TfDeterministic(loc=self.loc)


@DeveloperAPI
class TfDeterministic(Distribution):
    """Distribution that returns its input values directly.

    Comparable to a DiagGaussian with standard deviation zero (thus only
    requiring the "mean" values as NN output). `entropy()` and `kl()` are
    unsupported and raise; `logp()` returns zeros.

    Args:
        loc: The deterministic value to return on sampling.
    """

    @override(Distribution)
    def __init__(self, loc: "tf.Tensor") -> None:
        super().__init__()
        self.loc = loc

    @override(Distribution)
    def sample(
        self,
        *,
        sample_shape: Tuple[int, ...] = (),
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        # Tile `loc` out to the requested sample shape.
        full_shape = sample_shape + self.loc.shape
        return tf.ones(full_shape, dtype=self.loc.dtype) * self.loc

    @override(Distribution)
    def rsample(
        self,
        *,
        sample_shape: Tuple[int, ...] = None,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        raise NotImplementedError

    @override(Distribution)
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        return tf.zeros_like(self.loc)

    @override(Distribution)
    def entropy(self, **kwargs) -> TensorType:
        raise RuntimeError(f"`entropy()` not supported for {self.__class__.__name__}.")

    @override(Distribution)
    def kl(self, other: "Distribution", **kwargs) -> TensorType:
        raise RuntimeError(f"`kl()` not supported for {self.__class__.__name__}.")

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        assert isinstance(space, gym.spaces.Box)
        return int(np.prod(space.shape, dtype=np.int32))

    @classmethod
    @override(Distribution)
    def from_logits(cls, logits: TensorType, **kwargs) -> "TfDeterministic":
        return TfDeterministic(loc=logits)

    def to_deterministic(self) -> "TfDeterministic":
        return self


@DeveloperAPI
class TfMultiCategorical(Distribution):
    """MultiCategorical distribution for MultiDiscrete action spaces."""

    @override(Distribution)
    def __init__(
        self,
        categoricals: List[TfCategorical],
    ):
        super().__init__()
        self._cats = categoricals

    @override(Distribution)
    def sample(self) -> TensorType:
        # One sample per sub-categorical, stacked along the last axis.
        return tf.stack([cat.sample() for cat in self._cats], axis=-1)

    @override(Distribution)
    def rsample(self, sample_shape=()):
        return tf.stack([cat.rsample() for cat in self._cats], axis=-1)

    @override(Distribution)
    def logp(self, value: tf.Tensor) -> TensorType:
        # Split `value` into one int column per sub-categorical, then sum
        # the per-component log-likelihoods.
        per_cat_actions = tf.unstack(tf.cast(value, tf.int32), axis=-1)
        per_cat_logps = tf.stack(
            [cat.logp(act) for cat, act in zip(self._cats, per_cat_actions)]
        )
        return tf.reduce_sum(per_cat_logps, axis=0)

    @override(Distribution)
    def entropy(self) -> TensorType:
        stacked = tf.stack([cat.entropy() for cat in self._cats], axis=-1)
        return tf.reduce_sum(stacked, axis=-1)

    @override(Distribution)
    def kl(self, other: Distribution) -> TensorType:
        stacked = tf.stack(
            [cat.kl(other_cat) for cat, other_cat in zip(self._cats, other._cats)],
            axis=-1,
        )
        return tf.reduce_sum(stacked, axis=-1)

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        assert isinstance(space, gym.spaces.MultiDiscrete)
        return int(np.sum(space.nvec))

    @classmethod
    @override(Distribution)
    def from_logits(
        cls,
        logits: tf.Tensor,
        input_lens: List[int],
        **kwargs,
    ) -> "TfMultiCategorical":
        """Creates this Distribution from logits (and additional arguments).

        If you wish to create this distribution from logits only, please refer to
        `Distribution.get_partial_dist_cls()`.

        Args:
            logits: The tensor containing logits to be separated by input_lens.
            input_lens: A list of integers that indicate the length of the logits
                vectors to be passed into each child distribution.
            **kwargs: Forward compatibility kwargs.
        """
        return TfMultiCategorical(
            categoricals=[
                TfCategorical(logits=sub_logits)
                for sub_logits in tf.split(logits, input_lens, axis=-1)
            ]
        )

    def to_deterministic(self) -> "TfMultiDistribution":
        return TfMultiDistribution([cat.to_deterministic() for cat in self._cats])
+ """ + categoricals = [ + TfCategorical(logits=logits) + for logits in tf.split(logits, input_lens, axis=-1) + ] + + return TfMultiCategorical(categoricals=categoricals) + + def to_deterministic(self) -> "TfMultiDistribution": + return TfMultiDistribution([cat.to_deterministic() for cat in self._cats]) + + +@DeveloperAPI +class TfMultiDistribution(Distribution): + """Action distribution that operates on multiple, possibly nested actions.""" + + def __init__( + self, + child_distribution_struct: Union[Tuple, List, Dict], + ): + """Initializes a TfMultiDistribution object. + + Args: + child_distribution_struct: Any struct + that contains the child distribution classes to use to + instantiate the child distributions from `logits`. + """ + super().__init__() + self._original_struct = child_distribution_struct + self._flat_child_distributions = tree.flatten(child_distribution_struct) + + @override(Distribution) + def rsample( + self, + *, + sample_shape: Tuple[int, ...] = None, + **kwargs, + ) -> Union[TensorType, Tuple[TensorType, TensorType]]: + rsamples = [] + for dist in self._flat_child_distributions: + rsample = dist.rsample(sample_shape=sample_shape, **kwargs) + rsamples.append(rsample) + + rsamples = tree.unflatten_as(self._original_struct, rsamples) + return rsamples + + @override(Distribution) + def logp(self, value): + # Single tensor input (all merged). + if isinstance(value, (tf.Tensor, np.ndarray)): + split_indices = [] + for dist in self._flat_child_distributions: + if isinstance(dist, TfCategorical): + split_indices.append(1) + elif isinstance(dist, TfMultiCategorical): + split_indices.append(len(dist._cats)) + else: + sample = dist.sample() + # Cover Box(shape=()) case. + if len(sample.shape) == 1: + split_indices.append(1) + else: + split_indices.append(tf.shape(sample)[1]) + split_value = tf.split(value, split_indices, axis=1) + # Structured or flattened (by single action component) input. 
+ else: + split_value = tree.flatten(value) + + def map_(val, dist): + # Remove extra dimension if present. + if ( + isinstance(dist, TfCategorical) + and len(val.shape) > 1 + and val.shape[-1] == 1 + ): + val = tf.squeeze(val, axis=-1) + + return dist.logp(val) + + # Remove extra categorical dimension and take the logp of each + # component. + flat_logps = tree.map_structure( + map_, split_value, self._flat_child_distributions + ) + + return sum(flat_logps) + + @override(Distribution) + def kl(self, other): + kl_list = [ + d.kl(o) + for d, o in zip( + self._flat_child_distributions, other._flat_child_distributions + ) + ] + return sum(kl_list) + + @override(Distribution) + def entropy(self): + entropy_list = [d.entropy() for d in self._flat_child_distributions] + return sum(entropy_list) + + @override(Distribution) + def sample(self): + child_distributions_struct = tree.unflatten_as( + self._original_struct, self._flat_child_distributions + ) + return tree.map_structure(lambda s: s.sample(), child_distributions_struct) + + @staticmethod + @override(Distribution) + def required_input_dim(space: gym.Space, input_lens: List[int], **kwargs) -> int: + return sum(input_lens) + + @classmethod + @override(Distribution) + def from_logits( + cls, + logits: tf.Tensor, + child_distribution_cls_struct: Union[Dict, Iterable], + input_lens: Union[Dict, List[int]], + space: gym.Space, + **kwargs, + ) -> "TfMultiDistribution": + """Creates this Distribution from logits (and additional arguments). + + If you wish to create this distribution from logits only, please refer to + `Distribution.get_partial_dist_cls()`. + + Args: + logits: The tensor containing logits to be separated by `input_lens`. + child_distribution_cls_struct: A struct of Distribution classes that can + be instantiated from the given logits. + child_distribution_cls_struct: A struct of Distribution classes that can + be instantiated from the given logits. 
+ input_lens: A list or dict of integers that indicate the length of each + logit. If this is given as a dict, the structure should match the + structure of child_distribution_cls_struct. + space: The possibly nested output space. + **kwargs: Forward compatibility kwargs. + + Returns: + A TfMultiDistribution object. + """ + logit_lens = tree.flatten(input_lens) + child_distribution_cls_list = tree.flatten(child_distribution_cls_struct) + split_logits = tf.split(logits, logit_lens, axis=1) + + child_distribution_list = tree.map_structure( + lambda dist, input_: dist.from_logits(input_), + child_distribution_cls_list, + list(split_logits), + ) + + child_distribution_struct = tree.unflatten_as( + child_distribution_cls_struct, child_distribution_list + ) + + return TfMultiDistribution( + child_distribution_struct=child_distribution_struct, + ) + + def to_deterministic(self) -> "TfMultiDistribution": + flat_deterministic_dists = [ + dist.to_deterministic for dist in self._flat_child_distributions + ] + deterministic_dists = tree.unflatten_as( + self._original_struct, flat_deterministic_dists + ) + return TfMultiDistribution(deterministic_dists) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/tf_modelv2.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/tf_modelv2.py new file mode 100644 index 0000000000000000000000000000000000000000..7438796944248b443b0e3c91332275745a1ab467 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/tf/tf_modelv2.py @@ -0,0 +1,142 @@ +import contextlib +import gymnasium as gym +import re +from typing import Dict, List, Union + +from ray.util import log_once +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.utils.annotations import OldAPIStack, override +from ray.rllib.utils.deprecation import deprecation_warning +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.typing import ModelConfigDict, TensorType + +tf1, tf, tfv = try_import_tf() + + 
@OldAPIStack
class TFModelV2(ModelV2):
    """TF version of ModelV2, which should contain a tf keras Model.

    Note that this class by itself is not a valid model unless you
    implement forward() in a subclass."""

    def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: int,
        model_config: ModelConfigDict,
        name: str,
    ):
        """Initializes a TFModelV2 instance.

        Here is an example implementation for a subclass
        ``MyModelClass(TFModelV2)``::

            def __init__(self, *args, **kwargs):
                super(MyModelClass, self).__init__(*args, **kwargs)
                input_layer = tf.keras.layers.Input(...)
                hidden_layer = tf.keras.layers.Dense(...)(input_layer)
                output_layer = tf.keras.layers.Dense(...)(hidden_layer)
                value_layer = tf.keras.layers.Dense(...)(hidden_layer)
                self.base_model = tf.keras.Model(
                    input_layer, [output_layer, value_layer])
        """
        super().__init__(
            obs_space, action_space, num_outputs, model_config, name, framework="tf"
        )

        # Deprecated: TFModelV2 now automatically tracks its variables.
        self.var_list = []

        # Remember the graph this model was built in (None in eager mode).
        self.graph = None if tf1.executing_eagerly() else tf1.get_default_graph()

    def context(self) -> contextlib.AbstractContextManager:
        """Returns a contextmanager for the current TF graph."""
        return self.graph.as_default() if self.graph else ModelV2.context(self)

    def update_ops(self) -> List[TensorType]:
        """Return the list of update ops for this model.

        For example, this should include any BatchNorm update ops."""
        return []

    def register_variables(self, variables: List[TensorType]) -> None:
        """Register the given list of variables with this model."""
        if log_once("deprecated_tfmodelv2_register_variables"):
            deprecation_warning(old="TFModelV2.register_variables", error=False)
        self.var_list.extend(variables)

    @override(ModelV2)
    def variables(
        self, as_dict: bool = False
    ) -> Union[List[TensorType], Dict[str, TensorType]]:
        if as_dict:
            # Old way using `register_variables`.
            if self.var_list:
                return {v.name: v for v in self.var_list}
            # New way: Automatically determine the var tree.
            return self._find_sub_modules("", self.__dict__)

        # Old way using `register_variables`.
        if self.var_list:
            return list(self.var_list)
        # New way: Automatically determine the var tree.
        return list(self.variables(as_dict=True).values())

    @override(ModelV2)
    def trainable_variables(
        self, as_dict: bool = False
    ) -> Union[List[TensorType], Dict[str, TensorType]]:
        if as_dict:
            all_vars = self.variables(as_dict=True)
            return {k: v for k, v in all_vars.items() if v.trainable}
        return [v for v in self.variables() if v.trainable]

    @staticmethod
    def _find_sub_modules(current_key, struct):
        """Recursively collects variables found in `struct` into a flat dict."""
        # Keras Model / tf.Module: key = current_key + "." + var-name
        # (with '/' replaced by '.').
        if isinstance(struct, tf.keras.models.Model) or isinstance(struct, tf.Module):
            return {
                current_key + "." + re.sub("/", ".", var.name): var
                for var in struct.variables
            }
        # Other TFModelV2: include its vars under our key prefix.
        elif isinstance(struct, TFModelV2):
            return {
                current_key + "." + key: var
                for key, var in struct.variables(as_dict=True).items()
            }
        # Plain tf.Variable.
        elif isinstance(struct, tf.Variable):
            return {current_key: struct}
        # List/Tuple: recurse with an indexed key suffix.
        elif isinstance(struct, (tuple, list)):
            found = {}
            for i, value in enumerate(struct):
                found.update(
                    TFModelV2._find_sub_modules("{}_{}".format(current_key, i), value)
                )
            return found
        # Dict: recurse with the dict key appended.
        elif isinstance(struct, dict):
            prefix = current_key + "_" if current_key else current_key
            found = {}
            for key, value in struct.items():
                found.update(TFModelV2._find_sub_modules(prefix + str(key), value))
            return found
        return {}
@OldAPIStack
class VisionNetwork(TFModelV2):
    """Generic vision network implemented in ModelV2 API.

    An additional post-conv fully connected stack can be added and configured
    via the config keys:
    `post_fcnet_hiddens`: Dense layer sizes after the Conv2D stack.
    `post_fcnet_activation`: Activation function to use for this FC stack.
    """

    def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: int,
        model_config: ModelConfigDict,
        name: str,
    ):
        """Builds the Conv2D stack (and optional post-FC stack) as a keras Model.

        If `num_outputs` is falsy, the conv output is flattened instead and
        `self.num_outputs` is set to the resulting node count.
        """
        # Fall back to a default filter config derived from the obs shape if
        # none was provided.
        if not model_config.get("conv_filters"):
            model_config["conv_filters"] = get_filter_config(obs_space.shape)

        super(VisionNetwork, self).__init__(
            obs_space, action_space, num_outputs, model_config, name
        )

        activation = get_activation_fn(
            self.model_config.get("conv_activation"), framework="tf"
        )
        filters = self.model_config["conv_filters"]
        assert len(filters) > 0, "Must provide at least 1 entry in `conv_filters`!"

        # Post FC net config.
        post_fcnet_hiddens = model_config.get("post_fcnet_hiddens", [])
        post_fcnet_activation = get_activation_fn(
            model_config.get("post_fcnet_activation"), framework="tf"
        )

        no_final_linear = self.model_config.get("no_final_linear")
        vf_share_layers = self.model_config.get("vf_share_layers")

        input_shape = obs_space.shape
        # Observations are expected NHWC; NCHW inputs are transposed in
        # `forward()` when data_format is "channels_first".
        self.data_format = "channels_last"

        inputs = tf.keras.layers.Input(shape=input_shape, name="observations")
        last_layer = inputs
        # Whether the last layer is the output of a Flattened (rather than
        # a n x (1,1) Conv2D).
        self.last_layer_is_flattened = False

        # Build the action layers
        for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=stride
                if isinstance(stride, (list, tuple))
                else (stride, stride),
                activation=activation,
                padding="same",
                data_format="channels_last",
                name="conv{}".format(i),
            )(last_layer)

        out_size, kernel, stride = filters[-1]

        # No final linear: Last layer has activation function and exits with
        # num_outputs nodes (this could be a 1x1 conv or a FC layer, depending
        # on `post_fcnet_...` settings).
        if no_final_linear and num_outputs:
            last_layer = tf.keras.layers.Conv2D(
                out_size if post_fcnet_hiddens else num_outputs,
                kernel,
                strides=stride
                if isinstance(stride, (list, tuple))
                else (stride, stride),
                activation=activation,
                padding="valid",
                data_format="channels_last",
                name="conv_out",
            )(last_layer)
            # Add (optional) post-fc-stack after last Conv2D layer.
            layer_sizes = post_fcnet_hiddens[:-1] + (
                [num_outputs] if post_fcnet_hiddens else []
            )
            # `feature_out` tracks the layer used as value-function input
            # when vf_share_layers is on.
            feature_out = last_layer

            for i, out_size in enumerate(layer_sizes):
                feature_out = last_layer
                last_layer = tf.keras.layers.Dense(
                    out_size,
                    name="post_fcnet_{}".format(i),
                    activation=post_fcnet_activation,
                    kernel_initializer=normc_initializer(1.0),
                )(last_layer)

        # Finish network normally (w/o overriding last layer size with
        # `num_outputs`), then add another linear one of size `num_outputs`.
        else:
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=stride
                if isinstance(stride, (list, tuple))
                else (stride, stride),
                activation=activation,
                padding="valid",
                data_format="channels_last",
                name="conv{}".format(len(filters)),
            )(last_layer)

            # num_outputs defined. Use that to create an exact
            # `num_output`-sized (1,1)-Conv2D.
            if num_outputs:
                if post_fcnet_hiddens:
                    last_cnn = last_layer = tf.keras.layers.Conv2D(
                        post_fcnet_hiddens[0],
                        [1, 1],
                        activation=post_fcnet_activation,
                        padding="same",
                        data_format="channels_last",
                        name="conv_out",
                    )(last_layer)
                    # Add (optional) post-fc-stack after last Conv2D layer.
                    for i, out_size in enumerate(
                        post_fcnet_hiddens[1:] + [num_outputs]
                    ):
                        feature_out = last_layer
                        last_layer = tf.keras.layers.Dense(
                            out_size,
                            name="post_fcnet_{}".format(i + 1),
                            # Final layer (producing `num_outputs` logits)
                            # gets no activation.
                            activation=post_fcnet_activation
                            if i < len(post_fcnet_hiddens) - 1
                            else None,
                            kernel_initializer=normc_initializer(1.0),
                        )(last_layer)
                else:
                    feature_out = last_layer
                    last_cnn = last_layer = tf.keras.layers.Conv2D(
                        num_outputs,
                        [1, 1],
                        activation=None,
                        padding="same",
                        data_format="channels_last",
                        name="conv_out",
                    )(last_layer)

                # The conv stack must reduce spatial dims to exactly 1x1 so
                # the output can be squeezed to [B, num_outputs].
                if last_cnn.shape[1] != 1 or last_cnn.shape[2] != 1:
                    raise ValueError(
                        "Given `conv_filters` ({}) do not result in a [B, 1, "
                        "1, {} (`num_outputs`)] shape (but in {})! Please "
                        "adjust your Conv2D stack such that the dims 1 and 2 "
                        "are both 1.".format(
                            self.model_config["conv_filters"],
                            self.num_outputs,
                            list(last_cnn.shape),
                        )
                    )

            # num_outputs not known -> Flatten, then set self.num_outputs
            # to the resulting number of nodes.
            else:
                self.last_layer_is_flattened = True
                last_layer = tf.keras.layers.Flatten(data_format="channels_last")(
                    last_layer
                )

                # Add (optional) post-fc-stack after last Conv2D layer.
                for i, out_size in enumerate(post_fcnet_hiddens):
                    last_layer = tf.keras.layers.Dense(
                        out_size,
                        name="post_fcnet_{}".format(i),
                        activation=post_fcnet_activation,
                        kernel_initializer=normc_initializer(1.0),
                    )(last_layer)
                feature_out = last_layer
                self.num_outputs = last_layer.shape[1]
        logits_out = last_layer

        # Build the value layers
        if vf_share_layers:
            if not self.last_layer_is_flattened:
                # Squeeze the 1x1 spatial dims off the shared feature map.
                feature_out = tf.keras.layers.Lambda(
                    lambda x: tf.squeeze(x, axis=[1, 2])
                )(feature_out)
            value_out = tf.keras.layers.Dense(
                1,
                name="value_out",
                activation=None,
                kernel_initializer=normc_initializer(0.01),
            )(feature_out)
        else:
            # build a parallel set of hidden layers for the value net
            last_layer = inputs
            for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
                last_layer = tf.keras.layers.Conv2D(
                    out_size,
                    kernel,
                    strides=stride
                    if isinstance(stride, (list, tuple))
                    else (stride, stride),
                    activation=activation,
                    padding="same",
                    data_format="channels_last",
                    name="conv_value_{}".format(i),
                )(last_layer)
            out_size, kernel, stride = filters[-1]
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=stride
                if isinstance(stride, (list, tuple))
                else (stride, stride),
                activation=activation,
                padding="valid",
                data_format="channels_last",
                name="conv_value_{}".format(len(filters)),
            )(last_layer)
            last_layer = tf.keras.layers.Conv2D(
                1,
                [1, 1],
                activation=None,
                padding="same",
                data_format="channels_last",
                name="conv_value_out",
            )(last_layer)
            value_out = tf.keras.layers.Lambda(lambda x: tf.squeeze(x, axis=[1, 2]))(
                last_layer
            )

        self.base_model = tf.keras.Model(inputs, [logits_out, value_out])

    def forward(
        self,
        input_dict: Dict[str, TensorType],
        state: List[TensorType],
        seq_lens: TensorType,
    ) -> (TensorType, List[TensorType]):
        """Runs the conv stack; returns (logits, state)."""
        obs = input_dict["obs"]
        if self.data_format == "channels_first":
            obs = tf.transpose(obs, [0, 2, 3, 1])
        # Explicit cast to float32 needed in eager.
        model_out, self._value_out = self.base_model(tf.cast(obs, tf.float32))
        # Our last layer is already flat.
        if self.last_layer_is_flattened:
            return model_out, state
        # Last layer is a n x [1,1] Conv2D -> Flatten.
        else:
            return tf.squeeze(model_out, axis=[1, 2]), state

    def value_function(self) -> TensorType:
        """Returns the value-branch output from the most recent forward pass."""
        return tf.reshape(self._value_out, [-1])
b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/convtranspose2d_stack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1149d62ef6466c878300df12013c638299b185d6 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/convtranspose2d_stack.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffeac965004adc47d26c4d4da5b380d03f6f9592 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/multi_head_attention.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/multi_head_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e33426a15569f132ae0042a597d560033250e063 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/multi_head_attention.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/noisy_layer.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/noisy_layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e23b2700b72d62dbe069593314c78ff25892013 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/noisy_layer.cpython-310.pyc differ diff --git 
@DeveloperAPI
def add_mixins(base, mixins, reversed=False):
    """Returns a new class with mixins applied in priority order."""

    mixins = list(mixins or [])

    # Fold the mixins onto `base` one at a time, from the end of the list.
    while mixins:
        if reversed:

            class new_base(base, mixins.pop()):
                pass

        else:

            class new_base(mixins.pop(), base):
                pass

        base = new_base

    return base


@DeveloperAPI
def force_list(elements=None, to_tuple=False):
    """
    Makes sure `elements` is returned as a list, whether `elements` is a single
    item, already a list, or a tuple.

    Args:
        elements (Optional[any]): The inputs as single item, list, or tuple to
            be converted into a list/tuple. If None, returns empty list/tuple.
        to_tuple: Whether to use tuple (instead of list).

    Returns:
        Union[list,tuple]: All given elements in a list/tuple depending on
            `to_tuple`'s value. If elements is None,
            returns an empty list/tuple.
    """
    ctor = tuple if to_tuple is True else list
    if elements is None:
        return ctor()
    # Already a sequence type we recognize -> convert directly.
    if type(elements) in [list, set, tuple]:
        return ctor(elements)
    # Single item -> wrap it.
    return ctor([elements])


@DeveloperAPI
class NullContextManager(contextlib.AbstractContextManager):
    """No-op context manager"""

    def __init__(self):
        pass

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


force_tuple = partial(force_list, to_tuple=True)

__all__ = [
    "add_mixins",
    "check",
    "check_compute_single_action",
    "check_train_results",
    "deep_update",
    "deprecation_warning",
    "fc",
    "force_list",
    "force_tuple",
    "lstm",
    "merge_dicts",
    "one_hot",
    "override",
    "relu",
    "sigmoid",
    "softmax",
    "try_import_jax",
    "try_import_tf",
    "try_import_tfp",
    "try_import_torch",
    "ConstantSchedule",
    "DeveloperAPI",
    "ExponentialSchedule",
    "Filter",
    "FilterManager",
    "LARGE_INTEGER",
    "LinearSchedule",
    "MAX_LOG_NN_OUTPUT",
    "MIN_LOG_NN_OUTPUT",
    "PiecewiseSchedule",
    "PolynomialSchedule",
    "PublicAPI",
    "SMALL_NUMBER",
]
0000000000000000000000000000000000000000..7152d8599d83834735e16a638f08a633a0b37e79 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/actor_manager.py @@ -0,0 +1,916 @@ +from collections import defaultdict +import copy +from dataclasses import dataclass +import logging +import sys +import time +from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union + +import ray +from ray.actor import ActorHandle +from ray.exceptions import RayError, RayTaskError +from ray.rllib.utils.typing import T +from ray.util.annotations import DeveloperAPI + + +logger = logging.getLogger(__name__) + + +@DeveloperAPI +class ResultOrError: + """A wrapper around a result or a RayError thrown during remote task/actor calls. + + This is used to return data from `FaultTolerantActorManager` that allows us to + distinguish between RayErrors (remote actor related) and valid results. + """ + + def __init__(self, result: Any = None, error: Exception = None): + """One and only one of result or error should be set. + + Args: + result: The result of the computation. Note that None is a valid result if + the remote function does not return anything. + error: Alternatively, the error that occurred during the computation. + """ + self._result = result + self._error = ( + # Easier to handle if we show the user the original error. + error.as_instanceof_cause() + if isinstance(error, RayTaskError) + else error + ) + + @property + def ok(self): + return self._error is None + + def get(self): + """Returns the result or the error.""" + if self._error: + return self._error + else: + return self._result + + +@DeveloperAPI +@dataclass +class CallResult: + """Represents a single result from a call to an actor. + + Each CallResult contains the index of the actor that was called + plus the result or error from the call. 
+ """ + + actor_id: int + result_or_error: ResultOrError + tag: str + + @property + def ok(self): + """Passes through the ok property from the result_or_error.""" + return self.result_or_error.ok + + def get(self): + """Passes through the get method from the result_or_error.""" + return self.result_or_error.get() + + +@DeveloperAPI +class RemoteCallResults: + """Represents a list of results from calls to a set of actors. + + CallResults provides convenient APIs to iterate over the results + while skipping errors, etc. + + .. testcode:: + :skipif: True + + manager = FaultTolerantActorManager( + actors, max_remote_requests_in_flight_per_actor=2, + ) + results = manager.foreach_actor(lambda w: w.call()) + + # Iterate over all results ignoring errors. + for result in results.ignore_errors(): + print(result.get()) + """ + + class _Iterator: + """An iterator over the results of a remote call.""" + + def __init__(self, call_results: List[CallResult]): + self._call_results = call_results + + def __iter__(self) -> Iterator[CallResult]: + return self + + def __next__(self) -> CallResult: + if not self._call_results: + raise StopIteration + return self._call_results.pop(0) + + def __init__(self): + self.result_or_errors: List[CallResult] = [] + + def add_result(self, actor_id: int, result_or_error: ResultOrError, tag: str): + """Add index of a remote actor plus the call result to the list. + + Args: + actor_id: ID of the remote actor. + result_or_error: The result or error from the call. + tag: A description to identify the call. + """ + self.result_or_errors.append(CallResult(actor_id, result_or_error, tag)) + + def __iter__(self) -> Iterator[ResultOrError]: + """Return an iterator over the results.""" + # Shallow copy the list. 
+ return self._Iterator(copy.copy(self.result_or_errors)) + + def __len__(self) -> int: + return len(self.result_or_errors) + + def ignore_errors(self) -> Iterator[ResultOrError]: + """Return an iterator over the results, skipping all errors.""" + return self._Iterator([r for r in self.result_or_errors if r.ok]) + + def ignore_ray_errors(self) -> Iterator[ResultOrError]: + """Return an iterator over the results, skipping only Ray errors. + + Similar to ignore_errors, but only skips Errors raised because of + remote actor problems (often get restored automatcially). + This is useful for callers that want to handle application errors differently + from Ray errors. + """ + return self._Iterator( + [r for r in self.result_or_errors if not isinstance(r.get(), RayError)] + ) + + +@DeveloperAPI +class FaultAwareApply: + @DeveloperAPI + def ping(self) -> str: + """Ping the actor. Can be used as a health check. + + Returns: + "pong" if actor is up and well. + """ + return "pong" + + @DeveloperAPI + def apply( + self, + func: Callable[[Any, Optional[Any], Optional[Any]], T], + *args, + **kwargs, + ) -> T: + """Calls the given function with this Actor instance. + + A generic interface for applying arbitrary member functions on a + remote actor. + + Args: + func: The function to call, with this actor as first + argument, followed by args, and kwargs. + args: Optional additional args to pass to the function call. + kwargs: Optional additional kwargs to pass to the function call. + + Returns: + The return value of the function call. + """ + try: + return func(self, *args, **kwargs) + except Exception as e: + # Actor should be recreated by Ray. + if self.config.restart_failed_env_runners: + logger.exception(f"Worker exception caught during `apply()`: {e}") + # Small delay to allow logs messages to propagate. + time.sleep(self.config.delay_between_env_runner_restarts_s) + # Kill this worker so Ray Core can restart it. + sys.exit(1) + # Actor should be left dead. 
+ else: + raise e + + +@DeveloperAPI +class FaultTolerantActorManager: + """A manager that is aware of the healthiness of remote actors. + + .. testcode:: + + import time + import ray + from ray.rllib.utils.actor_manager import FaultTolerantActorManager + + @ray.remote + class MyActor: + def apply(self, fn): + return fn(self) + + def do_something(self): + return True + + actors = [MyActor.remote() for _ in range(3)] + manager = FaultTolerantActorManager( + actors, max_remote_requests_in_flight_per_actor=2, + ) + + # Synchronous remote calls. + results = manager.foreach_actor(lambda actor: actor.do_something()) + # Print results ignoring returned errors. + print([r.get() for r in results.ignore_errors()]) + + # Asynchronous remote calls. + manager.foreach_actor_async(lambda actor: actor.do_something()) + time.sleep(2) # Wait for the tasks to finish. + for r in manager.fetch_ready_async_reqs(): + # Handle result and errors. + if r.ok: + print(r.get()) + else: + print("Error: {}".format(r.get())) + """ + + @dataclass + class _ActorState: + """State of a single actor.""" + + # Num of outstanding async requests for this actor. + num_in_flight_async_requests: int = 0 + # Whether this actor is in a healthy state. + is_healthy: bool = True + + def __init__( + self, + actors: Optional[List[ActorHandle]] = None, + max_remote_requests_in_flight_per_actor: int = 2, + init_id: int = 0, + ): + """Construct a FaultTolerantActorManager. + + Args: + actors: A list of ray remote actors to manage on. These actors must have an + ``apply`` method which takes a function with only one parameter (the + actor instance itself). + max_remote_requests_in_flight_per_actor: The maximum number of remote + requests that can be in flight per actor. Any requests made to the pool + that cannot be scheduled because the limit has been reached will be + dropped. This only applies to the asynchronous remote call mode. + init_id: The initial ID to use for the next remote actor. Default is 0. 
+ """ + # For historic reasons, just start remote worker ID from 1, so they never + # collide with local worker ID (0). + self._next_id = init_id + + # Actors are stored in a map and indexed by a unique (int) ID. + self._actors: Dict[int, ActorHandle] = {} + self._remote_actor_states: Dict[int, self._ActorState] = {} + self._restored_actors = set() + self.add_actors(actors or []) + + # Maps outstanding async requests to the IDs of the actor IDs that + # are executing them. + self._in_flight_req_to_actor_id: Dict[ray.ObjectRef, int] = {} + + self._max_remote_requests_in_flight_per_actor = ( + max_remote_requests_in_flight_per_actor + ) + + # Useful metric. + self._num_actor_restarts = 0 + + @DeveloperAPI + def actor_ids(self) -> List[int]: + """Returns a list of all worker IDs (healthy or not).""" + return list(self._actors.keys()) + + @DeveloperAPI + def healthy_actor_ids(self) -> List[int]: + """Returns a list of worker IDs that are healthy.""" + return [k for k, v in self._remote_actor_states.items() if v.is_healthy] + + @DeveloperAPI + def add_actors(self, actors: List[ActorHandle]): + """Add a list of actors to the pool. + + Args: + actors: A list of ray remote actors to be added to the pool. + """ + for actor in actors: + self._actors[self._next_id] = actor + self._remote_actor_states[self._next_id] = self._ActorState() + self._next_id += 1 + + @DeveloperAPI + def remove_actor(self, actor_id: int) -> ActorHandle: + """Remove an actor from the pool. + + Args: + actor_id: ID of the actor to remove. + + Returns: + Handle to the actor that was removed. + """ + actor = self._actors[actor_id] + + # Remove the actor from the pool. 
+ del self._actors[actor_id] + del self._remote_actor_states[actor_id] + self._restored_actors.discard(actor_id) + self._remove_async_state(actor_id) + + return actor + + @DeveloperAPI + def num_actors(self) -> int: + """Return the total number of actors in the pool.""" + return len(self._actors) + + @DeveloperAPI + def num_healthy_actors(self) -> int: + """Return the number of healthy remote actors.""" + return sum(s.is_healthy for s in self._remote_actor_states.values()) + + @DeveloperAPI + def total_num_restarts(self) -> int: + """Return the number of remote actors that have been restarted.""" + return self._num_actor_restarts + + @DeveloperAPI + def num_outstanding_async_reqs(self) -> int: + """Return the number of outstanding async requests.""" + return len(self._in_flight_req_to_actor_id) + + @DeveloperAPI + def is_actor_healthy(self, actor_id: int) -> bool: + """Whether a remote actor is in healthy state. + + Args: + actor_id: ID of the remote actor. + + Returns: + True if the actor is healthy, False otherwise. + """ + if actor_id not in self._remote_actor_states: + raise ValueError(f"Unknown actor id: {actor_id}") + return self._remote_actor_states[actor_id].is_healthy + + @DeveloperAPI + def set_actor_state(self, actor_id: int, healthy: bool) -> None: + """Update activate state for a specific remote actor. + + Args: + actor_id: ID of the remote actor. + healthy: Whether the remote actor is healthy. + """ + if actor_id not in self._remote_actor_states: + raise ValueError(f"Unknown actor id: {actor_id}") + + was_healthy = self._remote_actor_states[actor_id].is_healthy + # Set from unhealthy to healthy -> Add to restored set. + if not was_healthy and healthy: + self._restored_actors.add(actor_id) + # Set from healthy to unhealthy -> Remove from restored set. + elif was_healthy and not healthy: + self._restored_actors.discard(actor_id) + + self._remote_actor_states[actor_id].is_healthy = healthy + + if not healthy: + # Remove any async states. 
+ self._remove_async_state(actor_id) + + @DeveloperAPI + def clear(self): + """Clean up managed actors.""" + for actor in self._actors.values(): + ray.kill(actor) + self._actors.clear() + self._remote_actor_states.clear() + self._restored_actors.clear() + self._in_flight_req_to_actor_id.clear() + + @DeveloperAPI + def foreach_actor( + self, + func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]], + *, + healthy_only: bool = True, + remote_actor_ids: Optional[List[int]] = None, + timeout_seconds: Optional[float] = None, + return_obj_refs: bool = False, + mark_healthy: bool = False, + ) -> RemoteCallResults: + """Calls the given function with each actor instance as arg. + + Automatically marks actors unhealthy if they crash during the remote call. + + Args: + func: A single, or a list of Callables, that get applied on the list + of specified remote actors. + healthy_only: If True, applies `func` only to actors currently tagged + "healthy", otherwise to all actors. If `healthy_only=False` and + `mark_healthy=True`, will send `func` to all actors and mark those + actors "healthy" that respond to the request within `timeout_seconds` + and are currently tagged as "unhealthy". + remote_actor_ids: Apply func on a selected set of remote actors. Use None + (default) for all actors. + timeout_seconds: Time to wait (in seconds) for results. Set this to 0.0 for + fire-and-forget. Set this to None (default) to wait infinitely (i.e. for + synchronous execution). + return_obj_refs: whether to return ObjectRef instead of actual results. + Note, for fault tolerance reasons, these returned ObjectRefs should + never be resolved with ray.get() outside of the context of this manager. + mark_healthy: Whether to mark all those actors healthy again that are + currently marked unhealthy AND that returned results from the remote + call (within the given `timeout_seconds`). + Note that actors are NOT set unhealthy, if they simply time out + (only if they return a RayActorError). 
+ Also not that this setting is ignored if `healthy_only=True` (b/c this + setting only affects actors that are currently tagged as unhealthy). + + Returns: + The list of return values of all calls to `func(actor)`. The values may be + actual data returned or exceptions raised during the remote call in the + format of RemoteCallResults. + """ + remote_actor_ids = remote_actor_ids or self.actor_ids() + if healthy_only: + func, remote_actor_ids = self._filter_func_and_remote_actor_id_by_state( + func, remote_actor_ids + ) + + # Send out remote requests. + remote_calls = self._call_actors( + func=func, + remote_actor_ids=remote_actor_ids, + ) + + # Collect remote request results (if available given timeout and/or errors). + _, remote_results = self._fetch_result( + remote_actor_ids=remote_actor_ids, + remote_calls=remote_calls, + tags=[None] * len(remote_calls), + timeout_seconds=timeout_seconds, + return_obj_refs=return_obj_refs, + mark_healthy=mark_healthy, + ) + + return remote_results + + @DeveloperAPI + def foreach_actor_async( + self, + func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]], + tag: str = None, + *, + healthy_only: bool = True, + remote_actor_ids: List[int] = None, + ) -> int: + """Calls given functions against each actors without waiting for results. + + Args: + func: A single Callable applied to all specified remote actors or a list + of Callables, that get applied on the list of specified remote actors. + In the latter case, both list of Callables and list of specified actors + must have the same length. + tag: A tag to identify the results from this async call. + healthy_only: If True, applies `func` only to actors currently tagged + "healthy", otherwise to all actors. If `healthy_only=False` and + later, `self.fetch_ready_async_reqs()` is called with + `mark_healthy=True`, will send `func` to all actors and mark those + actors "healthy" that respond to the request within `timeout_seconds` + and are currently tagged as "unhealthy". 
+ remote_actor_ids: Apply func on a selected set of remote actors. + Note, for fault tolerance reasons, these returned ObjectRefs should + never be resolved with ray.get() outside of the context of this manager. + + Returns: + The number of async requests that are actually fired. + """ + # TODO(avnishn, jungong): so thinking about this a bit more, it would be the + # best if we can attach multiple tags to an async all, like basically this + # parameter should be tags: + # For sync calls, tags would be (). + # For async call users, they can attached multiple tags for a single call, like + # ("rollout_worker", "sync_weight"). + # For async fetch result, we can also specify a single, or list of tags. For + # example, ("eval", "sample") will fetch all the sample() calls on eval + # workers. + remote_actor_ids = remote_actor_ids or self.actor_ids() + + if healthy_only: + func, remote_actor_ids = self._filter_func_and_remote_actor_id_by_state( + func, remote_actor_ids + ) + + if isinstance(func, list) and len(func) != len(remote_actor_ids): + raise ValueError( + f"The number of functions specified {len(func)} must match " + f"the number of remote actor indices {len(remote_actor_ids)}." + ) + + num_calls_to_make: Dict[int, int] = defaultdict(lambda: 0) + # Drop calls to actors that are too busy. 
+ if isinstance(func, list): + limited_func = [] + limited_remote_actor_ids = [] + for i, f in zip(remote_actor_ids, func): + num_outstanding_reqs = self._remote_actor_states[ + i + ].num_in_flight_async_requests + if ( + num_outstanding_reqs + num_calls_to_make[i] + < self._max_remote_requests_in_flight_per_actor + ): + num_calls_to_make[i] += 1 + limited_func.append(f) + limited_remote_actor_ids.append(i) + else: + limited_func = func + limited_remote_actor_ids = [] + for i in remote_actor_ids: + num_outstanding_reqs = self._remote_actor_states[ + i + ].num_in_flight_async_requests + if ( + num_outstanding_reqs + num_calls_to_make[i] + < self._max_remote_requests_in_flight_per_actor + ): + num_calls_to_make[i] += 1 + limited_remote_actor_ids.append(i) + + remote_calls = self._call_actors( + func=limited_func, + remote_actor_ids=limited_remote_actor_ids, + ) + + # Save these as outstanding requests. + for id, call in zip(limited_remote_actor_ids, remote_calls): + self._remote_actor_states[id].num_in_flight_async_requests += 1 + self._in_flight_req_to_actor_id[call] = (tag, id) + + return len(remote_calls) + + @DeveloperAPI + def fetch_ready_async_reqs( + self, + *, + tags: Union[str, List[str], Tuple[str]] = (), + timeout_seconds: Optional[float] = 0.0, + return_obj_refs: bool = False, + mark_healthy: bool = False, + ) -> RemoteCallResults: + """Get results from outstanding async requests that are ready. + + Automatically mark actors unhealthy if they fail to respond. + + Note: If tags is an empty tuple then results from all ready async requests are + returned. + + Args: + timeout_seconds: ray.get() timeout. Default is 0, which only fetched those + results (immediately) that are already ready. + tags: A tag or a list of tags to identify the results from this async call. + return_obj_refs: Whether to return ObjectRef instead of actual results. 
+ mark_healthy: Whether to mark all those actors healthy again that are + currently marked unhealthy AND that returned results from the remote + call (within the given `timeout_seconds`). + Note that actors are NOT set to unhealthy, if they simply time out, + meaning take a longer time to fulfil the remote request. We only ever + mark an actor unhealthy, if they raise a RayActorError inside the remote + request. + Also note that this settings is ignored if the preceding + `foreach_actor_async()` call used the `healthy_only=True` argument (b/c + `mark_healthy` only affects actors that are currently tagged as + unhealthy). + + Returns: + A list of return values of all calls to `func(actor)` that are ready. + The values may be actual data returned or exceptions raised during the + remote call in the format of RemoteCallResults. + """ + # Construct the list of in-flight requests filtered by tag. + remote_calls, remote_actor_ids, valid_tags = self._filter_calls_by_tag(tags) + ready, remote_results = self._fetch_result( + remote_actor_ids=remote_actor_ids, + remote_calls=remote_calls, + tags=valid_tags, + timeout_seconds=timeout_seconds, + return_obj_refs=return_obj_refs, + mark_healthy=mark_healthy, + ) + + for obj_ref, result in zip(ready, remote_results): + # Decrease outstanding request on this actor by 1. + self._remote_actor_states[result.actor_id].num_in_flight_async_requests -= 1 + # Also, remove this call here from the in-flight list, + # obj_refs may have already been removed when we disable an actor. + if obj_ref in self._in_flight_req_to_actor_id: + del self._in_flight_req_to_actor_id[obj_ref] + + return remote_results + + @staticmethod + def handle_remote_call_result_errors( + results_or_errors: RemoteCallResults, + *, + ignore_ray_errors: bool, + ) -> None: + """Checks given results for application errors and raises them if necessary. + + Args: + results_or_errors: The results or errors to check. 
+ ignore_ray_errors: Whether to ignore RayErrors within the elements of + `results_or_errors`. + """ + for result_or_error in results_or_errors: + # Good result. + if result_or_error.ok: + continue + # RayError, but we ignore it. + elif ignore_ray_errors: + logger.exception(result_or_error.get()) + # Raise RayError. + else: + raise result_or_error.get() + + @DeveloperAPI + def probe_unhealthy_actors( + self, + timeout_seconds: Optional[float] = None, + mark_healthy: bool = False, + ) -> List[int]: + """Ping all unhealthy actors to try bringing them back. + + Args: + timeout_seconds: Timeout in seconds (to avoid pinging hanging workers + indefinitely). + mark_healthy: Whether to mark all those actors healthy again that are + currently marked unhealthy AND that respond to the `ping` remote request + (within the given `timeout_seconds`). + Note that actors are NOT set to unhealthy, if they simply time out, + meaning take a longer time to fulfil the remote request. We only ever + mark and actor unhealthy, if they return a RayActorError from the remote + request. + Also note that this settings is ignored if `healthy_only=True` (b/c this + setting only affects actors that are currently tagged as unhealthy). + + Returns: + A list of actor IDs that were restored by the `ping.remote()` call PLUS + those actors that were previously restored via other remote requests. + The cached set of such previously restored actors will be erased in this + call. + """ + # Collect recently restored actors (from `self._fetch_result` calls other than + # the one triggered here via the `ping`). + restored_actors = list(self._restored_actors) + self._restored_actors.clear() + + # Probe all unhealthy actors via a simple `ping()`. + unhealthy_actor_ids = [ + actor_id + for actor_id in self.actor_ids() + if not self.is_actor_healthy(actor_id) + ] + # No unhealthy actors currently -> Return recently restored ones. 
+ if not unhealthy_actor_ids: + return restored_actors + + # Some unhealthy actors -> `ping()` all of them to trigger a new fetch and + # capture all restored ones. + remote_results = self.foreach_actor( + func=lambda actor: actor.ping(), + remote_actor_ids=unhealthy_actor_ids, + healthy_only=False, # We specifically want to ping unhealthy actors. + timeout_seconds=timeout_seconds, + mark_healthy=mark_healthy, + ) + + # Return previously restored actors AND actors restored via the `ping()` call. + return restored_actors + [ + result.actor_id for result in remote_results if result.ok + ] + + def _call_actors( + self, + func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]], + *, + remote_actor_ids: List[int] = None, + ) -> List[ray.ObjectRef]: + """Apply functions on a list of remote actors. + + Args: + func: A single, or a list of Callables, that get applied on the list + of specified remote actors. + remote_actor_ids: Apply func on this selected set of remote actors. + + Returns: + A list of ObjectRefs returned from the remote calls. + """ + if isinstance(func, list): + assert len(remote_actor_ids) == len( + func + ), "Funcs must have the same number of callables as actor indices." + + if remote_actor_ids is None: + remote_actor_ids = self.actor_ids() + + if isinstance(func, list): + calls = [ + self._actors[i].apply.remote(f) for i, f in zip(remote_actor_ids, func) + ] + else: + calls = [self._actors[i].apply.remote(func) for i in remote_actor_ids] + + return calls + + @DeveloperAPI + def _fetch_result( + self, + *, + remote_actor_ids: List[int], + remote_calls: List[ray.ObjectRef], + tags: List[str], + timeout_seconds: Optional[float] = None, + return_obj_refs: bool = False, + mark_healthy: bool = False, + ) -> Tuple[List[ray.ObjectRef], RemoteCallResults]: + """Try fetching results from remote actor calls. + + Mark whether an actor is healthy or not accordingly. + + Args: + remote_actor_ids: IDs of the actors these remote + calls were fired against. 
+ remote_calls: List of remote calls to fetch. + tags: List of tags used for identifying the remote calls. + timeout_seconds: Timeout (in sec) for the ray.wait() call. Default is None, + meaning wait indefinitely for all results. + return_obj_refs: Whether to return ObjectRef instead of actual results. + mark_healthy: Whether to mark certain actors healthy based on the results + of these remote calls. Useful, for example, to make sure actors + do not come back without proper state restoration. + + Returns: + A list of ready ObjectRefs mapping to the results of those calls. + """ + # Notice that we do not return the refs to any unfinished calls to the + # user, since it is not safe to handle such remote actor calls outside the + # context of this actor manager. These requests are simply dropped. + timeout = float(timeout_seconds) if timeout_seconds is not None else None + + # This avoids calling ray.init() in the case of 0 remote calls. + # This is useful if the number of remote workers is 0. + if not remote_calls: + return [], RemoteCallResults() + + readies, _ = ray.wait( + remote_calls, + num_returns=len(remote_calls), + timeout=timeout, + # Make sure remote results are fetched locally in parallel. + fetch_local=not return_obj_refs, + ) + + # Remote data should already be fetched to local object store at this point. + remote_results = RemoteCallResults() + for ready in readies: + # Find the corresponding actor ID for this remote call. + actor_id = remote_actor_ids[remote_calls.index(ready)] + tag = tags[remote_calls.index(ready)] + + # If caller wants ObjectRefs, return directly without resolving. + if return_obj_refs: + remote_results.add_result(actor_id, ResultOrError(result=ready), tag) + continue + + # Try getting the ready results. + try: + result = ray.get(ready) + + # Any error type other than `RayError` happening during ray.get() -> + # Throw exception right here (we don't know how to handle these non-remote + # worker issues and should therefore crash). 
+ except RayError as e: + # Return error to the user. + remote_results.add_result(actor_id, ResultOrError(error=e), tag) + + # Mark the actor as unhealthy, take it out of service, and wait for + # Ray Core to restore it. + if self.is_actor_healthy(actor_id): + logger.error( + f"Ray error ({str(e)}), taking actor {actor_id} out of service." + ) + self.set_actor_state(actor_id, healthy=False) + + # If no errors, add result to `RemoteCallResults` to be returned. + else: + # Return valid result to the user. + remote_results.add_result(actor_id, ResultOrError(result=result), tag) + + # Actor came back from an unhealthy state. Mark this actor as healthy + # and add it to our healthy set. + if mark_healthy and not self.is_actor_healthy(actor_id): + logger.warning( + f"Bringing previously unhealthy, now-healthy actor {actor_id} " + "back into service." + ) + self.set_actor_state(actor_id, healthy=True) + self._num_actor_restarts += 1 + + # Make sure, to-be-returned results are sound. + assert len(readies) == len(remote_results) + + return readies, remote_results + + def _filter_func_and_remote_actor_id_by_state( + self, + func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]], + remote_actor_ids: List[int], + ): + """Filter out func and remote worker ids by actor state. + + Args: + func: A single, or a list of Callables. + remote_actor_ids: IDs of potential remote workers to apply func on. + + Returns: + A tuple of (filtered func, filtered remote worker ids). + """ + if isinstance(func, list): + assert len(remote_actor_ids) == len( + func + ), "Func must have the same number of callables as remote actor ids." + # We are given a list of functions to apply. + # Need to filter the functions together with worker IDs. 
+ temp_func = [] + temp_remote_actor_ids = [] + for f, i in zip(func, remote_actor_ids): + if self.is_actor_healthy(i): + temp_func.append(f) + temp_remote_actor_ids.append(i) + func = temp_func + remote_actor_ids = temp_remote_actor_ids + else: + # Simply filter the worker IDs. + remote_actor_ids = [i for i in remote_actor_ids if self.is_actor_healthy(i)] + + return func, remote_actor_ids + + def _filter_calls_by_tag( + self, tags: Union[str, List[str], Tuple[str]] + ) -> Tuple[List[ray.ObjectRef], List[ActorHandle], List[str]]: + """Return all the in flight requests that match the given tags, if any. + + Args: + tags: A str or a list/tuple of str. If tags is empty, return all the in + flight requests. + + Returns: + A tuple consisting of a list of the remote calls that match the tag(s), + a list of the corresponding remote actor IDs for these calls (same length), + and a list of the tags corresponding to these calls (same length). + """ + if isinstance(tags, str): + tags = {tags} + elif isinstance(tags, (list, tuple)): + tags = set(tags) + else: + raise ValueError( + f"tags must be either a str or a list/tuple of str, got {type(tags)}." + ) + remote_calls = [] + remote_actor_ids = [] + valid_tags = [] + for call, (tag, actor_id) in self._in_flight_req_to_actor_id.items(): + # the default behavior is to return all ready results. + if len(tags) == 0 or tag in tags: + remote_calls.append(call) + remote_actor_ids.append(actor_id) + valid_tags.append(tag) + + return remote_calls, remote_actor_ids, valid_tags + + def _remove_async_state(self, actor_id: int): + """Remove internal async state of for a given actor. + + This is called when an actor is removed from the pool or being marked + unhealthy. + + Args: + actor_id: The id of the actor. + """ + # Remove any outstanding async requests for this actor. + # Use `list` here to not change a looped generator while we mutate the + # underlying dict. 
+ for id, req in list(self._in_flight_req_to_actor_id.items()): + if id == actor_id: + del self._in_flight_req_to_actor_id[req] + + def actors(self): + # TODO(jungong) : remove this API once EnvRunnerGroup.remote_workers() + # and EnvRunnerGroup._remote_workers() are removed. + return self._actors diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/actors.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/actors.py new file mode 100644 index 0000000000000000000000000000000000000000..d56dcdbd773f920e6b922eeeceb71e4eb663d68c --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/actors.py @@ -0,0 +1,258 @@ +from collections import defaultdict, deque +import logging +import platform +from typing import Any, Dict, List, Optional, Sequence, Tuple, Type + +import ray +from ray.actor import ActorClass, ActorHandle + +logger = logging.getLogger(__name__) + + +class TaskPool: + """Helper class for tracking the status of many in-flight actor tasks.""" + + def __init__(self): + self._tasks = {} + self._objects = {} + self._fetching = deque() + + def add(self, worker, all_obj_refs): + if isinstance(all_obj_refs, list): + obj_ref = all_obj_refs[0] + else: + obj_ref = all_obj_refs + self._tasks[obj_ref] = worker + self._objects[obj_ref] = all_obj_refs + + def completed(self, blocking_wait=False): + pending = list(self._tasks) + if pending: + ready, _ = ray.wait(pending, num_returns=len(pending), timeout=0) + if not ready and blocking_wait: + ready, _ = ray.wait(pending, num_returns=1, timeout=10.0) + for obj_ref in ready: + yield (self._tasks.pop(obj_ref), self._objects.pop(obj_ref)) + + def completed_prefetch(self, blocking_wait=False, max_yield=999): + """Similar to completed but only returns once the object is local. 
+ + Assumes obj_ref only is one id.""" + + for worker, obj_ref in self.completed(blocking_wait=blocking_wait): + self._fetching.append((worker, obj_ref)) + + for _ in range(max_yield): + if not self._fetching: + break + + yield self._fetching.popleft() + + def reset_workers(self, workers): + """Notify that some workers may be removed.""" + for obj_ref, ev in self._tasks.copy().items(): + if ev not in workers: + del self._tasks[obj_ref] + del self._objects[obj_ref] + + # We want to keep the same deque reference so that we don't suffer from + # stale references in generators that are still in flight + for _ in range(len(self._fetching)): + ev, obj_ref = self._fetching.popleft() + if ev in workers: + # Re-queue items that are still valid + self._fetching.append((ev, obj_ref)) + + @property + def count(self): + return len(self._tasks) + + +def create_colocated_actors( + actor_specs: Sequence[Tuple[Type, Any, Any, int]], + node: Optional[str] = "localhost", + max_attempts: int = 10, +) -> Dict[Type, List[ActorHandle]]: + """Create co-located actors of any type(s) on any node. + + Args: + actor_specs: Tuple/list with tuples consisting of: 1) The + (already @ray.remote) class(es) to construct, 2) c'tor args, + 3) c'tor kwargs, and 4) the number of actors of that class with + given args/kwargs to construct. + node: The node to co-locate the actors on. By default ("localhost"), + place the actors on the node the caller of this function is + located on. Use None for indicating that any (resource fulfilling) + node in the cluster may be used. + max_attempts: The maximum number of co-location attempts to + perform before throwing an error. + + Returns: + A dict mapping the created types to the list of n ActorHandles + created (and co-located) for that type. + """ + if node == "localhost": + node = platform.node() + + # Maps each entry in `actor_specs` to lists of already co-located actors. 
+ ok = [[] for _ in range(len(actor_specs))] + + # Try n times to co-locate all given actor types (`actor_specs`). + # With each (failed) attempt, increase the number of actors we try to + # create (on the same node), then kill the ones that have been created in + # excess. + for attempt in range(max_attempts): + # If any attempt to co-locate fails, set this to False and we'll do + # another attempt. + all_good = True + # Process all `actor_specs` in sequence. + for i, (typ, args, kwargs, count) in enumerate(actor_specs): + args = args or [] # Allow None. + kwargs = kwargs or {} # Allow None. + # We don't have enough actors yet of this spec co-located on + # the desired node. + if len(ok[i]) < count: + co_located = try_create_colocated( + cls=typ, + args=args, + kwargs=kwargs, + count=count * (attempt + 1), + node=node, + ) + # If node did not matter (None), from here on, use the host + # that the first actor(s) are already co-located on. + if node is None: + node = ray.get(co_located[0].get_host.remote()) + # Add the newly co-located actors to the `ok` list. + ok[i].extend(co_located) + # If we still don't have enough -> We'll have to do another + # attempt. + if len(ok[i]) < count: + all_good = False + # We created too many actors for this spec -> Kill/truncate + # the excess ones. + if len(ok[i]) > count: + for a in ok[i][count:]: + a.__ray_terminate__.remote() + ok[i] = ok[i][:count] + + # All `actor_specs` have been fulfilled, return lists of + # co-located actors. + if all_good: + return ok + + raise Exception("Unable to create enough colocated actors -> aborting.") + + +def try_create_colocated( + cls: Type[ActorClass], + args: List[Any], + count: int, + kwargs: Optional[List[Any]] = None, + node: Optional[str] = "localhost", +) -> List[ActorHandle]: + """Tries to co-locate (same node) a set of Actors of the same type. + + Returns a list of successfully co-located actors. 
All actors that could + not be co-located (with the others on the given node) will not be in this + list. + + Creates each actor via it's remote() constructor and then checks, whether + it has been co-located (on the same node) with the other (already created) + ones. If not, terminates the just created actor. + + Args: + cls: The Actor class to use (already @ray.remote "converted"). + args: List of args to pass to the Actor's constructor. One item + per to-be-created actor (`count`). + count: Number of actors of the given `cls` to construct. + kwargs: Optional list of kwargs to pass to the Actor's constructor. + One item per to-be-created actor (`count`). + node: The node to co-locate the actors on. By default ("localhost"), + place the actors on the node the caller of this function is + located on. If None, will try to co-locate all actors on + any available node. + + Returns: + List containing all successfully co-located actor handles. + """ + if node == "localhost": + node = platform.node() + + kwargs = kwargs or {} + actors = [cls.remote(*args, **kwargs) for _ in range(count)] + co_located, non_co_located = split_colocated(actors, node=node) + logger.info("Got {} colocated actors of {}".format(len(co_located), count)) + for a in non_co_located: + a.__ray_terminate__.remote() + return co_located + + +def split_colocated( + actors: List[ActorHandle], + node: Optional[str] = "localhost", +) -> Tuple[List[ActorHandle], List[ActorHandle]]: + """Splits up given actors into colocated (on same node) and non colocated. + + The co-location criterion depends on the `node` given: + If given (or default: platform.node()): Consider all actors that are on + that node "colocated". + If None: Consider the largest sub-set of actors that are all located on + the same node (whatever that node is) as "colocated". + + Args: + actors: The list of actor handles to split into "colocated" and + "non colocated". + node: The node defining "colocation" criterion. 
If provided, consider + thos actors "colocated" that sit on this node. If None, use the + largest subset within `actors` that are sitting on the same + (any) node. + + Returns: + Tuple of two lists: 1) Co-located ActorHandles, 2) non co-located + ActorHandles. + """ + if node == "localhost": + node = platform.node() + + # Get nodes of all created actors. + hosts = ray.get([a.get_host.remote() for a in actors]) + + # If `node` not provided, use the largest group of actors that sit on the + # same node, regardless of what that node is. + if node is None: + node_groups = defaultdict(set) + for host, actor in zip(hosts, actors): + node_groups[host].add(actor) + max_ = -1 + largest_group = None + for host in node_groups: + if max_ < len(node_groups[host]): + max_ = len(node_groups[host]) + largest_group = host + non_co_located = [] + for host in node_groups: + if host != largest_group: + non_co_located.extend(list(node_groups[host])) + return list(node_groups[largest_group]), non_co_located + # Node provided (or default: localhost): Consider those actors "colocated" + # that were placed on `node`. + else: + # Split into co-located (on `node) and non-co-located (not on `node`). + co_located = [] + non_co_located = [] + for host, a in zip(hosts, actors): + # This actor has been placed on the correct node. + if host == node: + co_located.append(a) + # This actor has been placed on a different node. 
+ else: + non_co_located.append(a) + return co_located, non_co_located + + +def drop_colocated(actors: List[ActorHandle]) -> List[ActorHandle]: + colocated, non_colocated = split_colocated(actors) + for a in colocated: + a.__ray_terminate__.remote() + return non_colocated diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/annotations.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/annotations.py new file mode 100644 index 0000000000000000000000000000000000000000..6824412b354f1f18df9d7a663e99471835680994 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/annotations.py @@ -0,0 +1,213 @@ +from ray.rllib.utils.deprecation import Deprecated +from ray.util.annotations import _mark_annotated + + +def override(parent_cls): + """Decorator for documenting method overrides. + + Args: + parent_cls: The superclass that provides the overridden method. If + `parent_class` does not actually have the method or the class, in which + method is defined is not a subclass of `parent_class`, an error is raised. + + .. testcode:: + :skipif: True + + from ray.rllib.policy import Policy + class TorchPolicy(Policy): + ... + # Indicates that `TorchPolicy.loss()` overrides the parent + # Policy class' own `loss method. Leads to an error if Policy + # does not have a `loss` method. + + @override(Policy) + def loss(self, model, action_dist, train_batch): + ... + + """ + + class OverrideCheck: + def __init__(self, func, expected_parent_cls): + self.func = func + self.expected_parent_cls = expected_parent_cls + + def __set_name__(self, owner, name): + # Check if the owner (the class) is a subclass of the expected base class + if not issubclass(owner, self.expected_parent_cls): + raise TypeError( + f"When using the @override decorator, {owner.__name__} must be a " + f"subclass of {parent_cls.__name__}!" + ) + # Set the function as a regular method on the class. 
+ setattr(owner, name, self.func) + + def decorator(method): + # Check, whether `method` is actually defined by the parent class. + if method.__name__ not in dir(parent_cls): + raise NameError( + f"When using the @override decorator, {method.__name__} must override " + f"the respective method (with the same name) of {parent_cls.__name__}!" + ) + + # Check if the class is a subclass of the expected base class + OverrideCheck(method, parent_cls) + return method + + return decorator + + +def PublicAPI(obj): + """Decorator for documenting public APIs. + + Public APIs are classes and methods exposed to end users of RLlib. You + can expect these APIs to remain stable across RLlib releases. + + Subclasses that inherit from a ``@PublicAPI`` base class can be + assumed part of the RLlib public API as well (e.g., all Algorithm classes + are in public API because Algorithm is ``@PublicAPI``). + + In addition, you can assume all algo configurations are part of their + public API as well. + + .. testcode:: + :skipif: True + + # Indicates that the `Algorithm` class is exposed to end users + # of RLlib and will remain stable across RLlib releases. + from ray import tune + @PublicAPI + class Algorithm(tune.Trainable): + ... + """ + + _mark_annotated(obj) + return obj + + +def DeveloperAPI(obj): + """Decorator for documenting developer APIs. + + Developer APIs are classes and methods explicitly exposed to developers + for the purposes of building custom algorithms or advanced training + strategies on top of RLlib internals. You can generally expect these APIs + to be stable sans minor changes (but less stable than public APIs). + + Subclasses that inherit from a ``@DeveloperAPI`` base class can be + assumed part of the RLlib developer API as well. + + .. testcode:: + :skipif: True + + # Indicates that the `TorchPolicy` class is exposed to end users + # of RLlib and will remain (relatively) stable across RLlib + # releases. 
+ from ray.rllib.policy import Policy + @DeveloperAPI + class TorchPolicy(Policy): + ... + """ + + _mark_annotated(obj) + return obj + + +def ExperimentalAPI(obj): + """Decorator for documenting experimental APIs. + + Experimental APIs are classes and methods that are in development and may + change at any time in their development process. You should not expect + these APIs to be stable until their tag is changed to `DeveloperAPI` or + `PublicAPI`. + + Subclasses that inherit from a ``@ExperimentalAPI`` base class can be + assumed experimental as well. + + .. testcode:: + :skipif: True + + from ray.rllib.policy import Policy + class TorchPolicy(Policy): + ... + # Indicates that the `TorchPolicy.loss` method is a new and + # experimental API and may change frequently in future + # releases. + @ExperimentalAPI + def loss(self, model, action_dist, train_batch): + ... + """ + + _mark_annotated(obj) + return obj + + +def OldAPIStack(obj): + """Decorator for classes/methods/functions belonging to the old API stack. + + These should be deprecated at some point after Ray 3.0 (RLlib GA). + It is recommended for users to start exploring (and coding against) the new API + stack instead. + """ + # No effect yet. + + _mark_annotated(obj) + return obj + + +def OverrideToImplementCustomLogic(obj): + """Users should override this in their sub-classes to implement custom logic. + + Used in Algorithm and Policy to tag methods that need overriding, e.g. + `Policy.loss()`. + + .. testcode:: + :skipif: True + + from ray.rllib.policy.torch_policy import TorchPolicy + @overrides(TorchPolicy) + @OverrideToImplementCustomLogic + def loss(self, ...): + # implement custom loss function here ... + # ... w/o calling the corresponding `super().loss()` method. + ... + + """ + obj.__is_overridden__ = False + return obj + + +def OverrideToImplementCustomLogic_CallToSuperRecommended(obj): + """Users should override this in their sub-classes to implement custom logic. 
+ + Thereby, it is recommended (but not required) to call the super-class' + corresponding method. + + Used in Algorithm and Policy to tag methods that need overriding, but the + super class' method should still be called, e.g. + `Algorithm.setup()`. + + .. testcode:: + :skipif: True + + from ray import tune + @overrides(tune.Trainable) + @OverrideToImplementCustomLogic_CallToSuperRecommended + def setup(self, config): + # implement custom setup logic here ... + super().setup(config) + # ... or here (after having called super()'s setup method. + """ + obj.__is_overridden__ = False + return obj + + +def is_overridden(obj): + """Check whether a function has been overridden. + + Note, this only works for API calls decorated with OverrideToImplementCustomLogic + or OverrideToImplementCustomLogic_CallToSuperRecommended. + """ + return getattr(obj, "__is_overridden__", True) + + +# Backward compatibility. +Deprecated = Deprecated diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/checkpoints.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/checkpoints.py new file mode 100644 index 0000000000000000000000000000000000000000..dc89d0e2fa4c2672af300a8c359b4d4b06915329 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/checkpoints.py @@ -0,0 +1,973 @@ +import abc +import logging +import json +import os +from packaging import version +import pathlib +import re +import tempfile +from types import MappingProxyType +from typing import Any, Collection, Dict, List, Optional, Tuple, Union + +import pyarrow.fs + +import ray +import ray.cloudpickle as pickle +from ray.rllib.core import ( + COMPONENT_LEARNER, + COMPONENT_LEARNER_GROUP, + COMPONENT_RL_MODULE, +) +from ray.rllib.utils import force_list +from ray.rllib.utils.actor_manager import FaultTolerantActorManager +from ray.rllib.utils.annotations import ( + OverrideToImplementCustomLogic_CallToSuperRecommended, +) +from ray.rllib.utils.serialization import NOT_SERIALIZABLE, 
serialize_type +from ray.rllib.utils.typing import StateDict +from ray.train import Checkpoint +from ray.tune.utils.file_transfer import sync_dir_between_nodes +from ray.util import log_once +from ray.util.annotations import PublicAPI + +logger = logging.getLogger(__name__) + +# The current checkpoint version used by RLlib for Algorithm and Policy checkpoints. +# History: +# 0.1: Ray 2.0.0 +# A single `checkpoint-[iter num]` file for Algorithm checkpoints +# within the checkpoint directory. Policy checkpoints not supported across all +# DL frameworks. + +# 1.0: Ray >=2.1.0 +# An algorithm_state.pkl file for the state of the Algorithm (excluding +# individual policy states). +# One sub-dir inside the "policies" sub-dir for each policy with a +# dedicated policy_state.pkl in it for the policy state. + +# 1.1: Same as 1.0, but has a new "format" field in the rllib_checkpoint.json file +# indicating, whether the checkpoint is `cloudpickle` (default) or `msgpack`. + +# 1.2: Introduces the checkpoint for the new Learner API if the Learner API is enabled. + +# 2.0: Introduces the Checkpointable API for all components on the new API stack +# (if the Learner-, RLModule, EnvRunner, and ConnectorV2 APIs are enabled). + +CHECKPOINT_VERSION = version.Version("1.1") +CHECKPOINT_VERSION_LEARNER = version.Version("1.2") +CHECKPOINT_VERSION_LEARNER_AND_ENV_RUNNER = version.Version("2.0") + + +@PublicAPI(stability="alpha") +class Checkpointable(abc.ABC): + """Abstract base class for a component of RLlib that can be checkpointed to disk. + + Subclasses must implement the following APIs: + - save_to_path() + - restore_from_path() + - from_checkpoint() + - get_state() + - set_state() + - get_ctor_args_and_kwargs() + - get_metadata() + - get_checkpointable_components() + """ + + # The state file for the implementing class. 
+ # This file contains any state information that does NOT belong to any subcomponent + # of the implementing class (which are `Checkpointable` themselves and thus should + # have their own state- and metadata files). + # After a `save_to_path([path])` this file can be found directly in: `path/`. + STATE_FILE_NAME = "state.pkl" + + # The filename of the pickle file that contains the class information of the + # Checkpointable as well as all constructor args to be passed to such a class in + # order to construct a new instance. + CLASS_AND_CTOR_ARGS_FILE_NAME = "class_and_ctor_args.pkl" + + # Subclasses may set this to their own metadata filename. + # The dict returned by self.get_metadata() is stored in this JSON file. + METADATA_FILE_NAME = "metadata.json" + + def save_to_path( + self, + path: Optional[Union[str, pathlib.Path]] = None, + *, + state: Optional[StateDict] = None, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + ) -> str: + """Saves the state of the implementing class (or `state`) to `path`. + + The state of the implementing class is always saved in the following format: + + .. testcode:: + :skipif: True + + path/ + [component1]/ + [component1 subcomponentA]/ + ... + [component1 subcomponentB]/ + ... + [component2]/ + ... + [cls.METADATA_FILE_NAME] (json) + [cls.STATE_FILE_NAME] (pkl) + + The main logic is to loop through all subcomponents of this Checkpointable + and call their respective `save_to_path` methods. Then save the remaining + (non subcomponent) state to this Checkpointable's STATE_FILE_NAME. + In the exception that a component is a FaultTolerantActorManager instance, + instead of calling `save_to_path` directly on that manager, the first healthy + actor is interpreted as the component and its `save_to_path` method is called. + Even if that actor is located on another node, the created file is automatically + synced to the local node. + + Args: + path: The path to the directory to save the state of the implementing class + to. 
If `path` doesn't exist or is None, then a new directory will be + created (and returned). + state: An optional state dict to be used instead of getting a new state of + the implementing class through `self.get_state()`. + filesystem: PyArrow FileSystem to use to access data at the `path`. + If not specified, this is inferred from the URI scheme of `path`. + + Returns: + The path (str) where the state has been saved. + """ + + # If no path is given create a local temporary directory. + if path is None: + import uuid + + # Get the location of the temporary directory on the OS. + tmp_dir = pathlib.Path(tempfile.gettempdir()) + # Create a random directory name. + random_dir_name = str(uuid.uuid4()) + # Create the path, but do not craet the directory on the + # filesystem, yet. This is done by `PyArrow`. + path = path or tmp_dir / random_dir_name + + # We need a string path for `pyarrow.fs.FileSystem.from_uri`. + path = path if isinstance(path, str) else path.as_posix() + + # If we have no filesystem, figure it out. + if path and not filesystem: + # Note the path needs to be a path that is relative to the + # filesystem (e.g. `gs://tmp/...` -> `tmp/...`). + filesystem, path = pyarrow.fs.FileSystem.from_uri(path) + + # Make sure, path exists. + filesystem.create_dir(path, recursive=True) + + # Convert to `pathlib.Path` for easy handling. + path = pathlib.Path(path) + + # Write metadata file to disk. + metadata = self.get_metadata() + if "checkpoint_version" not in metadata: + metadata["checkpoint_version"] = str( + CHECKPOINT_VERSION_LEARNER_AND_ENV_RUNNER + ) + with filesystem.open_output_stream( + (path / self.METADATA_FILE_NAME).as_posix() + ) as f: + f.write(json.dumps(metadata).encode("utf-8")) + + # Write the class and constructor args information to disk. 
+ with filesystem.open_output_stream( + (path / self.CLASS_AND_CTOR_ARGS_FILE_NAME).as_posix() + ) as f: + pickle.dump( + { + "class": type(self), + "ctor_args_and_kwargs": self.get_ctor_args_and_kwargs(), + }, + f, + ) + + # Get the entire state of this Checkpointable, or use provided `state`. + _state_provided = state is not None + state = state or self.get_state( + not_components=[c[0] for c in self.get_checkpointable_components()] + ) + + # Write components of `self` that themselves are `Checkpointable`. + for comp_name, comp in self.get_checkpointable_components(): + # If subcomponent's name is not in `state`, ignore it and don't write this + # subcomponent's state to disk. + if _state_provided and comp_name not in state: + continue + comp_path = path / comp_name + + # If component is an ActorManager, save the manager's first healthy + # actor's state to disk (even if it's on another node, in which case, we'll + # sync the generated file(s) back to this node). + if isinstance(comp, FaultTolerantActorManager): + actor_to_use = comp.healthy_actor_ids()[0] + + def _get_ip(_=None): + import ray + + return ray.util.get_node_ip_address() + + _result = next( + iter( + comp.foreach_actor( + _get_ip, + remote_actor_ids=[actor_to_use], + ) + ) + ) + if not _result.ok: + raise _result.get() + worker_ip_addr = _result.get() + self_ip_addr = _get_ip() + + # Save the state to a temporary location on the `actor_to_use`'s + # node. + comp_state_ref = None + if _state_provided: + comp_state_ref = ray.put(state.pop(comp_name)) + + if worker_ip_addr == self_ip_addr: + comp.foreach_actor( + lambda w, _path=comp_path, _state=comp_state_ref: ( + w.save_to_path( + _path, + state=( + ray.get(_state) + if _state is not None + else w.get_state() + ), + ) + ), + remote_actor_ids=[actor_to_use], + ) + else: + # Save the checkpoint to the temporary directory on the worker. + def _save(w, _state=comp_state_ref): + import tempfile + + # Create a temporary directory on the worker. 
+ tmpdir = tempfile.mkdtemp() + w.save_to_path( + tmpdir, + state=( + ray.get(_state) if _state is not None else w.get_state() + ), + ) + return tmpdir + + _result = next( + iter(comp.foreach_actor(_save, remote_actor_ids=[actor_to_use])) + ) + if not _result.ok: + raise _result.get() + worker_temp_dir = _result.get() + + # Sync the temporary directory from the worker to this node. + sync_dir_between_nodes( + worker_ip_addr, + worker_temp_dir, + self_ip_addr, + str(comp_path), + ) + + # Remove the temporary directory on the worker. + def _rmdir(_, _dir=worker_temp_dir): + import shutil + + shutil.rmtree(_dir) + + comp.foreach_actor(_rmdir, remote_actor_ids=[actor_to_use]) + + # Local component (instance stored in a property of `self`). + else: + if _state_provided: + comp_state = state.pop(comp_name) + else: + comp_state = self.get_state(components=comp_name)[comp_name] + # By providing the `state` arg, we make sure that the component does not + # have to call its own `get_state()` anymore, but uses what's provided + # here. + comp.save_to_path(comp_path, filesystem=filesystem, state=comp_state) + + # Write all the remaining state to disk. + with filesystem.open_output_stream( + (path / self.STATE_FILE_NAME).as_posix() + ) as f: + pickle.dump(state, f) + + return str(path) + + def restore_from_path( + self, + path: Union[str, pathlib.Path], + *, + component: Optional[str] = None, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + **kwargs, + ) -> None: + """Restores the state of the implementing class from the given path. + + If the `component` arg is provided, `path` refers to a checkpoint of a + subcomponent of `self`, thus allowing the user to load only the subcomponent's + state into `self` without affecting any of the other state information (for + example, loading only the NN state into a Checkpointable, which contains such + an NN, but also has other state information that should NOT be changed by + calling this method). 
+ + The given `path` should have the following structure and contain the following + files: + + .. testcode:: + :skipif: True + + path/ + [component1]/ + [component1 subcomponentA]/ + ... + [component1 subcomponentB]/ + ... + [component2]/ + ... + [cls.METADATA_FILE_NAME] (json) + [cls.STATE_FILE_NAME] (pkl) + + Note that the self.METADATA_FILE_NAME file is not required to restore the state. + + Args: + path: The path to load the implementing class' state from or to load the + state of only one subcomponent's state of the implementing class (if + `component` is provided). + component: If provided, `path` is interpreted as the checkpoint path of only + the subcomponent and thus, only that subcomponent's state is + restored/loaded. All other state of `self` remains unchanged in this + case. + filesystem: PyArrow FileSystem to use to access data at the `path`. If not + specified, this is inferred from the URI scheme of `path`. + **kwargs: Forward compatibility kwargs. + """ + path = path if isinstance(path, str) else path.as_posix() + + if path and not filesystem: + # Note the path needs to be a path that is relative to the + # filesystem (e.g. `gs://tmp/...` -> `tmp/...`). + filesystem, path = pyarrow.fs.FileSystem.from_uri(path) + # Only here convert to a `Path` instance b/c otherwise + # cloud path gets broken (i.e. 'gs://' -> 'gs:/'). + path = pathlib.Path(path) + + if not _exists_at_fs_path(filesystem, path.as_posix()): + raise FileNotFoundError(f"`path` ({path}) not found!") + + # Restore components of `self` that themselves are `Checkpointable`. + for comp_name, comp in self.get_checkpointable_components(): + + # The value of the `component` argument for the upcoming + # `[subcomponent].restore_from_path(.., component=..)` call. + comp_arg = None + + if component is None: + comp_dir = path / comp_name + # If subcomponent's dir is not in path, ignore it and don't restore this + # subcomponent's state from disk. 
+ if not _exists_at_fs_path(filesystem, comp_dir.as_posix()): + continue + else: + comp_dir = path + + # `component` is a path that starts with `comp` -> Remove the name of + # `comp` from the `component` arg in the upcoming call to `restore_..`. + if component.startswith(comp_name + "/"): + comp_arg = component[len(comp_name) + 1 :] + # `component` has nothing to do with `comp` -> Skip. + elif component != comp_name: + continue + + # If component is an ActorManager, restore all the manager's healthy + # actors' states from disk (even if they are on another node, in which case, + # we'll sync checkpoint file(s) to the respective node). + if isinstance(comp, FaultTolerantActorManager): + head_node_ip = ray.util.get_node_ip_address() + all_healthy_actors = comp.healthy_actor_ids() + + def _restore( + w, + _kwargs=MappingProxyType(kwargs), + _path=comp_dir, + _head_ip=head_node_ip, + _comp_arg=comp_arg, + ): + import ray + import tempfile + + worker_node_ip = ray.util.get_node_ip_address() + # If the worker is on the same node as the head, load the checkpoint + # directly from the path otherwise sync the checkpoint from the head + # to the worker and load it from there. + if worker_node_ip == _head_ip: + w.restore_from_path(_path, component=_comp_arg, **_kwargs) + else: + with tempfile.TemporaryDirectory() as temp_dir: + sync_dir_between_nodes( + _head_ip, _path, worker_node_ip, temp_dir + ) + w.restore_from_path( + temp_dir, component=_comp_arg, **_kwargs + ) + + comp.foreach_actor(_restore, remote_actor_ids=all_healthy_actors) + + # Call `restore_from_path()` on local subcomponent, thereby passing in the + # **kwargs. + else: + comp.restore_from_path( + comp_dir, filesystem=filesystem, component=comp_arg, **kwargs + ) + + # Restore the rest of the state (not based on subcomponents). 
+ if component is None: + with filesystem.open_input_stream( + (path / self.STATE_FILE_NAME).as_posix() + ) as f: + state = pickle.load(f) + self.set_state(state) + + @classmethod + def from_checkpoint( + cls, + path: Union[str, pathlib.Path], + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + **kwargs, + ) -> "Checkpointable": + """Creates a new Checkpointable instance from the given location and returns it. + + Args: + path: The checkpoint path to load (a) the information on how to construct + a new instance of the implementing class and (b) the state to restore + the created instance to. + filesystem: PyArrow FileSystem to use to access data at the `path`. If not + specified, this is inferred from the URI scheme of `path`. + kwargs: Forward compatibility kwargs. Note that these kwargs are sent to + each subcomponent's `from_checkpoint()` call. + + Returns: + A new instance of the implementing class, already set to the state stored + under `path`. + """ + # We need a string path for the `PyArrow` filesystem. + path = path if isinstance(path, str) else path.as_posix() + + # If no filesystem is passed in create one. + if path and not filesystem: + # Note the path needs to be a path that is relative to the + # filesystem (e.g. `gs://tmp/...` -> `tmp/...`). + filesystem, path = pyarrow.fs.FileSystem.from_uri(path) + # Only here convert to a `Path` instance b/c otherwise + # cloud path gets broken (i.e. 'gs://' -> 'gs:/'). + path = pathlib.Path(path) + + # Get the class constructor to call. + with filesystem.open_input_stream( + (path / cls.CLASS_AND_CTOR_ARGS_FILE_NAME).as_posix() + ) as f: + ctor_info = pickle.load(f) + ctor = ctor_info["class"] + + # Check, whether the constructor actually goes together with `cls`. + if not issubclass(ctor, cls): + raise ValueError( + f"The class ({ctor}) stored in checkpoint ({path}) does not seem to be " + f"a subclass of `cls` ({cls})!" 
+ ) + elif not issubclass(ctor, Checkpointable): + raise ValueError( + f"The class ({ctor}) stored in checkpoint ({path}) does not seem to be " + "an implementer of the `Checkpointable` API!" + ) + + obj = ctor( + *ctor_info["ctor_args_and_kwargs"][0], + **ctor_info["ctor_args_and_kwargs"][1], + ) + # Restore the state of the constructed object. + obj.restore_from_path(path, filesystem=filesystem, **kwargs) + # Return the new object. + return obj + + @abc.abstractmethod + def get_state( + self, + components: Optional[Union[str, Collection[str]]] = None, + *, + not_components: Optional[Union[str, Collection[str]]] = None, + **kwargs, + ) -> StateDict: + """Returns the implementing class's current state as a dict. + + Args: + components: An optional collection of string keys to be included in the + returned state. This might be useful, if getting certain components + of the state is expensive (e.g. reading/compiling the weights of a large + NN) and at the same time, these components are not required by the + caller. + not_components: An optional list of string keys to be excluded in the + returned state, even if the same string is part of `components`. + This is useful to get the complete state of the class, except + one or a few components. + kwargs: Forward-compatibility kwargs. + + Returns: + The current state of the implementing class (or only the `components` + specified, w/o those in `not_components`). + """ + + @abc.abstractmethod + def set_state(self, state: StateDict) -> None: + """Sets the implementing class' state to the given state dict. + + If component keys are missing in `state`, these components of the implementing + class will not be updated/set. + + Args: + state: The state dict to restore the state from. Maps component keys + to the corresponding subcomponent's own state. + """ + + @abc.abstractmethod + def get_ctor_args_and_kwargs(self) -> Tuple[Tuple, Dict[str, Any]]: + """Returns the args/kwargs used to create `self` from its constructor. 
+ + Returns: + A tuple of the args (as a tuple) and kwargs (as a Dict[str, Any]) used to + construct `self` from its class constructor. + """ + + @OverrideToImplementCustomLogic_CallToSuperRecommended + def get_metadata(self) -> Dict: + """Returns JSON writable metadata further describing the implementing class. + + Note that this metadata is NOT part of any state and is thus NOT needed to + restore the state of a Checkpointable instance from a directory. Rather, the + metadata will be written into `self.METADATA_FILE_NAME` when calling + `self.save_to_path()` for the user's convenience. + + Returns: + A JSON-encodable dict of metadata information. + """ + return { + "class_and_ctor_args_file": self.CLASS_AND_CTOR_ARGS_FILE_NAME, + "state_file": self.STATE_FILE_NAME, + "ray_version": ray.__version__, + "ray_commit": ray.__commit__, + } + + def get_checkpointable_components(self) -> List[Tuple[str, "Checkpointable"]]: + """Returns the implementing class's own Checkpointable subcomponents. + + Returns: + A list of 2-tuples (name, subcomponent) describing the implementing class' + subcomponents, all of which have to be `Checkpointable` themselves and + whose state is therefore written into subdirectories (rather than the main + state file (self.STATE_FILE_NAME) when calling `self.save_to_path()`). 
+ """ + return [] + + def _check_component(self, name, components, not_components) -> bool: + comp_list = force_list(components) + not_comp_list = force_list(not_components) + if ( + components is None + or any(c.startswith(name + "/") for c in comp_list) + or name in comp_list + ) and (not_components is None or name not in not_comp_list): + return True + return False + + def _get_subcomponents(self, name, components): + if components is None: + return None + + components = force_list(components) + subcomponents = [] + for comp in components: + if comp.startswith(name + "/"): + subcomponents.append(comp[len(name) + 1 :]) + + return None if not subcomponents else subcomponents + + +def _exists_at_fs_path(fs: pyarrow.fs.FileSystem, path: str) -> bool: + """Returns `True` if the path can be found in the filesystem.""" + valid = fs.get_file_info(path) + return valid.type != pyarrow.fs.FileType.NotFound + + +def _is_dir(file_info: pyarrow.fs.FileInfo) -> bool: + """Returns `True`, if the file info is from a directory.""" + return file_info.type == pyarrow.fs.FileType.Directory + + +@PublicAPI(stability="alpha") +def get_checkpoint_info( + checkpoint: Union[str, Checkpoint], + filesystem: Optional["pyarrow.fs.FileSystem"] = None, +) -> Dict[str, Any]: + """Returns a dict with information about an Algorithm/Policy checkpoint. + + If the given checkpoint is a >=v1.0 checkpoint directory, try reading all + information from the contained `rllib_checkpoint.json` file. + + Args: + checkpoint: The checkpoint directory (str) or an AIR Checkpoint object. + filesystem: PyArrow FileSystem to use to access data at the `checkpoint`. If not + specified, this is inferred from the URI scheme provided by `checkpoint`. + + Returns: + A dict containing the keys: + "type": One of "Policy" or "Algorithm". + "checkpoint_version": A version tuple, e.g. v1.0, indicating the checkpoint + version. This will help RLlib to remain backward compatible wrt. future + Ray and checkpoint versions. 
+ "checkpoint_dir": The directory with all the checkpoint files in it. This might + be the same as the incoming `checkpoint` arg. + "state_file": The main file with the Algorithm/Policy's state information in it. + This is usually a pickle-encoded file. + "policy_ids": An optional set of PolicyIDs in case we are dealing with an + Algorithm checkpoint. None if `checkpoint` is a Policy checkpoint. + """ + # Default checkpoint info. + info = { + "type": "Algorithm", + "format": "cloudpickle", + "checkpoint_version": CHECKPOINT_VERSION, + "checkpoint_dir": None, + "state_file": None, + "policy_ids": None, + "module_ids": None, + } + + # `checkpoint` is a Checkpoint instance: Translate to directory and continue. + if isinstance(checkpoint, Checkpoint): + checkpoint = checkpoint.to_directory() + + if checkpoint and not filesystem: + # Note the path needs to be a path that is relative to the + # filesystem (e.g. `gs://tmp/...` -> `tmp/...`). + filesystem, checkpoint = pyarrow.fs.FileSystem.from_uri(checkpoint) + # Only here convert to a `Path` instance b/c otherwise + # cloud path gets broken (i.e. 'gs://' -> 'gs:/'). + checkpoint = pathlib.Path(checkpoint) + + # Checkpoint is dir. + if _exists_at_fs_path(filesystem, checkpoint.as_posix()) and _is_dir( + filesystem.get_file_info(checkpoint.as_posix()) + ): + info.update({"checkpoint_dir": str(checkpoint)}) + + # Figure out whether this is an older checkpoint format + # (with a `checkpoint-\d+` file in it). + file_info_list = filesystem.get_file_info( + pyarrow.fs.FileSelector(checkpoint.as_posix(), recursive=False) + ) + for file_info in file_info_list: + if file_info.is_file: + if re.match("checkpoint-\\d+", file_info.base_name): + info.update( + { + "checkpoint_version": version.Version("0.1"), + "state_file": str(file_info.base_name), + } + ) + return info + + # No old checkpoint file found. 
+ + # If rllib_checkpoint.json file present, read available information from it + # and then continue with the checkpoint analysis (possibly overriding further + # information). + if _exists_at_fs_path( + filesystem, (checkpoint / "rllib_checkpoint.json").as_posix() + ): + # if (checkpoint / "rllib_checkpoint.json").is_file(): + with filesystem.open_input_stream( + (checkpoint / "rllib_checkpoint.json").as_posix() + ) as f: + # with open(checkpoint / "rllib_checkpoint.json") as f: + rllib_checkpoint_info = json.load(fp=f) + if "checkpoint_version" in rllib_checkpoint_info: + rllib_checkpoint_info["checkpoint_version"] = version.Version( + rllib_checkpoint_info["checkpoint_version"] + ) + info.update(rllib_checkpoint_info) + else: + # No rllib_checkpoint.json file present: Warn and continue trying to figure + # out checkpoint info ourselves. + if log_once("no_rllib_checkpoint_json_file"): + logger.warning( + "No `rllib_checkpoint.json` file found in checkpoint directory " + f"{checkpoint}! Trying to extract checkpoint info from other files " + f"found in that dir." + ) + + # Policy checkpoint file found. + for extension in ["pkl", "msgpck"]: + if _exists_at_fs_path( + filesystem, (checkpoint / ("policy_state." + extension)).as_posix() + ): + # if (checkpoint / ("policy_state." + extension)).is_file(): + info.update( + { + "type": "Policy", + "format": "cloudpickle" if extension == "pkl" else "msgpack", + "checkpoint_version": CHECKPOINT_VERSION, + "state_file": str(checkpoint / f"policy_state.{extension}"), + } + ) + return info + + # Valid Algorithm checkpoint >v0 file found? 
+ format = None + for extension in ["pkl", "msgpck"]: + state_file = checkpoint / f"algorithm_state.{extension}" + if ( + _exists_at_fs_path(filesystem, state_file.as_posix()) + and filesystem.get_file_info(state_file.as_posix()).is_file + ): + format = "cloudpickle" if extension == "pkl" else "msgpack" + break + if format is None: + raise ValueError( + "Given checkpoint does not seem to be valid! No file with the name " + "`algorithm_state.[pkl|msgpck]` (or `checkpoint-[0-9]+`) found." + ) + + info.update( + { + "format": format, + "state_file": str(state_file), + } + ) + + # Collect all policy IDs in the sub-dir "policies/". + policies_dir = checkpoint / "policies" + if _exists_at_fs_path(filesystem, policies_dir.as_posix()) and _is_dir( + filesystem.get_file_info(policies_dir.as_posix()) + ): + policy_ids = set() + file_info_list = filesystem.get_file_info( + pyarrow.fs.FileSelector(policies_dir.as_posix(), recursive=False) + ) + for file_info in file_info_list: + policy_ids.add(file_info.base_name) + info.update({"policy_ids": policy_ids}) + + # Collect all module IDs in the sub-dir "learner/module_state/". + modules_dir = ( + checkpoint + / COMPONENT_LEARNER_GROUP + / COMPONENT_LEARNER + / COMPONENT_RL_MODULE + ) + if _exists_at_fs_path(filesystem, checkpoint.as_posix()) and _is_dir( + filesystem.get_file_info(modules_dir.as_posix()) + ): + module_ids = set() + file_info_list = filesystem.get_file_info( + pyarrow.fs.FileSelector(modules_dir.as_posix(), recursive=False) + ) + for file_info in file_info_list: + # Only add subdirs (those are the ones where the RLModule data + # is stored, not files (could be json metadata files). + module_dir = modules_dir / file_info.base_name + if _is_dir(filesystem.get_file_info(module_dir.as_posix())): + module_ids.add(file_info.base_name) + info.update({"module_ids": module_ids}) + + # Checkpoint is a file: Use as-is (interpreting it as old Algorithm checkpoint + # version). 
+ elif ( + _exists_at_fs_path(filesystem, checkpoint.as_posix()) + and filesystem.get_file_info(checkpoint.as_posix()).is_file + ): + info.update( + { + "checkpoint_version": version.Version("0.1"), + "checkpoint_dir": str(checkpoint.parent), + "state_file": str(checkpoint), + } + ) + + else: + raise ValueError( + f"Given checkpoint ({str(checkpoint)}) not found! Must be a " + "checkpoint directory (or a file for older checkpoint versions)." + ) + + return info + + +@PublicAPI(stability="beta") +def convert_to_msgpack_checkpoint( + checkpoint: Union[str, Checkpoint], + msgpack_checkpoint_dir: str, +) -> str: + """Converts an Algorithm checkpoint (pickle based) to a msgpack based one. + + Msgpack has the advantage of being python version independent. + + Args: + checkpoint: The directory, in which to find the Algorithm checkpoint (pickle + based). + msgpack_checkpoint_dir: The directory, in which to create the new msgpack + based checkpoint. + + Returns: + The directory in which the msgpack checkpoint has been created. Note that + this is the same as `msgpack_checkpoint_dir`. + """ + from ray.rllib.algorithms import Algorithm + from ray.rllib.algorithms.algorithm_config import AlgorithmConfig + from ray.rllib.core.rl_module import validate_module_id + + # Try to import msgpack and msgpack_numpy. + msgpack = try_import_msgpack(error=True) + + # Restore the Algorithm using the python version dependent checkpoint. + algo = Algorithm.from_checkpoint(checkpoint) + state = algo.__getstate__() + + # Convert all code in state into serializable data. + # Serialize the algorithm class. + state["algorithm_class"] = serialize_type(state["algorithm_class"]) + # Serialize the algorithm's config object. + if not isinstance(state["config"], dict): + state["config"] = state["config"].serialize() + else: + state["config"] = AlgorithmConfig._serialize_dict(state["config"]) + + # Extract policy states from worker state (Policies get their own + # checkpoint sub-dirs). 
+ policy_states = {} + if "worker" in state and "policy_states" in state["worker"]: + policy_states = state["worker"].pop("policy_states", {}) + + # Policy mapping fn. + state["worker"]["policy_mapping_fn"] = NOT_SERIALIZABLE + # Is Policy to train function. + state["worker"]["is_policy_to_train"] = NOT_SERIALIZABLE + + # Add RLlib checkpoint version (as string). + if state["config"]["enable_rl_module_and_learner"]: + state["checkpoint_version"] = str(CHECKPOINT_VERSION_LEARNER) + else: + state["checkpoint_version"] = str(CHECKPOINT_VERSION) + + # Write state (w/o policies) to disk. + state_file = os.path.join(msgpack_checkpoint_dir, "algorithm_state.msgpck") + with open(state_file, "wb") as f: + msgpack.dump(state, f) + + # Write rllib_checkpoint.json. + with open(os.path.join(msgpack_checkpoint_dir, "rllib_checkpoint.json"), "w") as f: + json.dump( + { + "type": "Algorithm", + "checkpoint_version": state["checkpoint_version"], + "format": "msgpack", + "state_file": state_file, + "policy_ids": list(policy_states.keys()), + "ray_version": ray.__version__, + "ray_commit": ray.__commit__, + }, + f, + ) + + # Write individual policies to disk, each in their own subdirectory. + for pid, policy_state in policy_states.items(): + # From here on, disallow policyIDs that would not work as directory names. + validate_module_id(pid, error=True) + policy_dir = os.path.join(msgpack_checkpoint_dir, "policies", pid) + os.makedirs(policy_dir, exist_ok=True) + policy = algo.get_policy(pid) + policy.export_checkpoint( + policy_dir, + policy_state=policy_state, + checkpoint_format="msgpack", + ) + + # Release all resources used by the Algorithm. + algo.stop() + + return msgpack_checkpoint_dir + + +@PublicAPI(stability="beta") +def convert_to_msgpack_policy_checkpoint( + policy_checkpoint: Union[str, Checkpoint], + msgpack_checkpoint_dir: str, +) -> str: + """Converts a Policy checkpoint (pickle based) to a msgpack based one. 
+ + Msgpack has the advantage of being python version independent. + + Args: + policy_checkpoint: The directory, in which to find the Policy checkpoint (pickle + based). + msgpack_checkpoint_dir: The directory, in which to create the new msgpack + based checkpoint. + + Returns: + The directory in which the msgpack checkpoint has been created. Note that + this is the same as `msgpack_checkpoint_dir`. + """ + from ray.rllib.policy.policy import Policy + + policy = Policy.from_checkpoint(policy_checkpoint) + + os.makedirs(msgpack_checkpoint_dir, exist_ok=True) + policy.export_checkpoint( + msgpack_checkpoint_dir, + policy_state=policy.get_state(), + checkpoint_format="msgpack", + ) + + # Release all resources used by the Policy. + del policy + + return msgpack_checkpoint_dir + + +@PublicAPI +def try_import_msgpack(error: bool = False): + """Tries importing msgpack and msgpack_numpy and returns the patched msgpack module. + + Returns None if error is False and msgpack or msgpack_numpy is not installed. + Raises an error, if error is True and the modules could not be imported. + + Args: + error: Whether to raise an error if msgpack/msgpack_numpy cannot be imported. + + Returns: + The `msgpack` module. + + Raises: + ImportError: If error=True and msgpack/msgpack_numpy is not installed. + """ + try: + import msgpack + import msgpack_numpy + + # Make msgpack_numpy look like msgpack. + msgpack_numpy.patch() + + return msgpack + + except Exception: + if error: + raise ImportError( + "Could not import or setup msgpack and msgpack_numpy! " + "Try running `pip install msgpack msgpack_numpy` first." 
+ ) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/compression.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/compression.py new file mode 100644 index 0000000000000000000000000000000000000000..cd5e3e6975b4535b6a10a713d85d03ba2093c180 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/compression.py @@ -0,0 +1,90 @@ +from ray.rllib.utils.annotations import DeveloperAPI + +import logging +import time +import base64 +import numpy as np +from ray import cloudpickle as pickle + +logger = logging.getLogger(__name__) + +try: + import lz4.frame + + LZ4_ENABLED = True +except ImportError: + logger.warning( + "lz4 not available, disabling sample compression. " + "This will significantly impact RLlib performance. " + "To install lz4, run `pip install lz4`." + ) + LZ4_ENABLED = False + + +@DeveloperAPI +def compression_supported(): + return LZ4_ENABLED + + +@DeveloperAPI +def pack(data): + if LZ4_ENABLED: + data = pickle.dumps(data) + data = lz4.frame.compress(data) + # TODO(ekl) we shouldn't need to base64 encode this data, but this + # seems to not survive a transfer through the object store if we don't. 
+ data = base64.b64encode(data).decode("ascii") + return data + + +@DeveloperAPI +def pack_if_needed(data): + if isinstance(data, np.ndarray): + data = pack(data) + return data + + +@DeveloperAPI +def unpack(data): + if LZ4_ENABLED: + data = base64.b64decode(data) + data = lz4.frame.decompress(data) + data = pickle.loads(data) + return data + + +@DeveloperAPI +def unpack_if_needed(data): + if is_compressed(data): + data = unpack(data) + return data + + +@DeveloperAPI +def is_compressed(data): + return isinstance(data, bytes) or isinstance(data, str) + + +# Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz +# Compression speed: 753.664 MB/s +# Compression ratio: 87.4839812046 +# Decompression speed: 910.9504 MB/s +if __name__ == "__main__": + size = 32 * 80 * 80 * 4 + data = np.ones(size).reshape((32, 80, 80, 4)) + + count = 0 + start = time.time() + while time.time() - start < 1: + pack(data) + count += 1 + compressed = pack(data) + print("Compression speed: {} MB/s".format(count * size * 4 / 1e6)) + print("Compression ratio: {}".format(round(size * 4 / len(compressed), 2))) + + count = 0 + start = time.time() + while time.time() - start < 1: + unpack(compressed) + count += 1 + print("Decompression speed: {} MB/s".format(count * size * 4 / 1e6)) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/deprecation.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/deprecation.py new file mode 100644 index 0000000000000000000000000000000000000000..354a412b262ee615bf716e83eab246dd419dac4e --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/deprecation.py @@ -0,0 +1,134 @@ +import inspect +import logging +from typing import Optional, Union + +from ray.util import log_once +from ray.util.annotations import _mark_annotated + +logger = logging.getLogger(__name__) + +# A constant to use for any configuration that should be deprecated +# (to check, whether this config has actually been assigned a proper value or +# not). 
+DEPRECATED_VALUE = -1 + + +def deprecation_warning( + old: str, + new: Optional[str] = None, + *, + help: Optional[str] = None, + error: Optional[Union[bool, Exception]] = None, +) -> None: + """Warns (via the `logger` object) or throws a deprecation warning/error. + + Args: + old: A description of the "thing" that is to be deprecated. + new: A description of the new "thing" that replaces it. + help: An optional help text to tell the user, what to + do instead of using `old`. + error: Whether or which exception to raise. If True, raise ValueError. + If False, just warn. If `error` is-a subclass of Exception, + raise that Exception. + + Raises: + ValueError: If `error=True`. + Exception: Of type `error`, iff `error` is a sub-class of `Exception`. + """ + msg = "`{}` has been deprecated.{}".format( + old, (" Use `{}` instead.".format(new) if new else f" {help}" if help else "") + ) + + if error: + if not type(error) is bool and issubclass(error, Exception): + # error is an Exception + raise error(msg) + else: + # error is a boolean, construct ValueError ourselves + raise ValueError(msg) + else: + logger.warning( + "DeprecationWarning: " + msg + " This will raise an error in the future!" + ) + + +def Deprecated(old=None, *, new=None, help=None, error): + """Decorator for documenting a deprecated class, method, or function. + + Automatically adds a `deprecation.deprecation_warning(old=..., + error=False)` to not break existing code at this point to the decorated + class' constructor, method, or function. + + In a next major release, this warning should then be made an error + (by setting error=True), which means at this point that the + class/method/function is no longer supported, but will still inform + the user about the deprecation event. + + In a further major release, the class, method, function should be erased + entirely from the codebase. + + + .. 
testcode:: + :skipif: True + + from ray.rllib.utils.deprecation import Deprecated + # Deprecated class: Patches the constructor to warn if the class is + # used. + @Deprecated(new="NewAndMuchCoolerClass", error=False) + class OldAndUncoolClass: + ... + + # Deprecated class method: Patches the method to warn if called. + class StillCoolClass: + ... + @Deprecated(new="StillCoolClass.new_and_much_cooler_method()", + error=False) + def old_and_uncool_method(self, uncool_arg): + ... + + # Deprecated function: Patches the function to warn if called. + @Deprecated(new="new_and_much_cooler_function", error=False) + def old_and_uncool_function(*uncool_args): + ... + """ + + def _inner(obj): + # A deprecated class. + if inspect.isclass(obj): + # Patch the class' init method to raise the warning/error. + obj_init = obj.__init__ + + def patched_init(*args, **kwargs): + if log_once(old or obj.__name__): + deprecation_warning( + old=old or obj.__name__, + new=new, + help=help, + error=error, + ) + return obj_init(*args, **kwargs) + + obj.__init__ = patched_init + _mark_annotated(obj) + # Return the patched class (with the warning/error when + # instantiated). + return obj + + # A deprecated class method or function. + # Patch with the warning/error at the beginning. + def _ctor(*args, **kwargs): + if log_once(old or obj.__name__): + deprecation_warning( + old=old or obj.__name__, + new=new, + help=help, + error=error, + ) + # Call the deprecated method/function. + return obj(*args, **kwargs) + + # Return the patched class method/function. + return _ctor + + # Return the prepared decorator. 
+ return _inner diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/error.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/error.py new file mode 100644 index 0000000000000000000000000000000000000000..d2b9db4c351a38b8fbb6acf5c660a15db807c1a6 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/error.py @@ -0,0 +1,128 @@ +from ray.rllib.utils.annotations import PublicAPI + + +@PublicAPI +class UnsupportedSpaceException(Exception): + """Error for an unsupported action or observation space.""" + + pass + + +@PublicAPI +class EnvError(Exception): + """Error if we encounter an error during RL environment validation.""" + + pass + + +@PublicAPI +class MultiAgentEnvError(Exception): + """Error if we encounter an error during MultiAgentEnv stepping/validation.""" + + pass + + +@PublicAPI +class NotSerializable(Exception): + """Error if we encounter objects that can't be serialized by ray.""" + + pass + + +# ------- +# Error messages +# ------- + +# Message explaining there are no GPUs available for the +# num_gpus=n or num_gpus_per_env_runner=m settings. +ERR_MSG_NO_GPUS = """Found {} GPUs on your machine (GPU devices found: {})! If your + machine does not have any GPUs, you should set the config keys + `num_gpus_per_learner` and `num_gpus_per_env_runner` to 0. They may be set to + 1 by default for your particular RL algorithm.""" + +ERR_MSG_INVALID_ENV_DESCRIPTOR = """The env string you provided ('{}') is: +a) Not a supported or -installed environment. +b) Not a tune-registered environment creator. +c) Not a valid env class string. + +Try one of the following: +a) For Atari support: `pip install gym[atari] autorom[accept-rom-license]`. + For PyBullet support: `pip install pybullet`. +b) To register your custom env, do `from ray import tune; + tune.register('[name]', lambda cfg: [return env obj from here using cfg])`. + Then in your config, do `config['env'] = [name]`. 
+c) Make sure you provide a fully qualified classpath, e.g.: + `ray.rllib.examples.envs.classes.repeat_after_me_env.RepeatAfterMeEnv` +""" + + +ERR_MSG_OLD_GYM_API = """Your environment ({}) does not abide to the new gymnasium-style API! +From Ray 2.3 on, RLlib only supports the new (gym>=0.26 or gymnasium) Env APIs. +{} +Learn more about the most important changes here: +https://github.com/openai/gym and here: https://github.com/Farama-Foundation/Gymnasium + +In order to fix this problem, do the following: + +1) Run `pip install gymnasium` on your command line. +2) Change all your import statements in your code from + `import gym` -> `import gymnasium as gym` OR + `from gym.spaces import Discrete` -> `from gymnasium.spaces import Discrete` + +For your custom (single agent) gym.Env classes: +3.1) Either wrap your old Env class via the provided `from gymnasium.wrappers import + EnvCompatibility` wrapper class. +3.2) Alternatively to 3.1: + - Change your `reset()` method to have the call signature 'def reset(self, *, + seed=None, options=None)' + - Return an additional info dict (empty dict should be fine) from your `reset()` + method. + - Return an additional `truncated` flag from your `step()` method (between `done` and + `info`). This flag should indicate, whether the episode was terminated prematurely + due to some time constraint or other kind of horizon setting. + +For your custom RLlib `MultiAgentEnv` classes: +4.1) Either wrap your old MultiAgentEnv via the provided + `from ray.rllib.env.wrappers.multi_agent_env_compatibility import + MultiAgentEnvCompatibility` wrapper class. +4.2) Alternatively to 4.1: + - Change your `reset()` method to have the call signature + 'def reset(self, *, seed=None, options=None)' + - Return an additional per-agent info dict (empty dict should be fine) from your + `reset()` method. 
+ - Rename `dones` into `terminateds` and only set this to True, if the episode is really + done (as opposed to has been terminated prematurely due to some horizon/time-limit + setting). + - Return an additional `truncateds` per-agent dictionary flag from your `step()` + method, including the `__all__` key (100% analogous to your `dones/terminateds` + per-agent dict). + Return this new `truncateds` dict between `dones/terminateds` and `infos`. This + flag should indicate, whether the episode (for some agent or all agents) was + terminated prematurely due to some time constraint or other kind of horizon setting. +""" # noqa + + +ERR_MSG_TF_POLICY_CANNOT_SAVE_KERAS_MODEL = """Could not save keras model under self[TfPolicy].model.base_model! + This is either due to .. + a) .. this Policy's ModelV2 not having any `base_model` (tf.keras.Model) property + b) .. the ModelV2's `base_model` not being used by the Algorithm and thus its + variables not being properly initialized. +""" # noqa + +ERR_MSG_TORCH_POLICY_CANNOT_SAVE_MODEL = """Could not save torch model under self[TorchPolicy].model! + This is most likely due to the fact that you are using an Algorithm that + uses a Catalog-generated TorchModelV2 subclass, which is torch.save() cannot pickle. +""" # noqa + +# ------- +# HOWTO_ strings can be added to any error/warning/into message +# to eplain to the user, how to actually fix the encountered problem. +# ------- + +# HOWTO change the RLlib config, depending on how user runs the job. +HOWTO_CHANGE_CONFIG = """ +To change the config for `tune.Tuner().fit()` in a script: Modify the python dict + passed to `tune.Tuner(param_space=[...]).fit()`. +To change the config for an RLlib Algorithm instance: Modify the python dict + passed to the Algorithm's constructor, e.g. `PPO(config=[...])`. 
+""" diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/filter.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..d969abddb119e970621e0d95a2f00cadbf5de95a --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/filter.py @@ -0,0 +1,420 @@ +import logging +import threading + +import numpy as np +import tree # pip install dm_tree + +from ray.rllib.utils.annotations import OldAPIStack +from ray.rllib.utils.deprecation import Deprecated +from ray.rllib.utils.numpy import SMALL_NUMBER +from ray.rllib.utils.typing import TensorStructType +from ray.rllib.utils.serialization import _serialize_ndarray, _deserialize_ndarray +from ray.rllib.utils.deprecation import deprecation_warning + +logger = logging.getLogger(__name__) + + +@OldAPIStack +class Filter: + """Processes input, possibly statefully.""" + + def apply_changes(self, other: "Filter", *args, **kwargs) -> None: + """Updates self with "new state" from other filter.""" + raise NotImplementedError + + def copy(self) -> "Filter": + """Creates a new object with same state as self. + + Returns: + A copy of self. + """ + raise NotImplementedError + + def sync(self, other: "Filter") -> None: + """Copies all state from other filter to self.""" + raise NotImplementedError + + def reset_buffer(self) -> None: + """Creates copy of current state and resets accumulated state""" + raise NotImplementedError + + def as_serializable(self) -> "Filter": + raise NotImplementedError + + @Deprecated(new="Filter.reset_buffer()", error=True) + def clear_buffer(self): + pass + + +@OldAPIStack +class NoFilter(Filter): + is_concurrent = True + + def __call__(self, x: TensorStructType, update=True): + # Process no further if already np.ndarray, dict, or tuple. 
+ if isinstance(x, (np.ndarray, dict, tuple)): + return x + + try: + return np.asarray(x) + except Exception: + raise ValueError("Failed to convert to array", x) + + def apply_changes(self, other: "NoFilter", *args, **kwargs) -> None: + pass + + def copy(self) -> "NoFilter": + return self + + def sync(self, other: "NoFilter") -> None: + pass + + def reset_buffer(self) -> None: + pass + + def as_serializable(self) -> "NoFilter": + return self + + +# http://www.johndcook.com/blog/standard_deviation/ +@OldAPIStack +class RunningStat: + def __init__(self, shape=()): + self.num_pushes = 0 + self.mean_array = np.zeros(shape) + self.std_array = np.zeros(shape) + + def copy(self): + other = RunningStat() + # TODO: Remove these safe-guards if not needed anymore. + other.num_pushes = self.num_pushes if hasattr(self, "num_pushes") else self._n + other.mean_array = ( + np.copy(self.mean_array) + if hasattr(self, "mean_array") + else np.copy(self._M) + ) + other.std_array = ( + np.copy(self.std_array) if hasattr(self, "std_array") else np.copy(self._S) + ) + return other + + def push(self, x): + x = np.asarray(x) + # Unvectorized update of the running statistics. + if x.shape != self.mean_array.shape: + raise ValueError( + "Unexpected input shape {}, expected {}, value = {}".format( + x.shape, self.mean_array.shape, x + ) + ) + self.num_pushes += 1 + if self.num_pushes == 1: + self.mean_array[...] = x + else: + delta = x - self.mean_array + self.mean_array[...] += delta / self.num_pushes + self.std_array[...] 
@OldAPIStack
class MeanStdFilter(Filter):
    """Normalizes observations with a running mean/std estimate.

    Keeps per-element running statistics (`RunningStat`) for every leaf of
    the (possibly nested) observation space and applies
    `(x - mean) / (std + eps)` plus optional clipping on call.
    """

    is_concurrent = False

    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        """Initializes a MeanStdFilter.

        Args:
            shape: The shape (struct) of the observations to filter. None for
                Discrete spaces (no normalization), a plain shape tuple for
                flat spaces, or a dict/tuple of np.ndarray shapes for complex
                spaces.
            demean: Whether to subtract the running mean.
            destd: Whether to divide by the running std.
            clip: Clip the normalized output to +/- this value (None = off).
        """
        self.shape = shape
        # We don't have a preprocessor if shape is None (Discrete) or the
        # flattened shape is a Tuple[np.ndarray] / Dict[str, np.ndarray]
        # (complex inputs).
        flat_shape = tree.flatten(self.shape)
        self.no_preprocessor = shape is None or (
            isinstance(self.shape, (dict, tuple))
            and len(flat_shape) > 0
            and isinstance(flat_shape[0], np.ndarray)
        )
        # If preprocessing (flattening dicts/tuples), make sure shape is an
        # np.ndarray, so we don't confuse it with a complex Tuple space's
        # shape structure (which is a Tuple[np.ndarray]).
        if not self.no_preprocessor:
            self.shape = np.array(self.shape)
        self.demean = demean
        self.destd = destd
        self.clip = clip
        # Running stats (one RunningStat per leaf of `shape`).
        self.running_stats = tree.map_structure(lambda s: RunningStat(s), self.shape)

        # In distributed rollouts, each worker sees different states.
        # The buffer is used to keep track of deltas amongst all the
        # observation filters.
        self.buffer = None
        self.reset_buffer()

    def reset_buffer(self) -> None:
        """Resets the delta-buffer to fresh (empty) RunningStats."""
        self.buffer = tree.map_structure(lambda s: RunningStat(s), self.shape)

    def apply_changes(
        self, other: "MeanStdFilter", with_buffer: bool = False, *args, **kwargs
    ) -> None:
        """Applies updates from the buffer of another filter.

        Args:
            other: Other filter to apply info from.
            with_buffer: Whether `other`'s buffer should also be copied into
                `self.buffer` (replacing it).
        """
        tree.map_structure(
            lambda rs, other_rs: rs.update(other_rs), self.running_stats, other.buffer
        )
        if with_buffer:
            self.buffer = tree.map_structure(lambda b: b.copy(), other.buffer)

    def copy(self) -> "MeanStdFilter":
        """Returns a copy of `self`."""
        other = MeanStdFilter(self.shape)
        other.sync(self)
        return other

    def as_serializable(self) -> "MeanStdFilter":
        return self.copy()

    def sync(self, other: "MeanStdFilter") -> None:
        """Syncs all fields (settings, stats, and buffer) from `other`."""
        self.demean = other.demean
        self.destd = other.destd
        self.clip = other.clip
        self.running_stats = tree.map_structure(
            lambda rs: rs.copy(), other.running_stats
        )
        self.buffer = tree.map_structure(lambda b: b.copy(), other.buffer)

    def __call__(self, x: TensorStructType, update: bool = True) -> TensorStructType:
        """Filters (and optionally records) an observation.

        Args:
            x: The observation (struct) to normalize.
            update: Whether to also push `x` into the running stats/buffer.

        Returns:
            The normalized observation (same dtype as the input).
        """
        if self.no_preprocessor:
            x = tree.map_structure(lambda x_: np.asarray(x_), x)
        else:
            x = np.asarray(x)

        def _helper(x, rs, buffer, shape):
            # Discrete|MultiDiscrete spaces -> No normalization.
            if shape is None:
                return x

            # Keep dtype as-is throughout this filter.
            orig_dtype = x.dtype

            if update:
                if len(x.shape) == len(rs.shape) + 1:
                    # The vectorized case: push each row separately.
                    for i in range(x.shape[0]):
                        rs.push(x[i])
                        buffer.push(x[i])
                else:
                    # The unvectorized case.
                    rs.push(x)
                    buffer.push(x)
            if self.demean:
                x = x - rs.mean
            if self.destd:
                x = x / (rs.std + SMALL_NUMBER)
            if self.clip:
                x = np.clip(x, -self.clip, self.clip)
            return x.astype(orig_dtype)

        if self.no_preprocessor:
            return tree.map_structure_up_to(
                x, _helper, x, self.running_stats, self.buffer, self.shape
            )
        else:
            return _helper(x, self.running_stats, self.buffer, self.shape)


@OldAPIStack
class ConcurrentMeanStdFilter(MeanStdFilter):
    """Thread-safe variant of MeanStdFilter (testing only; deprecated)."""

    is_concurrent = True

    def __init__(self, *args, **kwargs):
        super(ConcurrentMeanStdFilter, self).__init__(*args, **kwargs)
        deprecation_warning(
            old="ConcurrentMeanStdFilter",
            error=False,
            help="ConcurrentMeanStd filters are only used for testing and will "
            "therefore be deprecated in the course of moving to the "
            "Connetors API, where testing of filters will be done by other "
            "means.",
        )
        self._lock = threading.RLock()

    # FIX: The original implementation assigned a lock-wrapping function to
    # `self.__getattribute__`. That has no effect in Python: special methods
    # are looked up on the *type*, not the instance, so NO locking ever
    # actually happened. Instead, explicitly guard the public mutating entry
    # points with the RLock (re-entrant, so nested calls such as
    # `copy() -> sync()` remain safe).

    def __call__(self, *args, **kwargs):
        with self._lock:
            return super().__call__(*args, **kwargs)

    def apply_changes(self, *args, **kwargs) -> None:
        with self._lock:
            return super().apply_changes(*args, **kwargs)

    def sync(self, other: "MeanStdFilter") -> None:
        with self._lock:
            return super().sync(other)

    def reset_buffer(self) -> None:
        # NOTE: The base-class __init__ calls reset_buffer() before `_lock`
        # exists on this instance -> guard with getattr.
        lock = getattr(self, "_lock", None)
        if lock is None:
            return super().reset_buffer()
        with lock:
            return super().reset_buffer()

    def as_serializable(self) -> "MeanStdFilter":
        """Returns non-concurrent version of current class"""
        other = MeanStdFilter(self.shape)
        other.sync(self)
        return other

    def copy(self) -> "ConcurrentMeanStdFilter":
        """Returns a copy of Filter."""
        other = ConcurrentMeanStdFilter(self.shape)
        other.sync(self)
        return other

    def __repr__(self) -> str:
        return "ConcurrentMeanStdFilter({}, {}, {}, {}, {}, {})".format(
            self.shape,
            self.demean,
            self.destd,
            self.clip,
            self.running_stats,
            self.buffer,
        )


@OldAPIStack
def get_filter(filter_config, shape):
    """Resolves a filter spec into a filter instance.

    Args:
        filter_config: "MeanStdFilter" | "ConcurrentMeanStdFilter" |
            "NoFilter" | a callable taking `shape` and returning a filter.
        shape: The observation shape to construct the filter for.

    Raises:
        Exception: If `filter_config` is not a recognized specifier.
    """
    if filter_config == "MeanStdFilter":
        return MeanStdFilter(shape, clip=None)
    elif filter_config == "ConcurrentMeanStdFilter":
        return ConcurrentMeanStdFilter(shape, clip=None)
    elif filter_config == "NoFilter":
        return NoFilter()
    elif callable(filter_config):
        return filter_config(shape)
    else:
        raise Exception("Unknown observation_filter: " + str(filter_config))
@OldAPIStack
class FilterManager:
    """Manages filters and coordination across remote evaluators that expose
    `get_filters` and `sync_filters`.
    """

    @staticmethod
    def synchronize(
        local_filters,
        worker_set,
        update_remote=True,
        timeout_seconds: Optional[float] = None,
        use_remote_data_for_update: bool = True,
    ):
        """Aggregates remote filter stats locally and (optionally) pushes back.

        The local copy is updated from the remote workers' filter buffers (if
        `use_remote_data_for_update` is True) and then broadcast to all remote
        evaluators (if `update_remote` is True).

        Args:
            local_filters: Filters to be synchronized.
            worker_set: EnvRunnerGroup with remote EnvRunners with filters.
            update_remote: Whether to push updates from the local filters to
                the remote workers' filters.
            timeout_seconds: How long to wait for filter to get or set filters
            use_remote_data_for_update: Whether to use the `worker_set`'s
                remote workers to update the local filters. If False, stats
                from the remote workers will not be used and discarded.
        """
        # Nothing to do in either direction -> early out.
        if not (update_remote or use_remote_data_for_update):
            return

        logger.debug(f"Synchronizing filters: {local_filters}")

        # Pull (and flush) the filters of every healthy remote worker.
        remote_filters = worker_set.foreach_worker(
            func=lambda worker: worker.get_filters(flush_after=True),
            local_env_runner=False,
            timeout_seconds=timeout_seconds,
        )
        if len(remote_filters) != worker_set.num_healthy_remote_workers():
            logger.error(
                "Failed to get remote filters from a rollout worker in "
                "FilterManager! "
                "Filtered metrics may be computed, but filtered wrong."
            )

        # Fold each remote worker's stats into the local filters.
        if use_remote_data_for_update:
            for remote_copy_ in remote_filters:
                for key in local_filters:
                    local_filters[key].apply_changes(
                        remote_copy_[key], with_buffer=False
                    )

        # Broadcast the (now possibly synched) local filters back out.
        if update_remote:
            serializable = {k: v.as_serializable() for k, v in local_filters.items()}
            filters_ref = ray.put(serializable)

            logger.debug("Updating remote filters ...")
            results = worker_set.foreach_worker(
                func=lambda worker: worker.sync_filters(ray.get(filters_ref)),
                local_env_runner=False,
                timeout_seconds=timeout_seconds,
            )
            if len(results) != worker_set.num_healthy_remote_workers():
                logger.error(
                    "Failed to set remote filters to a rollout worker in "
                    "FilterManager. "
                    "Filtered metrics may be computed, but filtered wrong."
                )
@PublicAPI
def convert_to_tensor(
    data: TensorStructType,
    framework: str,
    device: Optional[str] = None,
):
    """Converts any nested numpy struct into framework-specific tensors.

    Args:
        data: The input data (numpy) to convert to framework-specific tensors.
        framework: The framework to convert to. Only "torch" and "tf2" allowed.
        device: An optional device name (for torch only).

    Returns:
        The converted tensor struct matching the input data.
    """
    if framework == "torch":
        from ray.rllib.utils.torch_utils import convert_to_torch_tensor

        return convert_to_torch_tensor(data, device=device)
    if framework == "tf2":
        _, tf, _ = try_import_tf()
        return tree.map_structure(tf.convert_to_tensor, data)
    raise NotImplementedError(
        f"framework={framework} not supported in `convert_to_tensor()`!"
    )


@PublicAPI
def try_import_jax(error: bool = False):
    """Tries importing JAX and FLAX and returns both modules (or Nones).

    Args:
        error: Whether to raise an error if JAX/FLAX cannot be imported.

    Returns:
        Tuple containing the jax- and the flax modules.

    Raises:
        ImportError: If error=True and JAX is not installed.
    """
    # Allow tests to simulate a missing JAX installation.
    if "RLLIB_TEST_NO_JAX_IMPORT" in os.environ:
        logger.warning("Not importing JAX for test purposes.")
        return None, None

    try:
        import jax
        import flax
    except ImportError:
        if error:
            raise ImportError(
                "Could not import JAX! RLlib requires you to "
                "install at least one deep-learning framework: "
                "`pip install [torch|tensorflow|jax]`."
            )
        return None, None

    return jax, flax


@PublicAPI
def try_import_tf(error: bool = False):
    """Tries importing tf and returns the module (or None).

    Args:
        error: Whether to raise an error if tf cannot be imported.

    Returns:
        Tuple containing
        1) tf1.x module (either from tf2.x.compat.v1 OR as tf1.x).
        2) tf module (resulting from `import tensorflow`). Either tf1.x or
        2.x.
        3) The actually installed tf version as int: 1 or 2.

    Raises:
        ImportError: If error=True and tf is not installed.
    """
    stub = _TFStub()
    # Make sure, these are reset after each test case that uses them:
    # del os.environ["RLLIB_TEST_NO_TF_IMPORT"]
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow for test purposes")
        return None, stub, None

    # Quiet down TF's C++ logging unless the user configured it already.
    os.environ.setdefault("TF_CPP_MIN_LOG_LEVEL", "3")

    # Reuse an already-imported tf module if present. This avoids the initial
    # import steps below and thereby switching off v2_behavior a second time
    # (doing that twice breaks all-framework tests for eager).
    preloaded = "tensorflow" in sys.modules
    if preloaded:
        tf_pkg = sys.modules["tensorflow"]
    else:
        try:
            import tensorflow as tf_pkg
        except ImportError:
            if error:
                raise ImportError(
                    "Could not import TensorFlow! RLlib requires you to "
                    "install at least one deep-learning framework: "
                    "`pip install [torch|tensorflow|jax]`."
                )
            return None, stub, None

    # Try "reducing" tf to tf.compat.v1.
    try:
        tf1_pkg = tf_pkg.compat.v1
        tf1_pkg.logging.set_verbosity(tf1_pkg.logging.ERROR)
        if not preloaded:
            tf1_pkg.disable_v2_behavior()
            tf1_pkg.enable_resource_variables()
        tf1_pkg.logging.set_verbosity(tf1_pkg.logging.WARN)
    # No compat.v1 -> return tf as is.
    except AttributeError:
        tf1_pkg = tf_pkg

    if not hasattr(tf_pkg, "__version__"):
        version = 1  # sphinx doc gen
    else:
        version = 2 if "2." in tf_pkg.__version__[:2] else 1

    return tf1_pkg, tf_pkg, version


# Fake module for tf.
class _TFStub:
    def __init__(self) -> None:
        self.keras = _KerasStub()

    def __bool__(self):
        # A missing tf must test as falsy.
        return False


# Fake module for tf.keras.
class _KerasStub:
    def __init__(self) -> None:
        self.Model = _FakeTfClassStub


# Fake classes under keras (e.g for tf.keras.Model)
class _FakeTfClassStub:
    def __init__(self, *a, **kw):
        raise ImportError("Could not import `tensorflow`. Try pip install tensorflow.")


@DeveloperAPI
def tf_function(tf_module):
    """Conditional decorator for @tf.function.

    Use @tf_function(tf) instead to avoid errors if tf is not installed."""

    # The actual decorator to use (pass in `tf` (which could be None)).
    def decorator(func):
        # tf not installed, or in eager mode -> leave the function untouched.
        if tf_module is None or tf_module.executing_eagerly():
            return func
        # Otherwise: return the @tf.function-compiled version.
        return tf_module.function(func)

    return decorator


@PublicAPI
def try_import_tfp(error: bool = False):
    """Tries importing tfp and returns the module (or None).

    Args:
        error: Whether to raise an error if tfp cannot be imported.

    Returns:
        The tfp module.

    Raises:
        ImportError: If error=True and tfp is not installed.
    """
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow Probability for test purposes.")
        return None

    try:
        import tensorflow_probability as tfp
    except ImportError as e:
        if error:
            raise e
        return None
    return tfp


# Fake module for torch.nn.
class _NNStub:
    def __init__(self, *a, **kw):
        # Fake nn.functional module within torch.nn.
        self.functional = None
        self.Module = _FakeTorchClassStub
        self.parallel = _ParallelStub()


# Fake class for e.g. torch.nn.Module to allow it to be inherited from.
class _FakeTorchClassStub:
    def __init__(self, *a, **kw):
        raise ImportError("Could not import `torch`. Try pip install torch.")


class _ParallelStub:
    def __init__(self, *a, **kw):
        self.DataParallel = _FakeTorchClassStub
        self.DistributedDataParallel = _FakeTorchClassStub


@PublicAPI
def try_import_torch(error: bool = False):
    """Tries importing torch and returns the module (or None).

    Args:
        error: Whether to raise an error if torch cannot be imported.

    Returns:
        Tuple consisting of the torch- AND torch.nn modules.

    Raises:
        ImportError: If error=True and PyTorch is not installed.
    """
    if "RLLIB_TEST_NO_TORCH_IMPORT" in os.environ:
        logger.warning("Not importing PyTorch for test purposes.")
        return _torch_stubs()

    try:
        import torch
        import torch.nn as nn
    except ImportError:
        if error:
            raise ImportError(
                "Could not import PyTorch! RLlib requires you to "
                "install at least one deep-learning framework: "
                "`pip install [torch|tensorflow|jax]`."
            )
        return _torch_stubs()
    return torch, nn


def _torch_stubs():
    # (None, stub-nn): mirrors the (torch, nn) return shape of
    # `try_import_torch` for the not-installed case.
    return None, _NNStub()


@DeveloperAPI
def get_variable(
    value: Any,
    framework: str = "tf",
    trainable: bool = False,
    tf_name: str = "unnamed-variable",
    torch_tensor: bool = False,
    device: Optional[str] = None,
    shape: Optional[TensorShape] = None,
    dtype: Optional[TensorType] = None,
) -> Any:
    """Creates a tf variable, a torch tensor, or a python primitive.

    Args:
        value: The initial value to use. In the non-tf case, this will
            be returned as is. In the tf case, this could be a tf-Initializer
            object.
        framework: One of "tf", "torch", or None.
        trainable: Whether the generated variable should be
            trainable (tf)/require_grad (torch) or not (default: False).
        tf_name: For framework="tf": An optional name for the tf.Variable.
        torch_tensor: For framework="torch": Whether to actually create
            a torch.tensor, or just a python value (default).
        device: An optional torch device to use for the created torch tensor.
        shape: An optional shape to use iff `value` does not have any
            (e.g. if it's an initializer w/o explicit value).
        dtype: An optional dtype to use iff `value` does not have any
            (e.g. if it's an initializer w/o explicit value). This should
            always be a numpy dtype (e.g. np.float32, np.int64).

    Returns:
        A framework-specific variable (tf.Variable, torch.tensor, or
        python primitive).
    """
    if framework in ["tf2", "tf"]:
        import tensorflow as tf

        # Infer a dtype from the value if none was given.
        if isinstance(value, float):
            fallback_dtype = tf.float32
        elif isinstance(value, int):
            fallback_dtype = tf.int32
        else:
            fallback_dtype = None
        dtype = dtype or getattr(value, "dtype", fallback_dtype)
        extra = {} if shape is None else {"shape": shape}
        return tf.compat.v1.get_variable(
            tf_name,
            initializer=value,
            dtype=dtype,
            trainable=trainable,
            **extra,
        )
    if framework == "torch" and torch_tensor is True:
        torch, _ = try_import_torch()
        arr = value if isinstance(value, np.ndarray) else np.array(value)
        var_ = torch.from_numpy(arr)
        if dtype in [torch.float32, np.float32]:
            var_ = var_.float()
        elif dtype in [torch.int32, np.int32]:
            var_ = var_.int()
        elif dtype in [torch.float64, np.float64]:
            var_ = var_.double()

        if device:
            var_ = var_.to(device)
        var_.requires_grad = trainable
        return var_
    # framework=torch (w/o torch_tensor) or None: Return python primitive.
    return value
+ """ + if framework in ["tf2", "tf"]: + import tensorflow as tf + + dtype = dtype or getattr( + value, + "dtype", + tf.float32 + if isinstance(value, float) + else tf.int32 + if isinstance(value, int) + else None, + ) + return tf.compat.v1.get_variable( + tf_name, + initializer=value, + dtype=dtype, + trainable=trainable, + **({} if shape is None else {"shape": shape}), + ) + elif framework == "torch" and torch_tensor is True: + torch, _ = try_import_torch() + if not isinstance(value, np.ndarray): + value = np.array(value) + var_ = torch.from_numpy(value) + if dtype in [torch.float32, np.float32]: + var_ = var_.float() + elif dtype in [torch.int32, np.int32]: + var_ = var_.int() + elif dtype in [torch.float64, np.float64]: + var_ = var_.double() + + if device: + var_ = var_.to(device) + var_.requires_grad = trainable + return var_ + # torch or None: Return python primitive. + return value + + +@Deprecated( + old="rllib/utils/framework.py::get_activation_fn", + new="rllib/models/utils.py::get_activation_fn", + error=True, +) +def get_activation_fn(name: Optional[str] = None, framework: str = "tf"): + pass diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/from_config.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/from_config.py new file mode 100644 index 0000000000000000000000000000000000000000..522ba8dd28783f93d41cf257507808f38259c5a4 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/from_config.py @@ -0,0 +1,325 @@ +from copy import deepcopy +from functools import partial +import importlib +import json +import os +import re +import yaml + +from ray.rllib.utils.annotations import DeveloperAPI +from ray.rllib.utils import force_list, merge_dicts + + +@DeveloperAPI +def from_config(cls, config=None, **kwargs): + """Uses the given config to create an object. + + If `config` is a dict, an optional "type" key can be used as a + "constructor hint" to specify a certain class of the object. 
+ If `config` is not a dict, `config`'s value is used directly as this + "constructor hint". + + The rest of `config` (if it's a dict) will be used as kwargs for the + constructor. Additional keys in **kwargs will always have precedence + (overwrite keys in `config` (if a dict)). + Also, if the config-dict or **kwargs contains the special key "_args", + it will be popped from the dict and used as *args list to be passed + separately to the constructor. + + The following constructor hints are valid: + - None: Use `cls` as constructor. + - An already instantiated object: Will be returned as is; no + constructor call. + - A string or an object that is a key in `cls`'s `__type_registry__` + dict: The value in `__type_registry__` for that key will be used + as the constructor. + - A python callable: Use that very callable as constructor. + - A string: Either a json/yaml filename or the name of a python + module+class (e.g. "ray.rllib. [...] .[some class name]") + + Args: + cls: The class to build an instance for (from `config`). + config (Optional[dict, str]): The config dict or type-string or + filename. + + Keyword Args: + kwargs: Optional possibility to pass the constructor arguments in + here and use `config` as the type-only info. Then we can call + this like: from_config([type]?, [**kwargs for constructor]) + If `config` is already a dict, then `kwargs` will be merged + with `config` (overwriting keys in `config`) after "type" has + been popped out of `config`. + If a constructor of a Configurable needs *args, the special + key `_args` can be passed inside `kwargs` with a list value + (e.g. kwargs={"_args": [arg1, arg2, arg3]}). + + Returns: + any: The object generated from the config. + """ + # `cls` is the config (config is None). + if config is None and isinstance(cls, (dict, str)): + config = cls + cls = None + # `config` is already a created object of this class -> + # Take it as is. 
+ elif isinstance(cls, type) and isinstance(config, cls): + return config + + # `type_`: Indicator for the Configurable's constructor. + # `ctor_args`: *args arguments for the constructor. + # `ctor_kwargs`: **kwargs arguments for the constructor. + # Try to copy, so caller can reuse safely. + try: + config = deepcopy(config) + except Exception: + pass + if isinstance(config, dict): + type_ = config.pop("type", None) + if type_ is None and isinstance(cls, str): + type_ = cls + ctor_kwargs = config + # Give kwargs priority over things defined in config dict. + # This way, one can pass a generic `spec` and then override single + # constructor parameters via the kwargs in the call to `from_config`. + ctor_kwargs.update(kwargs) + else: + type_ = config + if type_ is None and "type" in kwargs: + type_ = kwargs.pop("type") + ctor_kwargs = kwargs + # Special `_args` field in kwargs for *args-utilizing constructors. + ctor_args = force_list(ctor_kwargs.pop("_args", [])) + + # Figure out the actual constructor (class) from `type_`. + # None: Try __default__object (if no args/kwargs), only then + # constructor of cls (using args/kwargs). + if type_ is None: + # We have a default constructor that was defined directly by cls + # (not by its children). + if ( + cls is not None + and hasattr(cls, "__default_constructor__") + and cls.__default_constructor__ is not None + and ctor_args == [] + and ( + not hasattr(cls.__bases__[0], "__default_constructor__") + or cls.__bases__[0].__default_constructor__ is None + or cls.__bases__[0].__default_constructor__ + is not cls.__default_constructor__ + ) + ): + constructor = cls.__default_constructor__ + # Default constructor's keywords into ctor_kwargs. + if isinstance(constructor, partial): + kwargs = merge_dicts(ctor_kwargs, constructor.keywords) + constructor = partial(constructor.func, **kwargs) + ctor_kwargs = {} # erase to avoid duplicate kwarg error + # No default constructor -> Try cls itself as constructor. 
+ else: + constructor = cls + # Try the __type_registry__ of this class. + else: + constructor = _lookup_type(cls, type_) + + # Found in cls.__type_registry__. + if constructor is not None: + pass + # type_ is False or None (and this value is not registered) -> + # return value of type_. + elif type_ is False or type_ is None: + return type_ + # Python callable. + elif callable(type_): + constructor = type_ + # A string: Filename or a python module+class or a json/yaml str. + elif isinstance(type_, str): + if re.search("\\.(yaml|yml|json)$", type_): + return from_file(cls, type_, *ctor_args, **ctor_kwargs) + # Try un-json/un-yaml'ing the string into a dict. + obj = yaml.safe_load(type_) + if isinstance(obj, dict): + return from_config(cls, obj) + try: + obj = from_config(cls, json.loads(type_)) + except json.JSONDecodeError: + pass + else: + return obj + + # Test for absolute module.class path specifier. + if type_.find(".") != -1: + module_name, function_name = type_.rsplit(".", 1) + try: + module = importlib.import_module(module_name) + constructor = getattr(module, function_name) + # Module not found. + except (ModuleNotFoundError, ImportError, AttributeError): + pass + + # If constructor still not found, try attaching cls' module, + # then look for type_ in there. + if constructor is None: + if isinstance(cls, str): + # Module found, but doesn't have the specified + # c'tor/function. + raise ValueError( + f"Full classpath specifier ({type_}) must be a valid " + "full [module].[class] string! E.g.: " + "`my.cool.module.MyCoolClass`." + ) + + try: + module = importlib.import_module(cls.__module__) + constructor = getattr(module, type_) + except (ModuleNotFoundError, ImportError, AttributeError): + # Try the package as well. 
+ try: + package_name = importlib.import_module( + cls.__module__ + ).__package__ + module = __import__(package_name, fromlist=[type_]) + constructor = getattr(module, type_) + except (ModuleNotFoundError, ImportError, AttributeError): + pass + + if constructor is None: + raise ValueError( + f"String specifier ({type_}) must be a valid filename, " + f"a [module].[class], a class within '{cls.__module__}', " + f"or a key into {cls.__name__}.__type_registry__!" + ) + + if not constructor: + raise TypeError("Invalid type '{}'. Cannot create `from_config`.".format(type_)) + + # Create object with inferred constructor. + try: + object_ = constructor(*ctor_args, **ctor_kwargs) + # Catch attempts to construct from an abstract class and return None. + except TypeError as e: + if re.match("Can't instantiate abstract class", e.args[0]): + return None + raise e # Re-raise + # No sanity check for fake (lambda)-"constructors". + if type(constructor).__name__ != "function": + assert isinstance( + object_, + constructor.func if isinstance(constructor, partial) else constructor, + ) + + return object_ + + +@DeveloperAPI +def from_file(cls, filename, *args, **kwargs): + """ + Create object from config saved in filename. Expects json or yaml file. + + Args: + filename: File containing the config (json or yaml). + + Returns: + any: The object generated from the file. + """ + path = os.path.join(os.getcwd(), filename) + if not os.path.isfile(path): + raise FileNotFoundError("File '{}' not found!".format(filename)) + + with open(path, "rt") as fp: + if path.endswith(".yaml") or path.endswith(".yml"): + config = yaml.safe_load(fp) + else: + config = json.load(fp) + + # Add possible *args. 
+ config["_args"] = args + return from_config(cls, config=config, **kwargs) + + +def _lookup_type(cls, type_): + if ( + cls is not None + and hasattr(cls, "__type_registry__") + and isinstance(cls.__type_registry__, dict) + and ( + type_ in cls.__type_registry__ + or ( + isinstance(type_, str) + and re.sub("[\\W_]", "", type_.lower()) in cls.__type_registry__ + ) + ) + ): + available_class_for_type = cls.__type_registry__.get(type_) + if available_class_for_type is None: + available_class_for_type = cls.__type_registry__[ + re.sub("[\\W_]", "", type_.lower()) + ] + return available_class_for_type + return None + + +class _NotProvided: + """Singleton class to provide a "not provided" value for AlgorithmConfig signatures. + + Using the only instance of this class indicates that the user does NOT wish to + change the value of some property. + + .. testcode:: + :skipif: True + + from ray.rllib.algorithms.algorithm_config import AlgorithmConfig + config = AlgorithmConfig() + # Print out the default learning rate. + print(config.lr) + + .. testoutput:: + + 0.001 + + .. testcode:: + :skipif: True + + # Print out the default `preprocessor_pref`. + print(config.preprocessor_pref) + + .. testoutput:: + + "deepmind" + + .. testcode:: + :skipif: True + + # Will only set the `preprocessor_pref` property (to None) and leave + # all other properties at their default values. + config.training(preprocessor_pref=None) + config.preprocessor_pref is None + + .. testoutput:: + + True + + .. testcode:: + :skipif: True + + # Still the same value (didn't touch it in the call to `.training()`. + print(config.lr) + + .. testoutput:: + + 0.001 + """ + + class __NotProvided: + pass + + instance = None + + def __init__(self): + if _NotProvided.instance is None: + _NotProvided.instance = _NotProvided.__NotProvided() + + +# Use this object as default values in all method signatures of +# AlgorithmConfig, indicating that the respective property should NOT be touched +# in the call. 
# Use this object as default value in all method signatures of
# AlgorithmConfig, indicating that the respective property should NOT be
# touched in the call.
NotProvided = _NotProvided()


# --- rllib/utils/images.py ---
import logging
# FIX: the original did `import importlib` but used `importlib.metadata.*`
# below; `importlib.metadata` is a submodule that is NOT imported by
# `import importlib` alone and only happened to work when some other module
# imported it first. Import the submodule explicitly.
import importlib.metadata

import numpy as np

from ray.rllib.utils.annotations import DeveloperAPI

logger = logging.getLogger(__name__)


@DeveloperAPI
def is_package_installed(package_name):
    """Returns True iff `package_name` has installed distribution metadata."""
    try:
        importlib.metadata.version(package_name)
        return True
    except importlib.metadata.PackageNotFoundError:
        return False


try:
    import cv2

    cv2.ocl.setUseOpenCL(False)

    logger.debug("CV2 found for image processing.")
except ImportError as e:
    # A common failure mode: `opencv-python` is installed but cannot be
    # imported (e.g. missing GUI libs on a headless machine).
    if is_package_installed("opencv-python"):
        raise ImportError(
            f"OpenCV is installed, but we failed to import it. This may be because "
            f"you need to install `opencv-python-headless` instead of "
            f"`opencv-python`. Error message: {e}",
        )
    cv2 = None


@DeveloperAPI
def resize(img: np.ndarray, height: int, width: int) -> np.ndarray:
    """Resizes `img` to (height, width) using OpenCV's INTER_AREA."""
    if not cv2:
        raise ModuleNotFoundError(
            "`opencv` not installed! Do `pip install opencv-python`"
        )
    return cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)


@DeveloperAPI
def rgb2gray(img: np.ndarray) -> np.ndarray:
    """Converts an RGB image to grayscale."""
    if not cv2:
        raise ModuleNotFoundError(
            "`opencv` not installed! Do `pip install opencv-python`"
        )
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)


@DeveloperAPI
def imread(img_file: str) -> np.ndarray:
    """Reads an image file into a float32 np.ndarray.

    Raises:
        FileNotFoundError: If the file is missing or cannot be decoded.
    """
    if not cv2:
        raise ModuleNotFoundError(
            "`opencv` not installed! Do `pip install opencv-python`"
        )
    img = cv2.imread(img_file)
    # FIX: cv2.imread() returns None (no exception!) for a missing or
    # undecodable file; the original code then crashed with an opaque
    # AttributeError on `.astype`. Fail with a clear error instead.
    if img is None:
        raise FileNotFoundError(
            f"Could not read image file '{img_file}' (missing or not decodable)."
        )
    return img.astype(np.float32)
class LambdaDefaultDict(defaultdict):
    """A defaultdict whose factory receives the missing key as argument.

    The standard `defaultdict` can only produce default values (via its
    factory) that are independent of the key under which they are stored.
    This class instead calls its factory with the missing key itself, stores
    the returned value under that key, and returns it.

    Example:

        In this example, accessing a key that doesn't exist calls the lambda
        with the missing key; the returned string is stored under that key.

    .. testcode::

        from ray.rllib.utils.lambda_defaultdict import LambdaDefaultDict

        default_dict = LambdaDefaultDict(lambda missing_key: f"Value for {missing_key}")
        print(default_dict["a"])

    .. testoutput::

        Value for a
    """  # noqa: E501

    def __init__(self, default_factory: Callable[[str], Any], *args, **kwargs):
        """Initializes a LambdaDefaultDict instance.

        Args:
            default_factory: The default factory callable, taking a string
                (key) and returning the default value to use for that key.
        """
        if not callable(default_factory):
            raise TypeError("First argument must be a Callable!")

        # Pass None to the base class so it never invokes the factory itself;
        # all factory handling happens in __missing__ below.
        super().__init__(None, *args, **kwargs)

        self.default_factory = default_factory

    def __missing__(self, key):
        # Build the default from the key, cache it, and return it.
        value = self.default_factory(key)
        self[key] = value
        return value
+LARGE_INTEGER = 100000000 +# Min and Max outputs (clipped) from an NN-output layer interpreted as the +# log(x) of some x (e.g. a stddev of a normal +# distribution). +MIN_LOG_NN_OUTPUT = -5 +MAX_LOG_NN_OUTPUT = 2 + + +@PublicAPI +@Deprecated( + help="RLlib itself has no use for this anymore.", + error=False, +) +def aligned_array(size: int, dtype, align: int = 64) -> np.ndarray: + """Returns an array of a given size that is 64-byte aligned. + + The returned array can be efficiently copied into GPU memory by TensorFlow. + + Args: + size: The size (total number of items) of the array. For example, + array([[0.0, 1.0], [2.0, 3.0]]) would have size=4. + dtype: The numpy dtype of the array. + align: The alignment to use. + + Returns: + A np.ndarray with the given specifications. + """ + n = size * dtype.itemsize + empty = np.empty(n + (align - 1), dtype=np.uint8) + data_align = empty.ctypes.data % align + offset = 0 if data_align == 0 else (align - data_align) + if n == 0: + # stop np from optimising out empty slice reference + output = empty[offset : offset + 1][0:0].view(dtype) + else: + output = empty[offset : offset + n].view(dtype) + + assert len(output) == size, len(output) + assert output.ctypes.data % align == 0, output.ctypes.data + return output + + +@PublicAPI +@Deprecated( + help="RLlib itself has no use for this anymore.", + error=False, +) +def concat_aligned( + items: List[np.ndarray], time_major: Optional[bool] = None +) -> np.ndarray: + """Concatenate arrays, ensuring the output is 64-byte aligned. + + We only align float arrays; other arrays are concatenated as normal. + + This should be used instead of np.concatenate() to improve performance + when the output array is likely to be fed into TensorFlow. + + Args: + items: The list of items to concatenate and align. + time_major: Whether the data in items is time-major, in which + case, we will concatenate along axis=1. + + Returns: + The concat'd and aligned array. 
+ """ + + if len(items) == 0: + return [] + elif len(items) == 1: + # we assume the input is aligned. In any case, it doesn't help + # performance to force align it since that incurs a needless copy. + return items[0] + elif isinstance(items[0], np.ndarray) and items[0].dtype in [ + np.float32, + np.float64, + np.uint8, + ]: + dtype = items[0].dtype + flat = aligned_array(sum(s.size for s in items), dtype) + if time_major is not None: + if time_major is True: + batch_dim = sum(s.shape[1] for s in items) + new_shape = (items[0].shape[0], batch_dim,) + items[ + 0 + ].shape[2:] + else: + batch_dim = sum(s.shape[0] for s in items) + new_shape = (batch_dim, items[0].shape[1],) + items[ + 0 + ].shape[2:] + else: + batch_dim = sum(s.shape[0] for s in items) + new_shape = (batch_dim,) + items[0].shape[1:] + output = flat.reshape(new_shape) + assert output.ctypes.data % 64 == 0, output.ctypes.data + np.concatenate(items, out=output, axis=1 if time_major else 0) + return output + else: + return np.concatenate(items, axis=1 if time_major else 0) + + +@PublicAPI +def convert_to_numpy(x: TensorStructType, reduce_type: bool = True) -> TensorStructType: + """Converts values in `stats` to non-Tensor numpy or python types. + + Args: + x: Any (possibly nested) struct, the values in which will be + converted and returned as a new struct with all torch/tf tensors + being converted to numpy types. + reduce_type: Whether to automatically reduce all float64 and int64 data + into float32 and int32 data, respectively. + + Returns: + A new struct with the same structure as `x`, but with all + values converted to numpy arrays (on CPU). + """ + + # The mapping function used to numpyize torch/tf Tensors (and move them + # to the CPU beforehand). 
+ def mapping(item): + if torch and isinstance(item, torch.Tensor): + ret = ( + item.cpu().item() + if len(item.size()) == 0 + else item.detach().cpu().numpy() + ) + elif ( + tf and isinstance(item, (tf.Tensor, tf.Variable)) and hasattr(item, "numpy") + ): + assert tf.executing_eagerly() + ret = item.numpy() + else: + ret = item + if reduce_type and isinstance(ret, np.ndarray): + if np.issubdtype(ret.dtype, np.floating): + ret = ret.astype(np.float32) + elif np.issubdtype(ret.dtype, int): + ret = ret.astype(np.int32) + return ret + return ret + + return tree.map_structure(mapping, x) + + +@PublicAPI +def fc( + x: np.ndarray, + weights: np.ndarray, + biases: Optional[np.ndarray] = None, + framework: Optional[str] = None, +) -> np.ndarray: + """Calculates FC (dense) layer outputs given weights/biases and input. + + Args: + x: The input to the dense layer. + weights: The weights matrix. + biases: The biases vector. All 0s if None. + framework: An optional framework hint (to figure out, + e.g. whether to transpose torch weight matrices). + + Returns: + The dense layer's output. + """ + + def map_(data, transpose=False): + if torch: + if isinstance(data, torch.Tensor): + data = data.cpu().detach().numpy() + if tf and tf.executing_eagerly(): + if isinstance(data, tf.Variable): + data = data.numpy() + if transpose: + data = np.transpose(data) + return data + + x = map_(x) + # Torch stores matrices in transpose (faster for backprop). + transpose = framework == "torch" and ( + x.shape[1] != weights.shape[0] and x.shape[1] == weights.shape[1] + ) + weights = map_(weights, transpose=transpose) + biases = map_(biases) + + return np.matmul(x, weights) + (0.0 if biases is None else biases) + + +@PublicAPI +def flatten_inputs_to_1d_tensor( + inputs: TensorStructType, + spaces_struct: Optional[SpaceStruct] = None, + time_axis: bool = False, + batch_axis: bool = True, +) -> TensorType: + """Flattens arbitrary input structs according to the given spaces struct. 
+ + Returns a single 1D tensor resulting from the different input + components' values. + + Thereby: + - Boxes (any shape) get flattened to (B, [T]?, -1). Note that image boxes + are not treated differently from other types of Boxes and get + flattened as well. + - Discrete (int) values are one-hot'd, e.g. a batch of [1, 0, 3] (B=3 with + Discrete(4) space) results in [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]. + - MultiDiscrete values are multi-one-hot'd, e.g. a batch of + [[0, 2], [1, 4]] (B=2 with MultiDiscrete([2, 5]) space) results in + [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]]. + + Args: + inputs: The inputs to be flattened. + spaces_struct: The (possibly nested) structure of the spaces that `inputs` + belongs to. + time_axis: Whether all inputs have a time-axis (after the batch axis). + If True, will keep not only the batch axis (0th), but the time axis + (1st) as-is and flatten everything from the 2nd axis up. + batch_axis: Whether all inputs have a batch axis. + If True, will keep that batch axis as-is and flatten everything from the + other dims up. + + Returns: + A single 1D tensor resulting from concatenating all + flattened/one-hot'd input components. Depending on the time_axis flag, + the shape is (B, n) or (B, T, n). + + .. testcode:: + :skipif: True + + # B=2 + from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor + from gymnasium.spaces import Discrete, Box + out = flatten_inputs_to_1d_tensor( + {"a": [1, 0], "b": [[[0.0], [0.1]], [1.0], [1.1]]}, + spaces_struct=dict(a=Discrete(2), b=Box(shape=(2, 1))) + ) + print(out) + + # B=2; T=2 + out = flatten_inputs_to_1d_tensor( + ([[1, 0], [0, 1]], + [[[0.0, 0.1], [1.0, 1.1]], [[2.0, 2.1], [3.0, 3.1]]]), + spaces_struct=tuple([Discrete(2), Box(shape=(2, ))]), + time_axis=True + ) + print(out) + + .. 
testoutput:: + + [[0.0, 1.0, 0.0, 0.1], [1.0, 0.0, 1.0, 1.1]] # B=2 n=4 + [[[0.0, 1.0, 0.0, 0.1], [1.0, 0.0, 1.0, 1.1]], + [[1.0, 0.0, 2.0, 2.1], [0.0, 1.0, 3.0, 3.1]]] # B=2 T=2 n=4 + """ + # `time_axis` must not be True if `batch_axis` is False. + assert not (time_axis and not batch_axis) + + flat_inputs = tree.flatten(inputs) + flat_spaces = ( + tree.flatten(spaces_struct) + if spaces_struct is not None + else [None] * len(flat_inputs) + ) + + B = None + T = None + out = [] + for input_, space in zip(flat_inputs, flat_spaces): + # Store batch and (if applicable) time dimension. + if B is None and batch_axis: + B = input_.shape[0] + if time_axis: + T = input_.shape[1] + + # One-hot encoding. + if isinstance(space, Discrete): + if time_axis: + input_ = np.reshape(input_, [B * T]) + out.append(one_hot(input_, depth=space.n).astype(np.float32)) + # Multi one-hot encoding. + elif isinstance(space, MultiDiscrete): + if time_axis: + input_ = np.reshape(input_, [B * T, -1]) + if batch_axis: + out.append( + np.concatenate( + [ + one_hot(input_[:, i], depth=n).astype(np.float32) + for i, n in enumerate(space.nvec) + ], + axis=-1, + ) + ) + else: + out.append( + np.concatenate( + [ + one_hot(input_[i], depth=n).astype(np.float32) + for i, n in enumerate(space.nvec) + ], + axis=-1, + ) + ) + # Box: Flatten. + else: + # Special case for spaces: Box(.., shape=(), ..) + if isinstance(input_, float): + input_ = np.array([input_]) + + if time_axis: + input_ = np.reshape(input_, [B * T, -1]) + elif batch_axis: + input_ = np.reshape(input_, [B, -1]) + else: + input_ = np.reshape(input_, [-1]) + out.append(input_.astype(np.float32)) + + merged = np.concatenate(out, axis=-1) + # Restore the time-dimension, if applicable. + if time_axis: + merged = np.reshape(merged, [B, T, -1]) + return merged + + +@PublicAPI +def make_action_immutable(obj): + """Flags actions immutable to notify users when trying to change them. 
+ + Can also be used with any tree-like structure containing either + dictionaries, numpy arrays or already immutable objects per se. + Note, however that `tree.map_structure()` will in general not + include the shallow object containing all others and therefore + immutability will hold only for all objects contained in it. + Use `tree.traverse(fun, action, top_down=False)` to include + also the containing object. + + Args: + obj: The object to be made immutable. + + Returns: + The immutable object. + + .. testcode:: + :skipif: True + + import tree + import numpy as np + from ray.rllib.utils.numpy import make_action_immutable + arr = np.arange(1,10) + d = dict(a = 1, b = (arr, arr)) + tree.traverse(make_action_immutable, d, top_down=False) + """ + if isinstance(obj, np.ndarray): + obj.setflags(write=False) + return obj + elif isinstance(obj, OrderedDict): + return MappingProxyType(dict(obj)) + elif isinstance(obj, dict): + return MappingProxyType(obj) + else: + return obj + + +@PublicAPI +def huber_loss(x: np.ndarray, delta: float = 1.0) -> np.ndarray: + """Reference: https://en.wikipedia.org/wiki/Huber_loss.""" + return np.where( + np.abs(x) < delta, np.power(x, 2.0) * 0.5, delta * (np.abs(x) - 0.5 * delta) + ) + + +@PublicAPI +def l2_loss(x: np.ndarray) -> np.ndarray: + """Computes half the L2 norm of a tensor (w/o the sqrt): sum(x**2) / 2. + + Args: + x: The input tensor. + + Returns: + The l2-loss output according to the above formula given `x`. + """ + return np.sum(np.square(x)) / 2.0 + + +@PublicAPI +def lstm( + x, + weights: np.ndarray, + biases: Optional[np.ndarray] = None, + initial_internal_states: Optional[np.ndarray] = None, + time_major: bool = False, + forget_bias: float = 1.0, +): + """Calculates LSTM layer output given weights/biases, states, and input. + + Args: + x: The inputs to the LSTM layer including time-rank + (0th if time-major, else 1st) and the batch-rank + (1st if time-major, else 0th). + weights: The weights matrix. 
+ biases: The biases vector. All 0s if None. + initial_internal_states: The initial internal + states to pass into the layer. All 0s if None. + time_major: Whether to use time-major or not. Default: False. + forget_bias: Gets added to first sigmoid (forget gate) output. + Default: 1.0. + + Returns: + Tuple consisting of 1) The LSTM layer's output and + 2) Tuple: Last (c-state, h-state). + """ + sequence_length = x.shape[0 if time_major else 1] + batch_size = x.shape[1 if time_major else 0] + units = weights.shape[1] // 4 # 4 internal layers (3x sigmoid, 1x tanh) + + if initial_internal_states is None: + c_states = np.zeros(shape=(batch_size, units)) + h_states = np.zeros(shape=(batch_size, units)) + else: + c_states = initial_internal_states[0] + h_states = initial_internal_states[1] + + # Create a placeholder for all n-time step outputs. + if time_major: + unrolled_outputs = np.zeros(shape=(sequence_length, batch_size, units)) + else: + unrolled_outputs = np.zeros(shape=(batch_size, sequence_length, units)) + + # Push the batch 4 times through the LSTM cell and capture the outputs plus + # the final h- and c-states. + for t in range(sequence_length): + input_matrix = x[t, :, :] if time_major else x[:, t, :] + input_matrix = np.concatenate((input_matrix, h_states), axis=1) + input_matmul_matrix = np.matmul(input_matrix, weights) + biases + # Forget gate (3rd slot in tf output matrix). Add static forget bias. + sigmoid_1 = sigmoid(input_matmul_matrix[:, units * 2 : units * 3] + forget_bias) + c_states = np.multiply(c_states, sigmoid_1) + # Add gate (1st and 2nd slots in tf output matrix). + sigmoid_2 = sigmoid(input_matmul_matrix[:, 0:units]) + tanh_3 = np.tanh(input_matmul_matrix[:, units : units * 2]) + c_states = np.add(c_states, np.multiply(sigmoid_2, tanh_3)) + # Output gate (last slot in tf output matrix). 
+ sigmoid_4 = sigmoid(input_matmul_matrix[:, units * 3 : units * 4]) + h_states = np.multiply(sigmoid_4, np.tanh(c_states)) + + # Store this output time-slice. + if time_major: + unrolled_outputs[t, :, :] = h_states + else: + unrolled_outputs[:, t, :] = h_states + + return unrolled_outputs, (c_states, h_states) + + +@PublicAPI +def one_hot( + x: Union[TensorType, int], + depth: int = 0, + on_value: float = 1.0, + off_value: float = 0.0, + dtype: type = np.float32, +) -> np.ndarray: + """One-hot utility function for numpy. + + Thanks to qianyizhang: + https://gist.github.com/qianyizhang/07ee1c15cad08afb03f5de69349efc30. + + Args: + x: The input to be one-hot encoded. + depth: The max. number to be one-hot encoded (size of last rank). + on_value: The value to use for on. Default: 1.0. + off_value: The value to use for off. Default: 0.0. + + Returns: + The one-hot encoded equivalent of the input array. + """ + + # Handle simple ints properly. + if isinstance(x, int): + x = np.array(x, dtype=np.int32) + # Handle torch arrays properly. + elif torch and isinstance(x, torch.Tensor): + x = x.numpy() + + # Handle bool arrays correctly. + if x.dtype == np.bool_: + x = x.astype(np.int_) + depth = 2 + + # If depth is not given, try to infer it from the values in the array. + if depth == 0: + depth = np.max(x) + 1 + assert ( + np.max(x) < depth + ), "ERROR: The max. index of `x` ({}) is larger than depth ({})!".format( + np.max(x), depth + ) + shape = x.shape + + out = np.ones(shape=(*shape, depth)) * off_value + indices = [] + for i in range(x.ndim): + tiles = [1] * x.ndim + s = [1] * x.ndim + s[i] = -1 + r = np.arange(shape[i]).reshape(s) + if i > 0: + tiles[i - 1] = shape[i - 1] + r = np.tile(r, tiles) + indices.append(r) + indices.append(x) + out[tuple(indices)] = on_value + return out.astype(dtype) + + +@PublicAPI +def one_hot_multidiscrete(x, depths=List[int]): + # Handle torch arrays properly. 
+ if torch and isinstance(x, torch.Tensor): + x = x.numpy() + + shape = x.shape + return np.concatenate( + [ + one_hot(x[i] if len(shape) == 1 else x[:, i], depth=n).astype(np.float32) + for i, n in enumerate(depths) + ], + axis=-1, + ) + + +@PublicAPI +def relu(x: np.ndarray, alpha: float = 0.0) -> np.ndarray: + """Implementation of the leaky ReLU function. + + y = x * alpha if x < 0 else x + + Args: + x: The input values. + alpha: A scaling ("leak") factor to use for negative x. + + Returns: + The leaky ReLU output for x. + """ + return np.maximum(x, x * alpha, x) + + +@PublicAPI +def sigmoid(x: np.ndarray, derivative: bool = False) -> np.ndarray: + """ + Returns the sigmoid function applied to x. + Alternatively, can return the derivative or the sigmoid function. + + Args: + x: The input to the sigmoid function. + derivative: Whether to return the derivative or not. + Default: False. + + Returns: + The sigmoid function (or its derivative) applied to x. + """ + if derivative: + return x * (1 - x) + else: + return 1 / (1 + np.exp(-x)) + + +@PublicAPI +def softmax( + x: Union[np.ndarray, list], axis: int = -1, epsilon: Optional[float] = None +) -> np.ndarray: + """Returns the softmax values for x. + + The exact formula used is: + S(xi) = e^xi / SUMj(e^xj), where j goes over all elements in x. + + Args: + x: The input to the softmax function. + axis: The axis along which to softmax. + epsilon: Optional epsilon as a minimum value. If None, use + `SMALL_NUMBER`. + + Returns: + The softmax over x. 
+ """ + epsilon = epsilon or SMALL_NUMBER + # x_exp = np.maximum(np.exp(x), SMALL_NUMBER) + x_exp = np.exp(x) + # return x_exp / + # np.maximum(np.sum(x_exp, axis, keepdims=True), SMALL_NUMBER) + return np.maximum(x_exp / np.sum(x_exp, axis, keepdims=True), epsilon) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/policy.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..9cadcb08b054749cfb824ec3df49a51f75d85226 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/policy.py @@ -0,0 +1,303 @@ +import gymnasium as gym +import logging +import numpy as np +from typing import ( + Callable, + Dict, + List, + Optional, + Tuple, + Type, + Union, + TYPE_CHECKING, +) +import tree # pip install dm_tree + + +import ray.cloudpickle as pickle +from ray.rllib.core.rl_module import validate_module_id +from ray.rllib.models.preprocessors import ATARI_OBS_SHAPE +from ray.rllib.policy.policy import PolicySpec +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.utils.deprecation import Deprecated +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.typing import ( + ActionConnectorDataType, + AgentConnectorDataType, + AgentConnectorsOutput, + PartialAlgorithmConfigDict, + PolicyState, + TensorStructType, + TensorType, +) +from ray.util import log_once +from ray.util.annotations import PublicAPI + +if TYPE_CHECKING: + from ray.rllib.policy.policy import Policy + +logger = logging.getLogger(__name__) + +tf1, tf, tfv = try_import_tf() + + +@PublicAPI +def create_policy_for_framework( + policy_id: str, + policy_class: Type["Policy"], + merged_config: PartialAlgorithmConfigDict, + observation_space: gym.Space, + action_space: gym.Space, + worker_index: int = 0, + session_creator: Optional[Callable[[], "tf1.Session"]] = None, + seed: Optional[int] = None, +): + """Framework-specific policy creation logics. 
+ + Args: + policy_id: Policy ID. + policy_class: Policy class type. + merged_config: Complete policy config. + observation_space: Observation space of env. + action_space: Action space of env. + worker_index: Index of worker holding this policy. Default is 0. + session_creator: An optional tf1.Session creation callable. + seed: Optional random seed. + """ + from ray.rllib.algorithms.algorithm_config import AlgorithmConfig + + if isinstance(merged_config, AlgorithmConfig): + merged_config = merged_config.to_dict() + + # add policy_id to merged_config + merged_config["__policy_id"] = policy_id + + framework = merged_config.get("framework", "tf") + # Tf. + if framework in ["tf2", "tf"]: + var_scope = policy_id + (f"_wk{worker_index}" if worker_index else "") + # For tf static graph, build every policy in its own graph + # and create a new session for it. + if framework == "tf": + with tf1.Graph().as_default(): + # Session creator function provided manually -> Use this one to + # create the tf1 session. + if session_creator: + sess = session_creator() + # Use a default session creator, based only on our `tf_session_args` in + # the config. + else: + sess = tf1.Session( + config=tf1.ConfigProto(**merged_config["tf_session_args"]) + ) + + with sess.as_default(): + # Set graph-level seed. + if seed is not None: + tf1.set_random_seed(seed) + with tf1.variable_scope(var_scope): + return policy_class( + observation_space, action_space, merged_config + ) + # For tf-eager: no graph, no session. + else: + with tf1.variable_scope(var_scope): + return policy_class(observation_space, action_space, merged_config) + # Non-tf: No graph, no session. + else: + return policy_class(observation_space, action_space, merged_config) + + +@PublicAPI(stability="alpha") +def parse_policy_specs_from_checkpoint( + path: str, +) -> Tuple[PartialAlgorithmConfigDict, Dict[str, PolicySpec], Dict[str, PolicyState]]: + """Read and parse policy specifications from a checkpoint file. 
+ + Args: + path: Path to a policy checkpoint. + + Returns: + A tuple of: base policy config, dictionary of policy specs, and + dictionary of policy states. + """ + with open(path, "rb") as f: + checkpoint_dict = pickle.load(f) + # Policy data is contained as a serialized binary blob under their + # ID keys. + w = pickle.loads(checkpoint_dict["worker"]) + + policy_config = w["policy_config"] + policy_states = w.get("policy_states", w["state"]) + serialized_policy_specs = w["policy_specs"] + policy_specs = { + id: PolicySpec.deserialize(spec) for id, spec in serialized_policy_specs.items() + } + + return policy_config, policy_specs, policy_states + + +@PublicAPI(stability="alpha") +def local_policy_inference( + policy: "Policy", + env_id: str, + agent_id: str, + obs: TensorStructType, + reward: Optional[float] = None, + terminated: Optional[bool] = None, + truncated: Optional[bool] = None, + info: Optional[Dict] = None, + explore: bool = None, + timestep: Optional[int] = None, +) -> TensorStructType: + """Run a connector enabled policy using environment observation. + + policy_inference manages policy and agent/action connectors, + so the user does not have to care about RNN state buffering or + extra fetch dictionaries. + Note that connectors are intentionally run separately from + compute_actions_from_input_dict(), so we can have the option + of running per-user connectors on the client side in a + server-client deployment. + + Args: + policy: Policy object used in inference. + env_id: Environment ID. RLlib builds environments' trajectories internally with + connectors based on this, i.e. one trajectory per (env_id, agent_id) tuple. + agent_id: Agent ID. RLlib builds agents' trajectories internally with connectors + based on this, i.e. one trajectory per (env_id, agent_id) tuple. + obs: Environment observation to base the action on. + reward: Reward that is potentially used during inference. If not required, + may be left empty. 
Some policies have ViewRequirements that require this. + This can be set to zero at the first inference step - for example after + calling gmy.Env.reset. + terminated: `Terminated` flag that is potentially used during inference. If not + required, may be left None. Some policies have ViewRequirements that + require this extra information. + truncated: `Truncated` flag that is potentially used during inference. If not + required, may be left None. Some policies have ViewRequirements that + require this extra information. + info: Info that is potentially used durin inference. If not required, + may be left empty. Some policies have ViewRequirements that require this. + explore: Whether to pick an exploitation or exploration action + (default: None -> use self.config["explore"]). + timestep: The current (sampling) time step. + + Returns: + List of outputs from policy forward pass. + """ + assert ( + policy.agent_connectors + ), "policy_inference only works with connector enabled policies." + + __check_atari_obs_space(obs) + + # Put policy in inference mode, so we don't spend time on training + # only transformations. + policy.agent_connectors.in_eval() + policy.action_connectors.in_eval() + + # TODO(jungong) : support multiple env, multiple agent inference. 
+ input_dict = {SampleBatch.NEXT_OBS: obs} + if reward is not None: + input_dict[SampleBatch.REWARDS] = reward + if terminated is not None: + input_dict[SampleBatch.TERMINATEDS] = terminated + if truncated is not None: + input_dict[SampleBatch.TRUNCATEDS] = truncated + if info is not None: + input_dict[SampleBatch.INFOS] = info + + acd_list: List[AgentConnectorDataType] = [ + AgentConnectorDataType(env_id, agent_id, input_dict) + ] + ac_outputs: List[AgentConnectorsOutput] = policy.agent_connectors(acd_list) + outputs = [] + for ac in ac_outputs: + policy_output = policy.compute_actions_from_input_dict( + ac.data.sample_batch, + explore=explore, + timestep=timestep, + ) + + # Note (Kourosh): policy output is batched, the AgentConnectorDataType should + # not be batched during inference. This is the assumption made in AgentCollector + policy_output = tree.map_structure(lambda x: x[0], policy_output) + + action_connector_data = ActionConnectorDataType( + env_id, agent_id, ac.data.raw_dict, policy_output + ) + + if policy.action_connectors: + acd = policy.action_connectors(action_connector_data) + actions = acd.output + else: + actions = policy_output[0] + + outputs.append(actions) + + # Notify agent connectors with this new policy output. + # Necessary for state buffering agent connectors, for example. + policy.agent_connectors.on_policy_output(action_connector_data) + return outputs + + +@PublicAPI +def compute_log_likelihoods_from_input_dict( + policy: "Policy", batch: Union[SampleBatch, Dict[str, TensorStructType]] +): + """Returns log likelihood for actions in given batch for policy. + + Computes likelihoods by passing the observations through the current + policy's `compute_log_likelihoods()` method + + Args: + batch: The SampleBatch or MultiAgentBatch to calculate action + log likelihoods from. This batch/batches must contain OBS + and ACTIONS keys. + + Returns: + The probabilities of the actions in the batch, given the + observations and the policy. 
+ """ + num_state_inputs = 0 + for k in batch.keys(): + if k.startswith("state_in_"): + num_state_inputs += 1 + state_keys = ["state_in_{}".format(i) for i in range(num_state_inputs)] + log_likelihoods: TensorType = policy.compute_log_likelihoods( + actions=batch[SampleBatch.ACTIONS], + obs_batch=batch[SampleBatch.OBS], + state_batches=[batch[k] for k in state_keys], + prev_action_batch=batch.get(SampleBatch.PREV_ACTIONS), + prev_reward_batch=batch.get(SampleBatch.PREV_REWARDS), + actions_normalized=policy.config.get("actions_in_input_normalized", False), + ) + return log_likelihoods + + +@Deprecated(new="Policy.from_checkpoint([checkpoint path], [policy IDs]?)", error=True) +def load_policies_from_checkpoint(path, policy_ids=None): + pass + + +def __check_atari_obs_space(obs): + # TODO(Artur): Remove this after we have migrated deepmind style preprocessing into + # connectors (and don't auto-wrap in RW anymore) + if any( + o.shape == ATARI_OBS_SHAPE if isinstance(o, np.ndarray) else False + for o in tree.flatten(obs) + ): + if log_once("warn_about_possibly_non_wrapped_atari_env"): + logger.warning( + "The observation you fed into local_policy_inference() has " + "dimensions (210, 160, 3), which is the standard for atari " + "environments. If RLlib raises an error including a related " + "dimensionality mismatch, you may need to use " + "ray.rllib.env.wrappers.atari_wrappers.wrap_deepmind to wrap " + "you environment." 
+ ) + + +# @OldAPIStack +validate_policy_id = validate_module_id diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/serialization.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..30eb1aacc5d466f623d8efbeaaac64dec5787f9a --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/serialization.py @@ -0,0 +1,418 @@ +import base64 +from collections import OrderedDict +import importlib +import io +import zlib +from typing import Any, Dict, Optional, Sequence, Type, Union + +import gymnasium as gym +import numpy as np + +import ray +from ray.rllib.utils.annotations import DeveloperAPI +from ray.rllib.utils.error import NotSerializable +from ray.rllib.utils.spaces.flexdict import FlexDict +from ray.rllib.utils.spaces.repeated import Repeated +from ray.rllib.utils.spaces.simplex import Simplex + +NOT_SERIALIZABLE = "__not_serializable__" + + +@DeveloperAPI +def convert_numpy_to_python_primitives(obj: Any): + """Convert an object that is a numpy type to a python type. + + If the object is not a numpy type, it is returned unchanged. + + Args: + obj: The object to convert. + """ + if isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, np.bool_): + return bool(obj) + elif isinstance(obj, np.str_): + return str(obj) + elif isinstance(obj, np.ndarray): + ret = obj.tolist() + for i, v in enumerate(ret): + ret[i] = convert_numpy_to_python_primitives(v) + return ret + else: + return obj + + +def _serialize_ndarray(array: np.ndarray) -> str: + """Pack numpy ndarray into Base64 encoded strings for serialization. + + This function uses numpy.save() instead of pickling to ensure + compatibility. + + Args: + array: numpy ndarray. + + Returns: + b64 escaped string. 
+ """ + buf = io.BytesIO() + np.save(buf, array) + return base64.b64encode(zlib.compress(buf.getvalue())).decode("ascii") + + +def _deserialize_ndarray(b64_string: str) -> np.ndarray: + """Unpack b64 escaped string into numpy ndarray. + + This function assumes the unescaped bytes are of npy format. + + Args: + b64_string: Base64 escaped string. + + Returns: + numpy ndarray. + """ + return np.load( + io.BytesIO(zlib.decompress(base64.b64decode(b64_string))), allow_pickle=True + ) + + +@DeveloperAPI +def gym_space_to_dict(space: gym.spaces.Space) -> Dict: + """Serialize a gym Space into a JSON-serializable dict. + + Args: + space: gym.spaces.Space + + Returns: + Serialized JSON string. + """ + if space is None: + return None + + def _box(sp: gym.spaces.Box) -> Dict: + return { + "space": "box", + "low": _serialize_ndarray(sp.low), + "high": _serialize_ndarray(sp.high), + "shape": sp._shape, # shape is a tuple. + "dtype": sp.dtype.str, + } + + def _discrete(sp: gym.spaces.Discrete) -> Dict: + d = { + "space": "discrete", + "n": int(sp.n), + } + # Offset is a relatively new Discrete space feature. + if hasattr(sp, "start"): + d["start"] = int(sp.start) + return d + + def _multi_binary(sp: gym.spaces.MultiBinary) -> Dict: + return { + "space": "multi-binary", + "n": sp.n, + } + + def _multi_discrete(sp: gym.spaces.MultiDiscrete) -> Dict: + return { + "space": "multi-discrete", + "nvec": _serialize_ndarray(sp.nvec), + "dtype": sp.dtype.str, + } + + def _tuple(sp: gym.spaces.Tuple) -> Dict: + return { + "space": "tuple", + "spaces": [gym_space_to_dict(sp) for sp in sp.spaces], + } + + def _dict(sp: gym.spaces.Dict) -> Dict: + return { + "space": "dict", + "spaces": {k: gym_space_to_dict(sp) for k, sp in sp.spaces.items()}, + } + + def _simplex(sp: Simplex) -> Dict: + return { + "space": "simplex", + "shape": sp._shape, # shape is a tuple. 
+ "concentration": sp.concentration, + "dtype": sp.dtype.str, + } + + def _repeated(sp: Repeated) -> Dict: + return { + "space": "repeated", + "child_space": gym_space_to_dict(sp.child_space), + "max_len": sp.max_len, + } + + def _flex_dict(sp: FlexDict) -> Dict: + d = { + "space": "flex_dict", + } + for k, s in sp.spaces: + d[k] = gym_space_to_dict(s) + return d + + def _text(sp: "gym.spaces.Text") -> Dict: + # Note (Kourosh): This only works in gym >= 0.25.0 + charset = getattr(sp, "character_set", None) + if charset is None: + charset = getattr(sp, "charset", None) + if charset is None: + raise ValueError( + "Text space must have a character_set or charset attribute" + ) + return { + "space": "text", + "min_length": sp.min_length, + "max_length": sp.max_length, + "charset": charset, + } + + if isinstance(space, gym.spaces.Box): + return _box(space) + elif isinstance(space, gym.spaces.Discrete): + return _discrete(space) + elif isinstance(space, gym.spaces.MultiBinary): + return _multi_binary(space) + elif isinstance(space, gym.spaces.MultiDiscrete): + return _multi_discrete(space) + elif isinstance(space, gym.spaces.Tuple): + return _tuple(space) + elif isinstance(space, gym.spaces.Dict): + return _dict(space) + elif isinstance(space, gym.spaces.Text): + return _text(space) + elif isinstance(space, Simplex): + return _simplex(space) + elif isinstance(space, Repeated): + return _repeated(space) + elif isinstance(space, FlexDict): + return _flex_dict(space) + else: + raise ValueError("Unknown space type for serialization, ", type(space)) + + +@DeveloperAPI +def space_to_dict(space: gym.spaces.Space) -> Dict: + d = {"space": gym_space_to_dict(space)} + if "original_space" in space.__dict__: + d["original_space"] = space_to_dict(space.original_space) + return d + + +@DeveloperAPI +def gym_space_from_dict(d: Dict) -> gym.spaces.Space: + """De-serialize a dict into gym Space. + + Args: + str: serialized JSON str. + + Returns: + De-serialized gym space. 
+ """ + if d is None: + return None + + def __common(d: Dict): + """Common updates to the dict before we use it to construct spaces""" + ret = d.copy() + del ret["space"] + if "dtype" in ret: + ret["dtype"] = np.dtype(ret["dtype"]) + return ret + + def _box(d: Dict) -> gym.spaces.Box: + ret = d.copy() + ret.update( + { + "low": _deserialize_ndarray(d["low"]), + "high": _deserialize_ndarray(d["high"]), + } + ) + return gym.spaces.Box(**__common(ret)) + + def _discrete(d: Dict) -> gym.spaces.Discrete: + return gym.spaces.Discrete(**__common(d)) + + def _multi_binary(d: Dict) -> gym.spaces.MultiBinary: + return gym.spaces.MultiBinary(**__common(d)) + + def _multi_discrete(d: Dict) -> gym.spaces.MultiDiscrete: + ret = d.copy() + ret.update( + { + "nvec": _deserialize_ndarray(ret["nvec"]), + } + ) + return gym.spaces.MultiDiscrete(**__common(ret)) + + def _tuple(d: Dict) -> gym.spaces.Discrete: + spaces = [gym_space_from_dict(sp) for sp in d["spaces"]] + return gym.spaces.Tuple(spaces=spaces) + + def _dict(d: Dict) -> gym.spaces.Discrete: + # We need to always use an OrderedDict here to cover the following two ways, by + # which a user might construct a Dict space originally. We need to restore this + # original Dict space with the exact order of keys the user intended to. + # - User provides an OrderedDict inside the gym.spaces.Dict constructor -> + # gymnasium should NOT further sort the keys. The same (user-provided) order + # must be restored. + # - User provides a simple dict inside the gym.spaces.Dict constructor -> + # By its API definition, gymnasium automatically sorts all keys alphabetically. + # The same (alphabetical) order must thus be restored. 
+ spaces = OrderedDict( + {k: gym_space_from_dict(sp) for k, sp in d["spaces"].items()} + ) + return gym.spaces.Dict(spaces=spaces) + + def _simplex(d: Dict) -> Simplex: + return Simplex(**__common(d)) + + def _repeated(d: Dict) -> Repeated: + child_space = gym_space_from_dict(d["child_space"]) + return Repeated(child_space=child_space, max_len=d["max_len"]) + + def _flex_dict(d: Dict) -> FlexDict: + spaces = {k: gym_space_from_dict(s) for k, s in d.items() if k != "space"} + return FlexDict(spaces=spaces) + + def _text(d: Dict) -> "gym.spaces.Text": + return gym.spaces.Text(**__common(d)) + + space_map = { + "box": _box, + "discrete": _discrete, + "multi-binary": _multi_binary, + "multi-discrete": _multi_discrete, + "tuple": _tuple, + "dict": _dict, + "simplex": _simplex, + "repeated": _repeated, + "flex_dict": _flex_dict, + "text": _text, + } + + space_type = d["space"] + if space_type not in space_map: + raise ValueError("Unknown space type for de-serialization, ", space_type) + + return space_map[space_type](d) + + +@DeveloperAPI +def space_from_dict(d: Dict) -> gym.spaces.Space: + space = gym_space_from_dict(d["space"]) + if "original_space" in d: + assert "space" in d["original_space"] + if isinstance(d["original_space"]["space"], str): + # For backward compatibility reasons, if d["original_space"]["space"] + # is a string, this original space was serialized by gym_space_to_dict. + space.original_space = gym_space_from_dict(d["original_space"]) + else: + # Otherwise, this original space was serialized by space_to_dict. + space.original_space = space_from_dict(d["original_space"]) + return space + + +@DeveloperAPI +def check_if_args_kwargs_serializable(args: Sequence[Any], kwargs: Dict[str, Any]): + """Check if parameters to a function are serializable by ray. + + Args: + args: arguments to be checked. + kwargs: keyword arguments to be checked. + + Raises: + NoteSerializable if either args are kwargs are not serializable + by ray. 
+ """ + for arg in args: + try: + # if the object is truly serializable we should be able to + # ray.put and ray.get it. + ray.get(ray.put(arg)) + except TypeError as e: + raise NotSerializable( + "RLModule constructor arguments must be serializable. " + f"Found non-serializable argument: {arg}.\n" + f"Original serialization error: {e}" + ) + for k, v in kwargs.items(): + try: + # if the object is truly serializable we should be able to + # ray.put and ray.get it. + ray.get(ray.put(v)) + except TypeError as e: + raise NotSerializable( + "RLModule constructor arguments must be serializable. " + f"Found non-serializable keyword argument: {k} = {v}.\n" + f"Original serialization error: {e}" + ) + + +@DeveloperAPI +def serialize_type(type_: Union[Type, str]) -> str: + """Converts a type into its full classpath ([module file] + "." + [class name]). + + Args: + type_: The type to convert. + + Returns: + The full classpath of the given type, e.g. "ray.rllib.algorithms.ppo.PPOConfig". + """ + # TODO (avnishn): find a way to incorporate the tune registry here. + # Already serialized. + if isinstance(type_, str): + return type_ + + return type_.__module__ + "." + type_.__qualname__ + + +@DeveloperAPI +def deserialize_type( + module: Union[str, Type], error: bool = False +) -> Optional[Union[str, Type]]: + """Resolves a class path to a class. + If the given module is already a class, it is returned as is. + If the given module is a string, it is imported and the class is returned. + + Args: + module: The classpath (str) or type to resolve. + error: Whether to throw a ValueError if `module` could not be resolved into + a class. If False and `module` is not resolvable, returns None. + + Returns: + The resolved class or `module` (if `error` is False and no resolution possible). + + Raises: + ValueError: If `error` is True and `module` cannot be resolved. + """ + # Already a class, return as-is. + if isinstance(module, type): + return module + # A string. 
+ elif isinstance(module, str): + # Try interpreting (as classpath) and importing the given module. + try: + module_path, class_name = module.rsplit(".", 1) + module = importlib.import_module(module_path) + return getattr(module, class_name) + # Module not found OR not a module (but a registered string?). + except (ModuleNotFoundError, ImportError, AttributeError, ValueError) as e: + # Ignore if error=False. + if error: + raise ValueError( + f"Could not deserialize the given classpath `module={module}` into " + "a valid python class! Make sure you have all necessary pip " + "packages installed and all custom modules are in your " + "`PYTHONPATH` env variable." + ) from e + else: + raise ValueError(f"`module` ({module} must be type or string (classpath)!") + + return module diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/sgd.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..3e126c0a2f450147d9d3f9653ae0b320da778ac7 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/sgd.py @@ -0,0 +1,136 @@ +"""Utils for minibatch SGD across multiple RLlib policies.""" + +import logging +import numpy as np +import random + +from ray.rllib.utils.annotations import OldAPIStack +from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch +from ray.rllib.utils.metrics.learner_info import LearnerInfoBuilder + +logger = logging.getLogger(__name__) + + +@OldAPIStack +def standardized(array: np.ndarray): + """Normalize the values in an array. + + Args: + array (np.ndarray): Array of values to normalize. + + Returns: + array with zero mean and unit standard deviation. + """ + return (array - array.mean()) / max(1e-4, array.std()) + + +@OldAPIStack +def minibatches(samples: SampleBatch, sgd_minibatch_size: int, shuffle: bool = True): + """Return a generator yielding minibatches from a sample batch. + + Args: + samples: SampleBatch to split up. 
+ sgd_minibatch_size: Size of minibatches to return. + shuffle: Whether to shuffle the order of the generated minibatches. + Note that in case of a non-recurrent policy, the incoming batch + is globally shuffled first regardless of this setting, before + the minibatches are generated from it! + + Yields: + SampleBatch: Each of size `sgd_minibatch_size`. + """ + if not sgd_minibatch_size: + yield samples + return + + if isinstance(samples, MultiAgentBatch): + raise NotImplementedError( + "Minibatching not implemented for multi-agent in simple mode" + ) + + if "state_in_0" not in samples and "state_out_0" not in samples: + samples.shuffle() + + all_slices = samples._get_slice_indices(sgd_minibatch_size) + data_slices, state_slices = all_slices + + if len(state_slices) == 0: + if shuffle: + random.shuffle(data_slices) + for i, j in data_slices: + yield samples[i:j] + else: + all_slices = list(zip(data_slices, state_slices)) + if shuffle: + # Make sure to shuffle data and states while linked together. + random.shuffle(all_slices) + for (i, j), (si, sj) in all_slices: + yield samples.slice(i, j, si, sj) + + +@OldAPIStack +def do_minibatch_sgd( + samples, + policies, + local_worker, + num_sgd_iter, + sgd_minibatch_size, + standardize_fields, +): + """Execute minibatch SGD. + + Args: + samples: Batch of samples to optimize. + policies: Dictionary of policies to optimize. + local_worker: Master rollout worker instance. + num_sgd_iter: Number of epochs of optimization to take. + sgd_minibatch_size: Size of minibatches to use for optimization. + standardize_fields: List of sample field names that should be + normalized prior to optimization. + + Returns: + averaged info fetches over the last SGD epoch taken. + """ + + # Handle everything as if multi-agent. + samples = samples.as_multi_agent() + + # Use LearnerInfoBuilder as a unified way to build the final + # results dict from `learn_on_loaded_batch` call(s). 
+ # This makes sure results dicts always have the same structure + # no matter the setup (multi-GPU, multi-agent, minibatch SGD, + # tf vs torch). + learner_info_builder = LearnerInfoBuilder(num_devices=1) + for policy_id, policy in policies.items(): + if policy_id not in samples.policy_batches: + continue + + batch = samples.policy_batches[policy_id] + for field in standardize_fields: + batch[field] = standardized(batch[field]) + + # Check to make sure that the sgd_minibatch_size is not smaller + # than max_seq_len otherwise this will cause indexing errors while + # performing sgd when using a RNN or Attention model + if ( + policy.is_recurrent() + and policy.config["model"]["max_seq_len"] > sgd_minibatch_size + ): + raise ValueError( + "`sgd_minibatch_size` ({}) cannot be smaller than" + "`max_seq_len` ({}).".format( + sgd_minibatch_size, policy.config["model"]["max_seq_len"] + ) + ) + + for i in range(num_sgd_iter): + for minibatch in minibatches(batch, sgd_minibatch_size): + results = ( + local_worker.learn_on_batch( + MultiAgentBatch({policy_id: minibatch}, minibatch.count) + ) + )[policy_id] + learner_info_builder.add_learn_on_batch_results(results, policy_id) + + learner_info = learner_info_builder.finalize() + return learner_info diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tensor_dtype.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tensor_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..c1865756d89303d13c724cc1b9ff3cb4b781d400 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tensor_dtype.py @@ -0,0 +1,65 @@ +import numpy as np + +from ray.rllib.utils.typing import TensorType +from ray.rllib.utils.framework import try_import_torch, try_import_tf +from ray.util.annotations import PublicAPI + +torch, _ = try_import_torch() +_, tf, _ = try_import_tf() + + +# Dict of NumPy dtype -> torch dtype +if torch: + numpy_to_torch_dtype_dict = { + np.bool_: torch.bool, + np.uint8: 
torch.uint8, + np.int8: torch.int8, + np.int16: torch.int16, + np.int32: torch.int32, + np.int64: torch.int64, + np.float16: torch.float16, + np.float32: torch.float32, + np.float64: torch.float64, + np.complex64: torch.complex64, + np.complex128: torch.complex128, + } +else: + numpy_to_torch_dtype_dict = {} + +# Dict of NumPy dtype -> tf dtype +if tf: + numpy_to_tf_dtype_dict = { + np.bool_: tf.bool, + np.uint8: tf.uint8, + np.int8: tf.int8, + np.int16: tf.int16, + np.int32: tf.int32, + np.int64: tf.int64, + np.float16: tf.float16, + np.float32: tf.float32, + np.float64: tf.float64, + np.complex64: tf.complex64, + np.complex128: tf.complex128, + } +else: + numpy_to_tf_dtype_dict = {} + +# Dict of torch dtype -> NumPy dtype +torch_to_numpy_dtype_dict = { + value: key for (key, value) in numpy_to_torch_dtype_dict.items() +} +# Dict of tf dtype -> NumPy dtype +tf_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_tf_dtype_dict.items()} + + +@PublicAPI(stability="alpha") +def get_np_dtype(x: TensorType) -> np.dtype: + """Returns the NumPy dtype of the given tensor or array.""" + if torch and isinstance(x, torch.Tensor): + return torch_to_numpy_dtype_dict[x.dtype] + if tf and isinstance(x, tf.Tensor): + return tf_to_numpy_dtype_dict[x.dtype] + elif isinstance(x, np.ndarray): + return x.dtype + else: + raise TypeError("Unsupported type: {}".format(type(x))) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/test_utils.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0973188d88487b74ae1f42f051eb06aa88742bb1 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/test_utils.py @@ -0,0 +1,1817 @@ +import argparse +import json +import logging +import os +import pprint +import random +import re +import time +from typing import ( + TYPE_CHECKING, + Any, + Dict, + List, + Optional, + Tuple, + Type, + Union, +) + +import gymnasium as gym 
+from gymnasium.spaces import Box, Discrete, MultiDiscrete, MultiBinary +from gymnasium.spaces import Dict as GymDict +from gymnasium.spaces import Tuple as GymTuple +import numpy as np +import tree # pip install dm_tree + +import ray +from ray import air, tune +from ray.air.constants import TRAINING_ITERATION +from ray.air.integrations.wandb import WandbLoggerCallback, WANDB_ENV_VAR +from ray.rllib.core import DEFAULT_MODULE_ID, Columns +from ray.rllib.env.wrappers.atari_wrappers import is_atari, wrap_deepmind +from ray.rllib.utils.annotations import OldAPIStack +from ray.rllib.utils.framework import try_import_jax, try_import_tf, try_import_torch +from ray.rllib.utils.metrics import ( + DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY, + ENV_RUNNER_RESULTS, + EPISODE_RETURN_MEAN, + EVALUATION_RESULTS, + NUM_ENV_STEPS_TRAINED, + NUM_ENV_STEPS_SAMPLED_LIFETIME, +) +from ray.rllib.utils.typing import ResultDict +from ray.rllib.utils.error import UnsupportedSpaceException + + +from ray.tune import CLIReporter + + +if TYPE_CHECKING: + from ray.rllib.algorithms import Algorithm, AlgorithmConfig + from ray.rllib.offline.dataset_reader import DatasetReader + +jax, _ = try_import_jax() +tf1, tf, tfv = try_import_tf() +torch, _ = try_import_torch() + +logger = logging.getLogger(__name__) + + +def add_rllib_example_script_args( + parser: Optional[argparse.ArgumentParser] = None, + default_reward: float = 100.0, + default_iters: int = 200, + default_timesteps: int = 100000, +) -> argparse.ArgumentParser: + """Adds RLlib-typical (and common) examples scripts command line args to a parser. + + TODO (sven): This function should be used by most of our examples scripts, which + already mostly have this logic in them (but written out). + + Args: + parser: The parser to add the arguments to. If None, create a new one. + default_reward: The default value for the --stop-reward option. + default_iters: The default value for the --stop-iters option. 
+ default_timesteps: The default value for the --stop-timesteps option. + + Returns: + The altered (or newly created) parser object. + """ + if parser is None: + parser = argparse.ArgumentParser() + + # Algo and Algo config options. + parser.add_argument( + "--algo", type=str, default="PPO", help="The RLlib-registered algorithm to use." + ) + parser.add_argument( + "--enable-new-api-stack", + action="store_true", + help="Whether to use the `enable_rl_module_and_learner` config setting.", + ) + parser.add_argument( + "--framework", + choices=["tf", "tf2", "torch"], + default="torch", + help="The DL framework specifier.", + ) + parser.add_argument( + "--env", + type=str, + default=None, + help="The gym.Env identifier to run the experiment with.", + ) + parser.add_argument( + "--num-env-runners", + type=int, + default=None, + help="The number of (remote) EnvRunners to use for the experiment.", + ) + parser.add_argument( + "--num-envs-per-env-runner", + type=int, + default=None, + help="The number of (vectorized) environments per EnvRunner. Note that " + "this is identical to the batch size for (inference) action computations.", + ) + parser.add_argument( + "--num-agents", + type=int, + default=0, + help="If 0 (default), will run as single-agent. If > 0, will run as " + "multi-agent with the environment simply cloned n times and each agent acting " + "independently at every single timestep. The overall reward for this " + "experiment is then the sum over all individual agents' rewards.", + ) + + # Evaluation options. + parser.add_argument( + "--evaluation-num-env-runners", + type=int, + default=0, + help="The number of evaluation (remote) EnvRunners to use for the experiment.", + ) + parser.add_argument( + "--evaluation-interval", + type=int, + default=0, + help="Every how many iterations to run one round of evaluation. 
" + "Use 0 (default) to disable evaluation.", + ) + parser.add_argument( + "--evaluation-duration", + type=lambda v: v if v == "auto" else int(v), + default=10, + help="The number of evaluation units to run each evaluation round. " + "Use `--evaluation-duration-unit` to count either in 'episodes' " + "or 'timesteps'. If 'auto', will run as many as possible during train pass (" + "`--evaluation-parallel-to-training` must be set then).", + ) + parser.add_argument( + "--evaluation-duration-unit", + type=str, + default="episodes", + choices=["episodes", "timesteps"], + help="The evaluation duration unit to count by. One of 'episodes' or " + "'timesteps'. This unit will be run `--evaluation-duration` times in each " + "evaluation round. If `--evaluation-duration=auto`, this setting does not " + "matter.", + ) + parser.add_argument( + "--evaluation-parallel-to-training", + action="store_true", + help="Whether to run evaluation parallel to training. This might help speed up " + "your overall iteration time. Be aware that when using this option, your " + "reported evaluation results are referring to one iteration before the current " + "one.", + ) + + # RLlib logging options. + parser.add_argument( + "--output", + type=str, + default=None, + help="The output directory to write trajectories to, which are collected by " + "the algo's EnvRunners.", + ) + parser.add_argument( + "--log-level", + type=str, + default=None, # None -> use default + choices=["INFO", "DEBUG", "WARN", "ERROR"], + help="The log-level to be used by the RLlib logger.", + ) + + # tune.Tuner options. 
+ parser.add_argument( + "--no-tune", + action="store_true", + help="Whether to NOT use tune.Tuner(), but rather a simple for-loop calling " + "`algo.train()` repeatedly until one of the stop criteria is met.", + ) + parser.add_argument( + "--num-samples", + type=int, + default=1, + help="How many (tune.Tuner.fit()) experiments to execute - if possible in " + "parallel.", + ) + parser.add_argument( + "--max-concurrent-trials", + type=int, + default=None, + help="How many (tune.Tuner) trials to run concurrently.", + ) + parser.add_argument( + "--verbose", + type=int, + default=2, + help="The verbosity level for the `tune.Tuner()` running the experiment.", + ) + parser.add_argument( + "--checkpoint-freq", + type=int, + default=0, + help=( + "The frequency (in training iterations) with which to create checkpoints. " + "Note that if --wandb-key is provided, all checkpoints will " + "automatically be uploaded to WandB." + ), + ) + parser.add_argument( + "--checkpoint-at-end", + action="store_true", + help=( + "Whether to create a checkpoint at the very end of the experiment. " + "Note that if --wandb-key is provided, all checkpoints will " + "automatically be uploaded to WandB." + ), + ) + + # WandB logging options. + parser.add_argument( + "--wandb-key", + type=str, + default=None, + help="The WandB API key to use for uploading results.", + ) + parser.add_argument( + "--wandb-project", + type=str, + default=None, + help="The WandB project name to use.", + ) + parser.add_argument( + "--wandb-run-name", + type=str, + default=None, + help="The WandB run name to use.", + ) + + # Experiment stopping and testing criteria. 
+ parser.add_argument( + "--stop-reward", + type=float, + default=default_reward, + help="Reward at which the script should stop training.", + ) + parser.add_argument( + "--stop-iters", + type=int, + default=default_iters, + help="The number of iterations to train.", + ) + parser.add_argument( + "--stop-timesteps", + type=int, + default=default_timesteps, + help="The number of (environment sampling) timesteps to train.", + ) + parser.add_argument( + "--as-test", + action="store_true", + help="Whether this script should be run as a test. If set, --stop-reward must " + "be achieved within --stop-timesteps AND --stop-iters, otherwise this " + "script will throw an exception at the end.", + ) + parser.add_argument( + "--as-release-test", + action="store_true", + help="Whether this script should be run as a release test. If set, " + "all that applies to the --as-test option is true, plus, a short JSON summary " + "will be written into a results file whose location is given by the ENV " + "variable `TEST_OUTPUT_JSON`.", + ) + + # Learner scaling options. + parser.add_argument( + "--num-learners", + type=int, + default=None, + help="The number of Learners to use. If none, use the algorithm's default " + "value.", + ) + parser.add_argument( + "--num-gpus-per-learner", + type=float, + default=None, + help="The number of GPUs per Learner to use. If none and there are enough GPUs " + "for all required Learners (--num-learners), use a value of 1, otherwise 0.", + ) + + # Ray init options. + parser.add_argument("--num-cpus", type=int, default=0) + parser.add_argument( + "--local-mode", + action="store_true", + help="Init Ray in local mode for easier debugging.", + ) + + # Old API stack: config.num_gpus. 
+ parser.add_argument( + "--num-gpus", + type=int, + default=0, + help="The number of GPUs to use (if on the old API stack).", + ) + + return parser + + +def check(x, y, decimals=5, atol=None, rtol=None, false=False): + """ + Checks two structures (dict, tuple, list, + np.array, float, int, etc..) for (almost) numeric identity. + All numbers in the two structures have to match up to `decimal` digits + after the floating point. Uses assertions. + + Args: + x: The value to be compared (to the expectation: `y`). This + may be a Tensor. + y: The expected value to be compared to `x`. This must not + be a tf-Tensor, but may be a tf/torch-Tensor. + decimals: The number of digits after the floating point up to + which all numeric values have to match. + atol: Absolute tolerance of the difference between x and y + (overrides `decimals` if given). + rtol: Relative tolerance of the difference between x and y + (overrides `decimals` if given). + false: Whether to check that x and y are NOT the same. + """ + # A dict type. + if isinstance(x, dict): + assert isinstance(y, dict), "ERROR: If x is dict, y needs to be a dict as well!" + y_keys = set(x.keys()) + for key, value in x.items(): + assert key in y, f"ERROR: y does not have x's key='{key}'! y={y}" + check(value, y[key], decimals=decimals, atol=atol, rtol=rtol, false=false) + y_keys.remove(key) + assert not y_keys, "ERROR: y contains keys ({}) that are not in x! y={}".format( + list(y_keys), y + ) + # A tuple type. + elif isinstance(x, (tuple, list)): + assert isinstance( + y, (tuple, list) + ), "ERROR: If x is tuple/list, y needs to be a tuple/list as well!" + assert len(y) == len( + x + ), "ERROR: y does not have the same length as x ({} vs {})!".format( + len(y), len(x) + ) + for i, value in enumerate(x): + check(value, y[i], decimals=decimals, atol=atol, rtol=rtol, false=false) + # Boolean comparison. 
+ elif isinstance(x, (np.bool_, bool)): + if false is True: + assert bool(x) is not bool(y), f"ERROR: x ({x}) is y ({y})!" + else: + assert bool(x) is bool(y), f"ERROR: x ({x}) is not y ({y})!" + # Nones or primitives (excluding int vs float, which should be compared with + # tolerance/decimals as well). + elif ( + x is None + or y is None + or isinstance(x, str) + or (isinstance(x, int) and isinstance(y, int)) + ): + if false is True: + assert x != y, f"ERROR: x ({x}) is the same as y ({y})!" + else: + assert x == y, f"ERROR: x ({x}) is not the same as y ({y})!" + # String/byte comparisons. + elif ( + hasattr(x, "dtype") and (x.dtype == object or str(x.dtype).startswith(" raise error (not expected to be equal). + if false is True: + assert False, f"ERROR: x ({x}) is the same as y ({y})!" + + # Using atol/rtol. + else: + # Provide defaults for either one of atol/rtol. + if atol is None: + atol = 0 + if rtol is None: + rtol = 1e-7 + try: + np.testing.assert_allclose(x, y, atol=atol, rtol=rtol) + except AssertionError as e: + if false is False: + raise e + else: + if false is True: + assert False, f"ERROR: x ({x}) is the same as y ({y})!" + + +def check_compute_single_action( + algorithm, include_state=False, include_prev_action_reward=False +): + """Tests different combinations of args for algorithm.compute_single_action. + + Args: + algorithm: The Algorithm object to test. + include_state: Whether to include the initial state of the Policy's + Model in the `compute_single_action` call. + include_prev_action_reward: Whether to include the prev-action and + -reward in the `compute_single_action` call. + + Raises: + ValueError: If anything unexpected happens. + """ + # Have to import this here to avoid circular dependency. + from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch + + # Some Algorithms may not abide to the standard API. 
+ pid = DEFAULT_POLICY_ID + try: + # Multi-agent: Pick any learnable policy (or DEFAULT_POLICY if it's the only + # one). + pid = next(iter(algorithm.env_runner.get_policies_to_train())) + pol = algorithm.get_policy(pid) + except AttributeError: + pol = algorithm.policy + # Get the policy's model. + model = pol.model + + action_space = pol.action_space + + def _test( + what, method_to_test, obs_space, full_fetch, explore, timestep, unsquash, clip + ): + call_kwargs = {} + if what is algorithm: + call_kwargs["full_fetch"] = full_fetch + call_kwargs["policy_id"] = pid + + obs = obs_space.sample() + if isinstance(obs_space, Box): + obs = np.clip(obs, -1.0, 1.0) + state_in = None + if include_state: + state_in = model.get_initial_state() + if not state_in: + state_in = [] + i = 0 + while f"state_in_{i}" in model.view_requirements: + state_in.append( + model.view_requirements[f"state_in_{i}"].space.sample() + ) + i += 1 + action_in = action_space.sample() if include_prev_action_reward else None + reward_in = 1.0 if include_prev_action_reward else None + + if method_to_test == "input_dict": + assert what is pol + + input_dict = {SampleBatch.OBS: obs} + if include_prev_action_reward: + input_dict[SampleBatch.PREV_ACTIONS] = action_in + input_dict[SampleBatch.PREV_REWARDS] = reward_in + if state_in: + if what.config.get("enable_rl_module_and_learner", False): + input_dict["state_in"] = state_in + else: + for i, s in enumerate(state_in): + input_dict[f"state_in_{i}"] = s + input_dict_batched = SampleBatch( + tree.map_structure(lambda s: np.expand_dims(s, 0), input_dict) + ) + action = pol.compute_actions_from_input_dict( + input_dict=input_dict_batched, + explore=explore, + timestep=timestep, + **call_kwargs, + ) + # Unbatch everything to be able to compare against single + # action below. + # ARS and ES return action batches as lists. 
+ if isinstance(action[0], list): + action = (np.array(action[0]), action[1], action[2]) + action = tree.map_structure(lambda s: s[0], action) + + try: + action2 = pol.compute_single_action( + input_dict=input_dict, + explore=explore, + timestep=timestep, + **call_kwargs, + ) + # Make sure these are the same, unless we have exploration + # switched on (or noisy layers). + if not explore and not pol.config.get("noisy"): + check(action, action2) + except TypeError: + pass + else: + action = what.compute_single_action( + obs, + state_in, + prev_action=action_in, + prev_reward=reward_in, + explore=explore, + timestep=timestep, + unsquash_action=unsquash, + clip_action=clip, + **call_kwargs, + ) + + state_out = None + if state_in or full_fetch or what is pol: + action, state_out, _ = action + if state_out: + for si, so in zip(tree.flatten(state_in), tree.flatten(state_out)): + if tf.is_tensor(si): + # If si is a tensor of Dimensions, we need to convert it + # We expect this to be the case for TF RLModules who's initial + # states are Tf Tensors. + si_shape = si.shape.as_list() + else: + si_shape = list(si.shape) + check(si_shape, so.shape) + + if unsquash is None: + unsquash = what.config["normalize_actions"] + if clip is None: + clip = what.config["clip_actions"] + + # Test whether unsquash/clipping works on the Algorithm's + # compute_single_action method: Both flags should force the action + # to be within the space's bounds. + if method_to_test == "single" and what == algorithm: + if not action_space.contains(action) and ( + clip or unsquash or not isinstance(action_space, Box) + ): + raise ValueError( + f"Returned action ({action}) of algorithm/policy {what} " + f"not in Env's action_space {action_space}" + ) + # We are operating in normalized space: Expect only smaller action + # values. 
+ if ( + isinstance(action_space, Box) + and not unsquash + and what.config.get("normalize_actions") + and np.any(np.abs(action) > 15.0) + ): + raise ValueError( + f"Returned action ({action}) of algorithm/policy {what} " + "should be in normalized space, but seems too large/small " + "for that!" + ) + + # Loop through: Policy vs Algorithm; Different API methods to calculate + # actions; unsquash option; clip option; full fetch or not. + for what in [pol, algorithm]: + if what is algorithm: + # Get the obs-space from Workers.env (not Policy) due to possible + # pre-processor up front. + worker_set = getattr(algorithm, "env_runner_group", None) + assert worker_set + if not worker_set.local_env_runner: + obs_space = algorithm.get_policy(pid).observation_space + else: + obs_space = worker_set.local_env_runner.for_policy( + lambda p: p.observation_space, policy_id=pid + ) + obs_space = getattr(obs_space, "original_space", obs_space) + else: + obs_space = pol.observation_space + + for method_to_test in ["single"] + (["input_dict"] if what is pol else []): + for explore in [True, False]: + for full_fetch in [False, True] if what is algorithm else [False]: + timestep = random.randint(0, 100000) + for unsquash in [True, False, None]: + for clip in [False] if unsquash else [True, False, None]: + print("-" * 80) + print(f"what={what}") + print(f"method_to_test={method_to_test}") + print(f"explore={explore}") + print(f"full_fetch={full_fetch}") + print(f"unsquash={unsquash}") + print(f"clip={clip}") + _test( + what, + method_to_test, + obs_space, + full_fetch, + explore, + timestep, + unsquash, + clip, + ) + + +def check_inference_w_connectors(policy, env_name, max_steps: int = 100): + """Checks whether the given policy can infer actions from an env with connectors. + + Args: + policy: The policy to check. + env_name: Name of the environment to check + max_steps: The maximum number of steps to run the environment for. 

    Raises:
        ValueError: If the policy cannot infer actions from the environment.
    """
    # Avoids circular import
    from ray.rllib.utils.policy import local_policy_inference

    env = gym.make(env_name)

    # Potentially wrap the env like we do in RolloutWorker
    if is_atari(env):
        env = wrap_deepmind(
            env,
            dim=policy.config["model"]["dim"],
            framestack=policy.config["model"].get("framestack"),
        )

    # Roll out up to `max_steps` timesteps, feeding each transition through the
    # policy's connector-based inference path.
    obs, info = env.reset()
    reward, terminated, truncated = 0.0, False, False
    ts = 0
    while not terminated and not truncated and ts < max_steps:
        action_out = local_policy_inference(
            policy,
            env_id=0,
            agent_id=0,
            obs=obs,
            reward=reward,
            terminated=terminated,
            truncated=truncated,
            info=info,
        )
        # `action_out` is indexed [env][agent]; single env/agent here.
        obs, reward, terminated, truncated, info = env.step(action_out[0][0])

        ts += 1


def check_learning_achieved(
    tune_results: "tune.ResultGrid",
    min_value: float,
    evaluation: Optional[bool] = None,
    metric: str = f"{ENV_RUNNER_RESULTS}/episode_return_mean",
):
    """Throws an error if `min_value` is not reached within tune_results.

    Scans all result rows of all trials for `metric` and compares the best
    (maximum) recorded value to `min_value`.

    Args:
        tune_results: The tune.Tuner().fit() returned results object.
        min_value: The minimum value of `metric` that must have been reached.
        evaluation: If True, use `evaluation/[metric]`; if False, use
            `[metric]` directly; if None, use evaluation results if
            available, otherwise train results.
        metric: The (slash-separated) result-dict key to check.

    Raises:
        ValueError: If `min_value` not reached by any row.
    """
    # Get maximum value of `metrics` over all trials
    # (check if at least one trial achieved some learning, not just the final one).
    recorded_values = []
    for _, row in tune_results.get_dataframe().iterrows():
        # NOTE(review): if `evaluation=True` but the eval column is absent,
        # the row lookup below raises KeyError — confirm callers guarantee it.
        if evaluation or (
            evaluation is None and f"{EVALUATION_RESULTS}/{metric}" in row
        ):
            recorded_values.append(row[f"{EVALUATION_RESULTS}/{metric}"])
        else:
            recorded_values.append(row[metric])
    best_value = max(recorded_values)
    if best_value < min_value:
        raise ValueError(f"`{metric}` of {min_value} not reached!")
    print(f"`{metric}` of {min_value} reached! ok")


def check_off_policyness(
    results: ResultDict,
    upper_limit: float,
    lower_limit: float = 0.0,
) -> Optional[float]:
    """Verifies that the off-policy'ness of some update is within some range.

    Off-policy'ness is defined as the average (across n workers) diff
    between the number of gradient updates performed on the policy used
    for sampling vs the number of gradient updates that have been performed
    on the trained policy (usually the one on the local worker).

    Uses the published DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY metric inside
    a training results dict and compares to the given bounds.

    Note: Only works with single-agent results thus far.

    Args:
        results: The training results dict.
        upper_limit: The upper limit for the off_policy_ness value.
        lower_limit: The lower limit for the off_policy_ness value.

    Returns:
        The off-policy'ness value (described above), or None if the default
        policy has no learner info in `results`.

    Raises:
        AssertionError: If the value is out of bounds.
    """

    # Have to import this here to avoid circular dependency.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
    from ray.rllib.utils.metrics.learner_info import LEARNER_INFO

    # Assert that the off-policy'ness is within the given bounds.
    learner_info = results["info"][LEARNER_INFO]
    if DEFAULT_POLICY_ID not in learner_info:
        return None
    off_policy_ness = learner_info[DEFAULT_POLICY_ID][
        DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY
    ]
    # Roughly: Reaches up to 0.4 for 2 rollout workers and up to 0.2 for
    # 1 rollout worker.
+ if not (lower_limit <= off_policy_ness <= upper_limit): + raise AssertionError( + f"`off_policy_ness` ({off_policy_ness}) is outside the given bounds " + f"({lower_limit} - {upper_limit})!" + ) + + return off_policy_ness + + +def check_train_results_new_api_stack(train_results: ResultDict) -> None: + """Checks proper structure of a Algorithm.train() returned dict. + + Args: + train_results: The train results dict to check. + + Raises: + AssertionError: If `train_results` doesn't have the proper structure or + data in it. + """ + # Import these here to avoid circular dependencies. + from ray.rllib.utils.metrics import ( + ENV_RUNNER_RESULTS, + FAULT_TOLERANCE_STATS, + LEARNER_RESULTS, + TIMERS, + ) + + # Assert that some keys are where we would expect them. + for key in [ + ENV_RUNNER_RESULTS, + FAULT_TOLERANCE_STATS, + LEARNER_RESULTS, + TIMERS, + TRAINING_ITERATION, + "config", + ]: + assert ( + key in train_results + ), f"'{key}' not found in `train_results` ({train_results})!" + + # Make sure, `config` is an actual dict, not an AlgorithmConfig object. + assert isinstance( + train_results["config"], dict + ), "`config` in results not a python dict!" + + from ray.rllib.algorithms.algorithm_config import AlgorithmConfig + + is_multi_agent = ( + AlgorithmConfig() + .update_from_dict({"policies": train_results["config"]["policies"]}) + .is_multi_agent() + ) + + # Check in particular the "info" dict. + learner_results = train_results[LEARNER_RESULTS] + + # Make sure we have a `DEFAULT_MODULE_ID key if we are not in a + # multi-agent setup. + if not is_multi_agent: + assert len(learner_results) == 0 or DEFAULT_MODULE_ID in learner_results, ( + f"'{DEFAULT_MODULE_ID}' not found in " + f"train_results['{LEARNER_RESULTS}']!" + ) + + for module_id, module_metrics in learner_results.items(): + # The ModuleID can be __all_modules__ in multi-agent case when the new learner + # stack is enabled. 
+ if module_id == "__all_modules__": + continue + + # On the new API stack, policy has no LEARNER_STATS_KEY under it anymore. + for key, value in module_metrics.items(): + # Min- and max-stats should be single values. + if key.endswith("_min") or key.endswith("_max"): + assert np.isscalar(value), f"'key' value not a scalar ({value})!" + + return train_results + + +@OldAPIStack +def check_train_results(train_results: ResultDict): + """Checks proper structure of a Algorithm.train() returned dict. + + Args: + train_results: The train results dict to check. + + Raises: + AssertionError: If `train_results` doesn't have the proper structure or + data in it. + """ + # Import these here to avoid circular dependencies. + from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID + from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY + + # Assert that some keys are where we would expect them. + for key in [ + "config", + "custom_metrics", + ENV_RUNNER_RESULTS, + "info", + "iterations_since_restore", + "num_healthy_workers", + "perf", + "time_since_restore", + "time_this_iter_s", + "timers", + "time_total_s", + TRAINING_ITERATION, + ]: + assert ( + key in train_results + ), f"'{key}' not found in `train_results` ({train_results})!" + + for key in [ + "episode_len_mean", + "episode_reward_max", + "episode_reward_mean", + "episode_reward_min", + "hist_stats", + "policy_reward_max", + "policy_reward_mean", + "policy_reward_min", + "sampler_perf", + ]: + assert key in train_results[ENV_RUNNER_RESULTS], ( + f"'{key}' not found in `train_results[ENV_RUNNER_RESULTS]` " + f"({train_results[ENV_RUNNER_RESULTS]})!" + ) + + # Make sure, `config` is an actual dict, not an AlgorithmConfig object. + assert isinstance( + train_results["config"], dict + ), "`config` in results not a python dict!" 
+ + from ray.rllib.algorithms.algorithm_config import AlgorithmConfig + + is_multi_agent = ( + AlgorithmConfig() + .update_from_dict({"policies": train_results["config"]["policies"]}) + .is_multi_agent() + ) + + # Check in particular the "info" dict. + info = train_results["info"] + assert LEARNER_INFO in info, f"'learner' not in train_results['infos'] ({info})!" + assert ( + "num_steps_trained" in info or NUM_ENV_STEPS_TRAINED in info + ), f"'num_(env_)?steps_trained' not in train_results['infos'] ({info})!" + + learner_info = info[LEARNER_INFO] + + # Make sure we have a default_policy key if we are not in a + # multi-agent setup. + if not is_multi_agent: + # APEX algos sometimes have an empty learner info dict (no metrics + # collected yet). + assert len(learner_info) == 0 or DEFAULT_POLICY_ID in learner_info, ( + f"'{DEFAULT_POLICY_ID}' not found in " + f"train_results['infos']['learner'] ({learner_info})!" + ) + + for pid, policy_stats in learner_info.items(): + if pid == "batch_count": + continue + + # the pid can be __all__ in multi-agent case when the new learner stack is + # enabled. + if pid == "__all__": + continue + + # On the new API stack, policy has no LEARNER_STATS_KEY under it anymore. + if LEARNER_STATS_KEY in policy_stats: + learner_stats = policy_stats[LEARNER_STATS_KEY] + else: + learner_stats = policy_stats + for key, value in learner_stats.items(): + # Min- and max-stats should be single values. + if key.startswith("min_") or key.startswith("max_"): + assert np.isscalar(value), f"'key' value not a scalar ({value})!" 
+ + return train_results + + +# TODO (sven): Make this the de-facto, well documented, and unified utility for most of +# our tests: +# - CI (label: "learning_tests") +# - release tests (benchmarks) +# - example scripts +def run_rllib_example_script_experiment( + base_config: "AlgorithmConfig", + args: Optional[argparse.Namespace] = None, + *, + stop: Optional[Dict] = None, + success_metric: Optional[Dict] = None, + trainable: Optional[Type] = None, + tune_callbacks: Optional[List] = None, + keep_config: bool = False, + scheduler=None, + progress_reporter=None, +) -> Union[ResultDict, tune.result_grid.ResultGrid]: + """Given an algorithm config and some command line args, runs an experiment. + + There are some constraints on what properties must be defined in `args`. + It should ideally be generated via calling + `args = add_rllib_example_script_args()`, which can be found in this very module + here. + + The function sets up an Algorithm object from the given config (altered by the + contents of `args`), then runs the Algorithm via Tune (or manually, if + `args.no_tune` is set to True) using the stopping criteria in `stop`. + + At the end of the experiment, if `args.as_test` is True, checks, whether the + Algorithm reached the `success_metric` (if None, use `env_runners/ + episode_return_mean` with a minimum value of `args.stop_reward`). + + See https://github.com/ray-project/ray/tree/master/rllib/examples for an overview + of all supported command line options. + + Args: + base_config: The AlgorithmConfig object to use for this experiment. This base + config will be automatically "extended" based on some of the provided + `args`. For example, `args.num_env_runners` is used to set + `config.num_env_runners`, etc.. + args: A argparse.Namespace object, ideally returned by calling + `args = add_rllib_example_script_args()`. 
It must have the following + properties defined: `stop_iters`, `stop_reward`, `stop_timesteps`, + `no_tune`, `verbose`, `checkpoint_freq`, `as_test`. Optionally, for WandB + logging: `wandb_key`, `wandb_project`, `wandb_run_name`. + stop: An optional dict mapping ResultDict key strings (using "/" in case of + nesting, e.g. "env_runners/episode_return_mean" for referring to + `result_dict['env_runners']['episode_return_mean']` to minimum + values, reaching of which will stop the experiment). Default is: + { + "env_runners/episode_return_mean": args.stop_reward, + "training_iteration": args.stop_iters, + "num_env_steps_sampled_lifetime": args.stop_timesteps, + } + success_metric: Only relevant if `args.as_test` is True. + A dict mapping a single(!) ResultDict key string (using "/" in + case of nesting, e.g. "env_runners/episode_return_mean" for referring + to `result_dict['env_runners']['episode_return_mean']` to a single(!) + minimum value to be reached in order for the experiment to count as + successful. If `args.as_test` is True AND this `success_metric` is not + reached with the bounds defined by `stop`, will raise an Exception. + trainable: The Trainable sub-class to run in the tune.Tuner. If None (default), + use the registered RLlib Algorithm class specified by args.algo. + tune_callbacks: A list of Tune callbacks to configure with the tune.Tuner. + In case `args.wandb_key` is provided, appends a WandB logger to this + list. + keep_config: Set this to True, if you don't want this utility to change the + given `base_config` in any way and leave it as-is. This is helpful + for those example scripts which demonstrate how to set config settings + that are taken care of automatically in this function otherwise (e.g. + `num_env_runners`). + + Returns: + The last ResultDict from a --no-tune run OR the tune.Tuner.fit() + results. 
+ """ + if args is None: + parser = add_rllib_example_script_args() + args = parser.parse_args() + + # If run --as-release-test, --as-test must also be set. + if args.as_release_test: + args.as_test = True + + # Initialize Ray. + ray.init( + num_cpus=args.num_cpus or None, + local_mode=args.local_mode, + ignore_reinit_error=True, + ) + + # Define one or more stopping criteria. + if stop is None: + stop = { + f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward, + f"{ENV_RUNNER_RESULTS}/{NUM_ENV_STEPS_SAMPLED_LIFETIME}": ( + args.stop_timesteps + ), + TRAINING_ITERATION: args.stop_iters, + } + + config = base_config + + # Enhance the `base_config`, based on provided `args`. + if not keep_config: + # Set the framework. + config.framework(args.framework) + + # Add an env specifier (only if not already set in config)? + if args.env is not None and config.env is None: + config.environment(args.env) + + # Enable the new API stack? + if args.enable_new_api_stack: + config.api_stack( + enable_rl_module_and_learner=True, + enable_env_runner_and_connector_v2=True, + ) + + # Define EnvRunner/RolloutWorker scaling and behavior. + if args.num_env_runners is not None: + config.env_runners(num_env_runners=args.num_env_runners) + + # Define compute resources used automatically (only using the --num-learners + # and --num-gpus-per-learner args). + # New stack. + if config.enable_rl_module_and_learner: + if args.num_gpus > 0: + raise ValueError( + "--num-gpus is not supported on the new API stack! To train on " + "GPUs, use the command line options `--num-gpus-per-learner=1` and " + "`--num-learners=[your number of available GPUs]`, instead." + ) + + # Do we have GPUs available in the cluster? + num_gpus_available = ray.cluster_resources().get("GPU", 0) + # Number of actual Learner instances (including the local Learner if + # `num_learners=0`). 
+ num_actual_learners = ( + args.num_learners + if args.num_learners is not None + else config.num_learners + ) or 1 # 1: There is always a local Learner, if num_learners=0. + # How many were hard-requested by the user + # (through explicit `--num-gpus-per-learner >= 1`). + num_gpus_requested = (args.num_gpus_per_learner or 0) * num_actual_learners + # Number of GPUs needed, if `num_gpus_per_learner=None` (auto). + num_gpus_needed_if_available = ( + args.num_gpus_per_learner + if args.num_gpus_per_learner is not None + else 1 + ) * num_actual_learners + # Define compute resources used. + config.resources(num_gpus=0) # old API stack setting + if args.num_learners is not None: + config.learners(num_learners=args.num_learners) + + # User wants to use GPUs if available, but doesn't hard-require them. + if args.num_gpus_per_learner is None: + if num_gpus_available >= num_gpus_needed_if_available: + config.learners(num_gpus_per_learner=1) + else: + config.learners(num_gpus_per_learner=0, num_cpus_per_learner=1) + + # User hard-requires n GPUs, but they are not available -> Error. + elif num_gpus_available < num_gpus_requested: + raise ValueError( + "You are running your script with --num-learners=" + f"{args.num_learners} and --num-gpus-per-learner=" + f"{args.num_gpus_per_learner}, but your cluster only has " + f"{num_gpus_available} GPUs! Will run " + f"with {num_gpus_available} CPU Learners instead." + ) + + # All required GPUs are available -> Use them. + else: + config.learners(num_gpus_per_learner=args.num_gpus_per_learner) + + # Old stack. + else: + config.resources(num_gpus=args.num_gpus) + + # Evaluation setup. 
+ if args.evaluation_interval > 0: + config.evaluation( + evaluation_num_env_runners=args.evaluation_num_env_runners, + evaluation_interval=args.evaluation_interval, + evaluation_duration=args.evaluation_duration, + evaluation_duration_unit=args.evaluation_duration_unit, + evaluation_parallel_to_training=args.evaluation_parallel_to_training, + ) + + # Set the log-level (if applicable). + if args.log_level is not None: + config.debugging(log_level=args.log_level) + + # Set the output dir (if applicable). + if args.output is not None: + config.offline_data(output=args.output) + + # Run the experiment w/o Tune (directly operate on the RLlib Algorithm object). + if args.no_tune: + assert not args.as_test and not args.as_release_test + algo = config.build() + for i in range(stop.get(TRAINING_ITERATION, args.stop_iters)): + results = algo.train() + if ENV_RUNNER_RESULTS in results: + mean_return = results[ENV_RUNNER_RESULTS].get( + EPISODE_RETURN_MEAN, np.nan + ) + print(f"iter={i} R={mean_return}", end="") + if EVALUATION_RESULTS in results: + Reval = results[EVALUATION_RESULTS][ENV_RUNNER_RESULTS][ + EPISODE_RETURN_MEAN + ] + print(f" R(eval)={Reval}", end="") + print() + for key, threshold in stop.items(): + val = results + for k in key.split("/"): + try: + val = val[k] + except KeyError: + val = None + break + if val is not None and not np.isnan(val) and val >= threshold: + print(f"Stop criterium ({key}={threshold}) fulfilled!") + ray.shutdown() + return results + + ray.shutdown() + return results + + # Run the experiment using Ray Tune. + + # Log results using WandB. 
+ tune_callbacks = tune_callbacks or [] + if hasattr(args, "wandb_key") and ( + args.wandb_key is not None or WANDB_ENV_VAR in os.environ + ): + wandb_key = args.wandb_key or os.environ[WANDB_ENV_VAR] + project = args.wandb_project or ( + args.algo.lower() + "-" + re.sub("\\W+", "-", str(config.env).lower()) + ) + tune_callbacks.append( + WandbLoggerCallback( + api_key=wandb_key, + project=project, + upload_checkpoints=True, + **({"name": args.wandb_run_name} if args.wandb_run_name else {}), + ) + ) + + # Auto-configure a CLIReporter (to log the results to the console). + # Use better ProgressReporter for multi-agent cases: List individual policy rewards. + if progress_reporter is None and args.num_agents > 0: + progress_reporter = CLIReporter( + metric_columns={ + **{ + TRAINING_ITERATION: "iter", + "time_total_s": "total time (s)", + NUM_ENV_STEPS_SAMPLED_LIFETIME: "ts", + f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": "combined return", + }, + **{ + ( + f"{ENV_RUNNER_RESULTS}/module_episode_returns_mean/" f"{pid}" + ): f"return {pid}" + for pid in config.policies + }, + }, + ) + + # Force Tuner to use old progress output as the new one silently ignores our custom + # `CLIReporter`. + os.environ["RAY_AIR_NEW_OUTPUT"] = "0" + + # Run the actual experiment (using Tune). + start_time = time.time() + results = tune.Tuner( + trainable or config.algo_class, + param_space=config, + run_config=air.RunConfig( + stop=stop, + verbose=args.verbose, + callbacks=tune_callbacks, + checkpoint_config=air.CheckpointConfig( + checkpoint_frequency=args.checkpoint_freq, + checkpoint_at_end=args.checkpoint_at_end, + ), + progress_reporter=progress_reporter, + ), + tune_config=tune.TuneConfig( + num_samples=args.num_samples, + max_concurrent_trials=args.max_concurrent_trials, + scheduler=scheduler, + ), + ).fit() + time_taken = time.time() - start_time + + ray.shutdown() + + # If run as a test, check whether we reached the specified success criteria. 
+ test_passed = False + if args.as_test: + # Success metric not provided, try extracting it from `stop`. + if success_metric is None: + for try_it in [ + f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}", + f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}", + ]: + if try_it in stop: + success_metric = {try_it: stop[try_it]} + break + if success_metric is None: + success_metric = { + f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward, + } + # TODO (sven): Make this work for more than one metric (AND-logic?). + # Get maximum value of `metric` over all trials + # (check if at least one trial achieved some learning, not just the final one). + success_metric_key, success_metric_value = next(iter(success_metric.items())) + best_value = max( + row[success_metric_key] for _, row in results.get_dataframe().iterrows() + ) + if best_value >= success_metric_value: + test_passed = True + print(f"`{success_metric_key}` of {success_metric_value} reached! ok") + + if args.as_release_test: + trial = results._experiment_analysis.trials[0] + stats = trial.last_result + stats.pop("config", None) + json_summary = { + "time_taken": float(time_taken), + "trial_states": [trial.status], + "last_update": float(time.time()), + "stats": stats, + "passed": [test_passed], + "not_passed": [not test_passed], + "failures": {str(trial): 1} if not test_passed else {}, + } + with open( + os.environ.get("TEST_OUTPUT_JSON", "/tmp/learning_test.json"), + "wt", + ) as f: + try: + json.dump(json_summary, f) + # Something went wrong writing json. Try again w/ simplified stats. + except Exception: + from ray.rllib.algorithms.algorithm import Algorithm + + simplified_stats = { + k: stats[k] for k in Algorithm._progress_metrics if k in stats + } + json_summary["stats"] = simplified_stats + json.dump(json_summary, f) + + if not test_passed: + raise ValueError( + f"`{success_metric_key}` of {success_metric_value} not reached!" 
+ ) + + return results + + +def check_same_batch(batch1, batch2) -> None: + """Check if both batches are (almost) identical. + + For MultiAgentBatches, the step count and individual policy's + SampleBatches are checked for identity. For SampleBatches, identity is + checked as the almost numerical key-value-pair identity between batches + with ray.rllib.utils.test_utils.check(). unroll_id is compared only if + both batches have an unroll_id. + + Args: + batch1: Batch to compare against batch2 + batch2: Batch to compare against batch1 + """ + # Avoids circular import + from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch + + assert type(batch1) is type( + batch2 + ), "Input batches are of different types {} and {}".format( + str(type(batch1)), str(type(batch2)) + ) + + def check_sample_batches(_batch1, _batch2, _policy_id=None): + unroll_id_1 = _batch1.get("unroll_id", None) + unroll_id_2 = _batch2.get("unroll_id", None) + # unroll IDs only have to fit if both batches have them + if unroll_id_1 is not None and unroll_id_2 is not None: + assert unroll_id_1 == unroll_id_2 + + batch1_keys = set() + for k, v in _batch1.items(): + # unroll_id is compared above already + if k == "unroll_id": + continue + check(v, _batch2[k]) + batch1_keys.add(k) + + batch2_keys = set(_batch2.keys()) + # unroll_id is compared above already + batch2_keys.discard("unroll_id") + _difference = batch1_keys.symmetric_difference(batch2_keys) + + # Cases where one batch has info and the other has not + if _policy_id: + assert not _difference, ( + "SampleBatches for policy with ID {} " + "don't share information on the " + "following information: \n{}" + "".format(_policy_id, _difference) + ) + else: + assert not _difference, ( + "SampleBatches don't share information " + "on the following information: \n{}" + "".format(_difference) + ) + + if type(batch1) is SampleBatch: + check_sample_batches(batch1, batch2) + elif type(batch1) is MultiAgentBatch: + assert batch1.count == 
batch2.count + batch1_ids = set() + for policy_id, policy_batch in batch1.policy_batches.items(): + check_sample_batches( + policy_batch, batch2.policy_batches[policy_id], policy_id + ) + batch1_ids.add(policy_id) + + # Case where one ma batch has info on a policy the other has not + batch2_ids = set(batch2.policy_batches.keys()) + difference = batch1_ids.symmetric_difference(batch2_ids) + assert ( + not difference + ), f"MultiAgentBatches don't share the following information: \n{difference}." + else: + raise ValueError("Unsupported batch type " + str(type(batch1))) + + +def check_reproducibilty( + algo_class: Type["Algorithm"], + algo_config: "AlgorithmConfig", + *, + fw_kwargs: Dict[str, Any], + training_iteration: int = 1, +) -> None: + # TODO @kourosh: we can get rid of examples/deterministic_training.py once + # this is added to all algorithms + """Check if the algorithm is reproducible across different testing conditions: + + frameworks: all input frameworks + num_gpus: int(os.environ.get("RLLIB_NUM_GPUS", "0")) + num_workers: 0 (only local workers) or + 4 ((1) local workers + (4) remote workers) + num_envs_per_env_runner: 2 + + Args: + algo_class: Algorithm class to test. + algo_config: Base config to use for the algorithm. + fw_kwargs: Framework iterator keyword arguments. + training_iteration: Number of training iterations to run. + + Returns: + None + + Raises: + It raises an AssertionError if the algorithm is not reproducible. 
+ """ + from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID + from ray.rllib.utils.metrics.learner_info import LEARNER_INFO + + stop_dict = {TRAINING_ITERATION: training_iteration} + # use 0 and 2 workers (for more that 4 workers we have to make sure the instance + # type in ci build has enough resources) + for num_workers in [0, 2]: + algo_config = ( + algo_config.debugging(seed=42).env_runners( + num_env_runners=num_workers, num_envs_per_env_runner=2 + ) + # new API + .learners( + num_gpus_per_learner=int(os.environ.get("RLLIB_NUM_GPUS", "0")), + ) + # old API + .resources( + num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")), + ) + ) + + print( + f"Testing reproducibility of {algo_class.__name__}" + f" with {num_workers} workers" + ) + print("/// config") + pprint.pprint(algo_config.to_dict()) + # test tune.Tuner().fit() reproducibility + results1 = tune.Tuner( + algo_class, + param_space=algo_config.to_dict(), + run_config=air.RunConfig(stop=stop_dict, verbose=1), + ).fit() + results1 = results1.get_best_result().metrics + + results2 = tune.Tuner( + algo_class, + param_space=algo_config.to_dict(), + run_config=air.RunConfig(stop=stop_dict, verbose=1), + ).fit() + results2 = results2.get_best_result().metrics + + # Test rollout behavior. + check( + results1[ENV_RUNNER_RESULTS]["hist_stats"], + results2[ENV_RUNNER_RESULTS]["hist_stats"], + ) + # As well as training behavior (minibatch sequence during SGD + # iterations). + # As well as training behavior (minibatch sequence during SGD + # iterations). + if algo_config.enable_rl_module_and_learner: + check( + results1["info"][LEARNER_INFO][DEFAULT_POLICY_ID], + results2["info"][LEARNER_INFO][DEFAULT_POLICY_ID], + ) + else: + check( + results1["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"], + results2["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"], + ) + + +def get_cartpole_dataset_reader(batch_size: int = 1) -> "DatasetReader": + """Returns a DatasetReader for the cartpole dataset. 
+ Args: + batch_size: The batch size to use for the reader. + Returns: + A rllib DatasetReader for the cartpole dataset. + """ + from ray.rllib.algorithms import AlgorithmConfig + from ray.rllib.offline import IOContext + from ray.rllib.offline.dataset_reader import ( + DatasetReader, + get_dataset_and_shards, + ) + + path = "tests/data/cartpole/large.json" + input_config = {"format": "json", "paths": path} + dataset, _ = get_dataset_and_shards( + AlgorithmConfig().offline_data(input_="dataset", input_config=input_config) + ) + ioctx = IOContext( + config=( + AlgorithmConfig() + .training(train_batch_size=batch_size) + .offline_data(actions_in_input_normalized=True) + ), + worker_index=0, + ) + reader = DatasetReader(dataset, ioctx) + return reader + + +class ModelChecker: + """Helper class to compare architecturally identical Models across frameworks. + + Holds a ModelConfig, such that individual models can be added simply via their + framework string (by building them with config.build(framework=...). + A call to `check()` forces all added models to be compared in terms of their + number of trainable and non-trainable parameters, as well as, their + computation results given a common weights structure and values and identical + inputs to the models. + """ + + def __init__(self, config): + self.config = config + + # To compare number of params between frameworks. + self.param_counts = {} + # To compare computed outputs from fixed-weights-nets between frameworks. + self.output_values = {} + + # We will pass an observation filled with this one random value through + # all DL networks (after they have been set to fixed-weights) to compare + # the computed outputs. + self.random_fill_input_value = np.random.uniform(-0.01, 0.01) + + # Dict of models to check against each other. 
+ self.models = {} + + def add(self, framework: str = "torch", obs=True, state=False) -> Any: + """Builds a new Model for the given framework.""" + model = self.models[framework] = self.config.build(framework=framework) + + # Pass a B=1 observation through the model. + inputs = np.full( + [1] + ([1] if state else []) + list(self.config.input_dims), + self.random_fill_input_value, + ) + if obs: + inputs = {Columns.OBS: inputs} + if state: + inputs[Columns.STATE_IN] = tree.map_structure( + lambda s: np.zeros(shape=[1] + list(s)), state + ) + if framework == "torch": + from ray.rllib.utils.torch_utils import convert_to_torch_tensor + + inputs = convert_to_torch_tensor(inputs) + # w/ old specs: inputs = model.input_specs.fill(self.random_fill_input_value) + + outputs = model(inputs) + + # Bring model into a reproducible, comparable state (so we can compare + # computations across frameworks). Use only a value-sequence of len=1 here + # as it could possibly be that the layers are stored in different order + # across the different frameworks. + model._set_to_dummy_weights(value_sequence=(self.random_fill_input_value,)) + + # Perform another forward pass. + comparable_outputs = model(inputs) + + # Store the number of parameters for this framework's net. + self.param_counts[framework] = model.get_num_parameters() + # Store the fixed-weights-net outputs for this framework's net. + if framework == "torch": + self.output_values[framework] = tree.map_structure( + lambda s: s.detach().numpy() if s is not None else None, + comparable_outputs, + ) + else: + self.output_values[framework] = tree.map_structure( + lambda s: s.numpy() if s is not None else None, comparable_outputs + ) + return outputs + + def check(self): + """Compares all added Models with each other and possibly raises errors.""" + + main_key = next(iter(self.models.keys())) + # Compare number of trainable and non-trainable params between all + # frameworks. 
+ for c in self.param_counts.values(): + check(c, self.param_counts[main_key]) + + # Compare dummy outputs by exact values given that all nets received the + # same input and all nets have the same (dummy) weight values. + for v in self.output_values.values(): + check(v, self.output_values[main_key], atol=0.0005) + + +def _get_mean_action_from_algorithm(alg: "Algorithm", obs: np.ndarray) -> np.ndarray: + """Returns the mean action computed by the given algorithm. + + Note: This makes calls to `Algorithm.compute_single_action` + + Args: + alg: The constructed algorithm to run inference on. + obs: The observation to compute the action for. + + Returns: + The mean action computed by the algorithm over 5000 samples. + + """ + out = [] + for _ in range(5000): + out.append(float(alg.compute_single_action(obs))) + return np.mean(out) + + +def check_supported_spaces( + alg: str, + config: "AlgorithmConfig", + train: bool = True, + check_bounds: bool = False, + frameworks: Optional[Tuple[str]] = None, + use_gpu: bool = False, +): + """Checks whether the given algorithm supports different action and obs spaces. + + Performs the checks by constructing an rllib algorithm from the config and + checking to see that the model inside the policy is the correct one given + the action and obs spaces. For example if the action space is discrete and + the obs space is an image, then the model should be a vision network with + a categorical action distribution. + + Args: + alg: The name of the algorithm to test. + config: The config to use for the algorithm. + train: Whether to train the algorithm for a few iterations. + check_bounds: Whether to check the bounds of the action space. + frameworks: The frameworks to test the algorithm with. + use_gpu: Whether to check support for training on a gpu. + + + """ + # Do these imports here because otherwise we have circular imports. 
+ from ray.rllib.examples.envs.classes.random_env import RandomEnv + from ray.rllib.models.torch.complex_input_net import ( + ComplexInputNetwork as TorchComplexNet, + ) + from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFCNet + from ray.rllib.models.torch.visionnet import VisionNetwork as TorchVisionNet + + action_spaces_to_test = { + # Test discrete twice here until we support multi_binary action spaces + "discrete": Discrete(5), + "continuous": Box(-1.0, 1.0, (5,), dtype=np.float32), + "int_actions": Box(0, 3, (2, 3), dtype=np.int32), + "multidiscrete": MultiDiscrete([1, 2, 3, 4]), + "tuple": GymTuple( + [Discrete(2), Discrete(3), Box(-1.0, 1.0, (5,), dtype=np.float32)] + ), + "dict": GymDict( + { + "action_choice": Discrete(3), + "parameters": Box(-1.0, 1.0, (1,), dtype=np.float32), + "yet_another_nested_dict": GymDict( + {"a": GymTuple([Discrete(2), Discrete(3)])} + ), + } + ), + } + + observation_spaces_to_test = { + "multi_binary": MultiBinary([3, 10, 10]), + "discrete": Discrete(5), + "continuous": Box(-1.0, 1.0, (5,), dtype=np.float32), + "vector2d": Box(-1.0, 1.0, (5, 5), dtype=np.float32), + "image": Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32), + "tuple": GymTuple([Discrete(10), Box(-1.0, 1.0, (5,), dtype=np.float32)]), + "dict": GymDict( + { + "task": Discrete(10), + "position": Box(-1.0, 1.0, (5,), dtype=np.float32), + } + ), + } + + # The observation spaces that we test RLModules with + rlmodule_supported_observation_spaces = [ + "multi_binary", + "discrete", + "continuous", + "image", + "tuple", + "dict", + ] + + # The action spaces that we test RLModules with + rlmodule_supported_action_spaces = ["discrete", "continuous"] + + default_observation_space = default_action_space = "discrete" + + config["log_level"] = "ERROR" + config["env"] = RandomEnv + + def _do_check(alg, config, a_name, o_name): + # We need to copy here so that this validation does not affect the actual + # validation method call further down the line. 
+ config_copy = config.copy() + config_copy.validate() + # If RLModules are enabled, we need to skip a few tests for now: + if config_copy.enable_rl_module_and_learner: + # Skip PPO cases in which RLModules don't support the given spaces yet. + if o_name not in rlmodule_supported_observation_spaces: + logger.warning( + "Skipping PPO test with RLModules for obs space {}".format(o_name) + ) + return + if a_name not in rlmodule_supported_action_spaces: + logger.warning( + "Skipping PPO test with RLModules for action space {}".format( + a_name + ) + ) + return + + fw = config["framework"] + action_space = action_spaces_to_test[a_name] + obs_space = observation_spaces_to_test[o_name] + print( + "=== Testing {} (fw={}) action_space={} obs_space={} ===".format( + alg, fw, action_space, obs_space + ) + ) + t0 = time.time() + config.update_from_dict( + dict( + env_config=dict( + action_space=action_space, + observation_space=obs_space, + reward_space=Box(1.0, 1.0, shape=(), dtype=np.float32), + p_terminated=1.0, + check_action_bounds=check_bounds, + ) + ) + ) + stat = "ok" + + try: + algo = config.build() + except ray.exceptions.RayActorError as e: + if len(e.args) >= 2 and isinstance(e.args[2], UnsupportedSpaceException): + stat = "unsupported" + elif isinstance(e.args[0].args[2], UnsupportedSpaceException): + stat = "unsupported" + else: + raise + except UnsupportedSpaceException: + stat = "unsupported" + else: + if alg not in ["SAC", "PPO"]: + # 2D (image) input: Expect VisionNet. + if o_name in ["atari", "image"]: + assert isinstance(algo.get_policy().model, TorchVisionNet) + # 1D input: Expect FCNet. + elif o_name == "continuous": + assert isinstance(algo.get_policy().model, TorchFCNet) + # Could be either one: ComplexNet (if disabled Preprocessor) + # or FCNet (w/ Preprocessor). 
+ elif o_name == "vector2d": + assert isinstance( + algo.get_policy().model, (TorchComplexNet, TorchFCNet) + ) + if train: + algo.train() + algo.stop() + print("Test: {}, ran in {}s".format(stat, time.time() - t0)) + + if not frameworks: + frameworks = ("tf2", "tf", "torch") + + _do_check_remote = ray.remote(_do_check) + _do_check_remote = _do_check_remote.options(num_gpus=1 if use_gpu else 0) + # Test all action spaces first. + for a_name in action_spaces_to_test.keys(): + o_name = default_observation_space + ray.get(_do_check_remote.remote(alg, config, a_name, o_name)) + + # Now test all observation spaces. + for o_name in observation_spaces_to_test.keys(): + a_name = default_action_space + ray.get(_do_check_remote.remote(alg, config, a_name, o_name)) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tf_run_builder.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tf_run_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..1a4116f245203e7cedbc9348c893305186dd566d --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tf_run_builder.py @@ -0,0 +1,115 @@ +import logging +import os +import time + +from ray.util.debug import log_once +from ray.rllib.utils.annotations import OldAPIStack +from ray.rllib.utils.framework import try_import_tf + +tf1, tf, tfv = try_import_tf() +logger = logging.getLogger(__name__) + + +@OldAPIStack +class _TFRunBuilder: + """Used to incrementally build up a TensorFlow run. + + This is particularly useful for batching ops from multiple different + policies in the multi-agent setting. 
+ """ + + def __init__(self, session, debug_name): + self.session = session + self.debug_name = debug_name + self.feed_dict = {} + self.fetches = [] + self._executed = None + + def add_feed_dict(self, feed_dict): + assert not self._executed + for k in feed_dict: + if k in self.feed_dict: + raise ValueError("Key added twice: {}".format(k)) + self.feed_dict.update(feed_dict) + + def add_fetches(self, fetches): + assert not self._executed + base_index = len(self.fetches) + self.fetches.extend(fetches) + return list(range(base_index, len(self.fetches))) + + def get(self, to_fetch): + if self._executed is None: + try: + self._executed = _run_timeline( + self.session, + self.fetches, + self.debug_name, + self.feed_dict, + os.environ.get("TF_TIMELINE_DIR"), + ) + except Exception as e: + logger.exception( + "Error fetching: {}, feed_dict={}".format( + self.fetches, self.feed_dict + ) + ) + raise e + if isinstance(to_fetch, int): + return self._executed[to_fetch] + elif isinstance(to_fetch, list): + return [self.get(x) for x in to_fetch] + elif isinstance(to_fetch, tuple): + return tuple(self.get(x) for x in to_fetch) + else: + raise ValueError("Unsupported fetch type: {}".format(to_fetch)) + + +_count = 0 + + +def _run_timeline(sess, ops, debug_name, feed_dict=None, timeline_dir=None): + if feed_dict is None: + feed_dict = {} + + if timeline_dir: + from tensorflow.python.client import timeline + + try: + run_options = tf1.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) + except AttributeError: + run_options = None + # In local mode, tf1.RunOptions is not available, see #26511 + if log_once("tf1.RunOptions_not_available"): + logger.exception( + "Can not access tf.RunOptions.FULL_TRACE. This may be because " + "you have used `ray.init(local_mode=True)`. RLlib will use " + "timeline without `options=tf.RunOptions.FULL_TRACE`." 
+ ) + run_metadata = tf1.RunMetadata() + start = time.time() + fetches = sess.run( + ops, options=run_options, run_metadata=run_metadata, feed_dict=feed_dict + ) + trace = timeline.Timeline(step_stats=run_metadata.step_stats) + global _count + outf = os.path.join( + timeline_dir, + "timeline-{}-{}-{}.json".format(debug_name, os.getpid(), _count % 10), + ) + _count += 1 + trace_file = open(outf, "w") + logger.info( + "Wrote tf timeline ({} s) to {}".format( + time.time() - start, os.path.abspath(outf) + ) + ) + trace_file.write(trace.generate_chrome_trace_format()) + else: + if log_once("tf_timeline"): + logger.info( + "Executing TF run without tracing. To dump TF timeline traces " + "to disk, set the TF_TIMELINE_DIR environment variable." + ) + fetches = sess.run(ops, feed_dict=feed_dict) + return fetches diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tf_utils.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c32ad32d268658ff40b8846ab018c876e1f986c1 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/tf_utils.py @@ -0,0 +1,812 @@ +import logging +from typing import Any, Callable, List, Optional, Type, TYPE_CHECKING, Union + +import gymnasium as gym +import numpy as np +import tree # pip install dm_tree +from gymnasium.spaces import Discrete, MultiDiscrete + +from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.numpy import SMALL_NUMBER +from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space +from ray.rllib.utils.typing import ( + LocalOptimizer, + ModelGradients, + NetworkType, + PartialAlgorithmConfigDict, + SpaceStruct, + TensorStructType, + TensorType, +) + +if TYPE_CHECKING: + from ray.rllib.algorithms.algorithm_config import AlgorithmConfig + from ray.rllib.core.learner.learner import ParamDict + from 
ray.rllib.policy.eager_tf_policy import EagerTFPolicy + from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 + from ray.rllib.policy.tf_policy import TFPolicy + +logger = logging.getLogger(__name__) +tf1, tf, tfv = try_import_tf() + + +@PublicAPI +def clip_gradients( + gradients_dict: "ParamDict", + *, + grad_clip: Optional[float] = None, + grad_clip_by: str, +) -> Optional[float]: + """Performs gradient clipping on a grad-dict based on a clip value and clip mode. + + Changes the provided gradient dict in place. + + Args: + gradients_dict: The gradients dict, mapping str to gradient tensors. + grad_clip: The value to clip with. The way gradients are clipped is defined + by the `grad_clip_by` arg (see below). + grad_clip_by: One of 'value', 'norm', or 'global_norm'. + + Returns: + If `grad_clip_by`="global_norm" and `grad_clip` is not None, returns the global + norm of all tensors, otherwise returns None. + """ + # No clipping, return. + if grad_clip is None: + return + + # Clip by value (each gradient individually). + if grad_clip_by == "value": + for k, v in gradients_dict.copy().items(): + gradients_dict[k] = tf.clip_by_value(v, -grad_clip, grad_clip) + + # Clip by L2-norm (per gradient tensor). + elif grad_clip_by == "norm": + for k, v in gradients_dict.copy().items(): + gradients_dict[k] = tf.clip_by_norm(v, grad_clip) + + # Clip by global L2-norm (across all gradient tensors). + else: + assert grad_clip_by == "global_norm" + + clipped_grads, global_norm = tf.clip_by_global_norm( + list(gradients_dict.values()), grad_clip + ) + for k, v in zip(gradients_dict.copy().keys(), clipped_grads): + gradients_dict[k] = v + + # Return the computed global norm scalar. + return global_norm + + +@PublicAPI +def explained_variance(y: TensorType, pred: TensorType) -> TensorType: + """Computes the explained variance for a pair of labels and predictions. + + The formula used is: + max(-1.0, 1.0 - (std(y - pred)^2 / std(y)^2)) + + Args: + y: The labels. 
+ pred: The predictions. + + Returns: + The explained variance given a pair of labels and predictions. + """ + _, y_var = tf.nn.moments(y, axes=[0]) + _, diff_var = tf.nn.moments(y - pred, axes=[0]) + return tf.maximum(-1.0, 1 - (diff_var / (y_var + SMALL_NUMBER))) + + +@PublicAPI +def flatten_inputs_to_1d_tensor( + inputs: TensorStructType, + spaces_struct: Optional[SpaceStruct] = None, + time_axis: bool = False, +) -> TensorType: + """Flattens arbitrary input structs according to the given spaces struct. + + Returns a single 1D tensor resulting from the different input + components' values. + + Thereby: + - Boxes (any shape) get flattened to (B, [T]?, -1). Note that image boxes + are not treated differently from other types of Boxes and get + flattened as well. + - Discrete (int) values are one-hot'd, e.g. a batch of [1, 0, 3] (B=3 with + Discrete(4) space) results in [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]. + - MultiDiscrete values are multi-one-hot'd, e.g. a batch of + [[0, 2], [1, 4]] (B=2 with MultiDiscrete([2, 5]) space) results in + [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]]. + + Args: + inputs: The inputs to be flattened. + spaces_struct: The structure of the spaces that behind the input + time_axis: Whether all inputs have a time-axis (after the batch axis). + If True, will keep not only the batch axis (0th), but the time axis + (1st) as-is and flatten everything from the 2nd axis up. + + Returns: + A single 1D tensor resulting from concatenating all + flattened/one-hot'd input components. Depending on the time_axis flag, + the shape is (B, n) or (B, T, n). + + .. 
testcode:: + :skipif: True + + # B=2 + from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor + from gymnasium.spaces import Discrete, Box + out = flatten_inputs_to_1d_tensor( + {"a": [1, 0], "b": [[[0.0], [0.1]], [1.0], [1.1]]}, + spaces_struct=dict(a=Discrete(2), b=Box(shape=(2, 1))) + ) + print(out) + + # B=2; T=2 + out = flatten_inputs_to_1d_tensor( + ([[1, 0], [0, 1]], + [[[0.0, 0.1], [1.0, 1.1]], [[2.0, 2.1], [3.0, 3.1]]]), + spaces_struct=tuple([Discrete(2), Box(shape=(2, ))]), + time_axis=True + ) + print(out) + + .. testoutput:: + + [[0.0, 1.0, 0.0, 0.1], [1.0, 0.0, 1.0, 1.1]] # B=2 n=4 + [[[0.0, 1.0, 0.0, 0.1], [1.0, 0.0, 1.0, 1.1]], + [[1.0, 0.0, 2.0, 2.1], [0.0, 1.0, 3.0, 3.1]]] # B=2 T=2 n=4 + """ + + flat_inputs = tree.flatten(inputs) + flat_spaces = ( + tree.flatten(spaces_struct) + if spaces_struct is not None + else [None] * len(flat_inputs) + ) + + B = None + T = None + out = [] + for input_, space in zip(flat_inputs, flat_spaces): + input_ = tf.convert_to_tensor(input_) + shape = tf.shape(input_) + # Store batch and (if applicable) time dimension. + if B is None: + B = shape[0] + if time_axis: + T = shape[1] + + # One-hot encoding. + if isinstance(space, Discrete): + if time_axis: + input_ = tf.reshape(input_, [B * T]) + out.append(tf.cast(one_hot(input_, space), tf.float32)) + elif isinstance(space, MultiDiscrete): + if time_axis: + input_ = tf.reshape(input_, [B * T, -1]) + out.append(tf.cast(one_hot(input_, space), tf.float32)) + # Flatten. + else: + if time_axis: + input_ = tf.reshape(input_, [B * T, -1]) + else: + input_ = tf.reshape(input_, [B, -1]) + out.append(tf.cast(input_, tf.float32)) + + merged = tf.concat(out, axis=-1) + # Restore the time-dimension, if applicable. + if time_axis: + merged = tf.reshape(merged, [B, T, -1]) + + return merged + + +@PublicAPI +def get_gpu_devices() -> List[str]: + """Returns a list of GPU device names, e.g. ["/gpu:0", "/gpu:1"]. + + Supports both tf1.x and tf2.x. 
+ + Returns: + List of GPU device names (str). + """ + if tfv == 1: + from tensorflow.python.client import device_lib + + devices = device_lib.list_local_devices() + else: + try: + devices = tf.config.list_physical_devices() + except Exception: + devices = tf.config.experimental.list_physical_devices() + + # Expect "GPU", but also stuff like: "XLA_GPU". + return [d.name for d in devices if "GPU" in d.device_type] + + +@PublicAPI +def get_placeholder( + *, + space: Optional[gym.Space] = None, + value: Optional[Any] = None, + name: Optional[str] = None, + time_axis: bool = False, + flatten: bool = True, +) -> "tf1.placeholder": + """Returns a tf1.placeholder object given optional hints, such as a space. + + Note that the returned placeholder will always have a leading batch + dimension (None). + + Args: + space: An optional gym.Space to hint the shape and dtype of the + placeholder. + value: An optional value to hint the shape and dtype of the + placeholder. + name: An optional name for the placeholder. + time_axis: Whether the placeholder should also receive a time + dimension (None). + flatten: Whether to flatten the given space into a plain Box space + and then create the placeholder from the resulting space. + + Returns: + The tf1 placeholder. + """ + from ray.rllib.models.catalog import ModelCatalog + + if space is not None: + if isinstance(space, (gym.spaces.Dict, gym.spaces.Tuple)): + if flatten: + return ModelCatalog.get_action_placeholder(space, None) + else: + return tree.map_structure_with_path( + lambda path, component: get_placeholder( + space=component, + name=name + "." 
+ ".".join([str(p) for p in path]), + ), + get_base_struct_from_space(space), + ) + return tf1.placeholder( + shape=(None,) + ((None,) if time_axis else ()) + space.shape, + dtype=tf.float32 if space.dtype == np.float64 else space.dtype, + name=name, + ) + else: + assert value is not None + shape = value.shape[1:] + return tf1.placeholder( + shape=(None,) + + ((None,) if time_axis else ()) + + (shape if isinstance(shape, tuple) else tuple(shape.as_list())), + dtype=tf.float32 if value.dtype == np.float64 else value.dtype, + name=name, + ) + + +@PublicAPI +def get_tf_eager_cls_if_necessary( + orig_cls: Type["TFPolicy"], + config: Union["AlgorithmConfig", PartialAlgorithmConfigDict], +) -> Type[Union["TFPolicy", "EagerTFPolicy", "EagerTFPolicyV2"]]: + """Returns the corresponding tf-eager class for a given TFPolicy class. + + Args: + orig_cls: The original TFPolicy class to get the corresponding tf-eager + class for. + config: The Algorithm config dict or AlgorithmConfig object. + + Returns: + The tf eager policy class corresponding to the given TFPolicy class. + """ + cls = orig_cls + framework = config.get("framework", "tf") + + if framework in ["tf2", "tf"] and not tf1: + raise ImportError("Could not import tensorflow!") + + if framework == "tf2": + if not tf1.executing_eagerly(): + tf1.enable_eager_execution() + assert tf1.executing_eagerly() + + from ray.rllib.policy.tf_policy import TFPolicy + from ray.rllib.policy.eager_tf_policy import EagerTFPolicy + from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 + + # Create eager-class (if not already one). + if hasattr(orig_cls, "as_eager") and not issubclass(orig_cls, EagerTFPolicy): + cls = orig_cls.as_eager() + # Could be some other type of policy or already + # eager-ized. + elif not issubclass(orig_cls, TFPolicy): + pass + else: + raise ValueError( + "This policy does not support eager execution: {}".format(orig_cls) + ) + + # Now that we know, policy is an eager one, add tracing, if necessary. 
+ if config.get("eager_tracing") and issubclass( + cls, (EagerTFPolicy, EagerTFPolicyV2) + ): + cls = cls.with_tracing() + return cls + + +@PublicAPI +def huber_loss(x: TensorType, delta: float = 1.0) -> TensorType: + """Computes the huber loss for a given term and delta parameter. + + Reference: https://en.wikipedia.org/wiki/Huber_loss + Note that the factor of 0.5 is implicitly included in the calculation. + + Formula: + L = 0.5 * x^2 for small abs x (delta threshold) + L = delta * (abs(x) - 0.5*delta) for larger abs x (delta threshold) + + Args: + x: The input term, e.g. a TD error. + delta: The delta parmameter in the above formula. + + Returns: + The Huber loss resulting from `x` and `delta`. + """ + return tf.where( + tf.abs(x) < delta, # for small x -> apply the Huber correction + tf.math.square(x) * 0.5, + delta * (tf.abs(x) - 0.5 * delta), + ) + + +@PublicAPI +def l2_loss(x: TensorType) -> TensorType: + """Computes half the L2 norm over a tensor's values without the sqrt. + + output = 0.5 * sum(x ** 2) + + Args: + x: The input tensor. + + Returns: + 0.5 times the L2 norm over the given tensor's values (w/o sqrt). + """ + return 0.5 * tf.reduce_sum(tf.pow(x, 2.0)) + + +@PublicAPI +def make_tf_callable( + session_or_none: Optional["tf1.Session"], dynamic_shape: bool = False +) -> Callable: + """Returns a function that can be executed in either graph or eager mode. + + The function must take only positional args. + + If eager is enabled, this will act as just a function. Otherwise, it + will build a function that executes a session run with placeholders + internally. + + Args: + session_or_none: tf.Session if in graph mode, else None. + dynamic_shape: True if the placeholders should have a dynamic + batch dimension. Otherwise they will be fixed shape. + + Returns: + A function that can be called in either eager or static-graph mode. 
+ """ + + if tf.executing_eagerly(): + assert session_or_none is None + else: + assert session_or_none is not None + + def make_wrapper(fn): + # Static-graph mode: Create placeholders and make a session call each + # time the wrapped function is called. Returns the output of this + # session call. + if session_or_none is not None: + args_placeholders = [] + kwargs_placeholders = {} + + symbolic_out = [None] + + def call(*args, **kwargs): + args_flat = [] + for a in args: + if type(a) is list: + args_flat.extend(a) + else: + args_flat.append(a) + args = args_flat + + # We have not built any placeholders yet: Do this once here, + # then reuse the same placeholders each time we call this + # function again. + if symbolic_out[0] is None: + with session_or_none.graph.as_default(): + + def _create_placeholders(path, value): + if dynamic_shape: + if len(value.shape) > 0: + shape = (None,) + value.shape[1:] + else: + shape = () + else: + shape = value.shape + return tf1.placeholder( + dtype=value.dtype, + shape=shape, + name=".".join([str(p) for p in path]), + ) + + placeholders = tree.map_structure_with_path( + _create_placeholders, args + ) + for ph in tree.flatten(placeholders): + args_placeholders.append(ph) + + placeholders = tree.map_structure_with_path( + _create_placeholders, kwargs + ) + for k, ph in placeholders.items(): + kwargs_placeholders[k] = ph + + symbolic_out[0] = fn(*args_placeholders, **kwargs_placeholders) + feed_dict = dict(zip(args_placeholders, tree.flatten(args))) + tree.map_structure( + lambda ph, v: feed_dict.__setitem__(ph, v), + kwargs_placeholders, + kwargs, + ) + ret = session_or_none.run(symbolic_out[0], feed_dict) + return ret + + return call + # Eager mode (call function as is). + else: + return fn + + return make_wrapper + + +# TODO (sven): Deprecate this function once we have moved completely to the Learner API. +# Replaced with `clip_gradients()`. 
@PublicAPI
def minimize_and_clip(
    optimizer: LocalOptimizer,
    objective: TensorType,
    var_list: List["tf.Variable"],
    clip_val: float = 10.0,
) -> ModelGradients:
    """Computes, then clips gradients using objective, optimizer and var list.

    Ensures the norm of the gradients for each variable is clipped to
    `clip_val`.

    Args:
        optimizer: Either a shim optimizer (tf eager) containing a
            tf.GradientTape under `self.tape` or a tf1 local optimizer
            object.
        objective: The loss tensor to calculate gradients on.
        var_list: The list of tf.Variables to compute gradients over.
        clip_val: The global norm clip value. Will clip around -clip_val and
            +clip_val.

    Returns:
        The resulting model gradients (list or tuples of grads + vars)
        corresponding to the input `var_list`.
    """
    # Accidentally passing values < 0.0 will break all gradients.
    assert clip_val is None or clip_val > 0.0, clip_val

    if tf.executing_eagerly():
        # Eager path assumes a shim optimizer exposing a GradientTape.
        tape = optimizer.tape
        grads_and_vars = list(zip(list(tape.gradient(objective, var_list)), var_list))
    else:
        grads_and_vars = optimizer.compute_gradients(objective, var_list=var_list)

    # Per-tensor norm clipping; variables with no gradient are dropped.
    return [
        (tf.clip_by_norm(g, clip_val) if clip_val is not None else g, v)
        for (g, v) in grads_and_vars
        if g is not None
    ]


@PublicAPI
def one_hot(x: TensorType, space: gym.Space) -> TensorType:
    """Returns a one-hot tensor, given and int tensor and a space.

    Handles the MultiDiscrete case as well.

    Args:
        x: The input tensor.
        space: The space to use for generating the one-hot tensor.

    Returns:
        The resulting one-hot tensor.

    Raises:
        ValueError: If the given space is not a discrete one.

    .. testcode::
        :skipif: True

        import gymnasium as gym
        import tensorflow as tf
        from ray.rllib.utils.tf_utils import one_hot
        x = tf.Variable([0, 3], dtype=tf.int32)  # batch-dim=2
        # Discrete space with 4 (one-hot) slots per batch item.
        s = gym.spaces.Discrete(4)
        one_hot(x, s)

    .. testoutput::

    .. testcode::
        :skipif: True

        x = tf.Variable([[0, 1, 2, 3]], dtype=tf.int32)  # batch-dim=1
        # MultiDiscrete space with 5 + 4 + 4 + 7 = 20 (one-hot) slots
        # per batch item.
        s = gym.spaces.MultiDiscrete([5, 4, 4, 7])
        one_hot(x, s)

    .. testoutput::

    """
    if isinstance(space, Discrete):
        return tf.one_hot(x, space.n, dtype=tf.float32)
    elif isinstance(space, MultiDiscrete):
        # Nested (multi-dim) MultiDiscrete: flatten nvec and the input's
        # trailing dims so each sub-space gets its own one-hot segment.
        if isinstance(space.nvec[0], np.ndarray):
            nvec = np.ravel(space.nvec)
            x = tf.reshape(x, (x.shape[0], -1))
        else:
            nvec = space.nvec
        # Concatenate one one-hot segment per sub-space along the last axis.
        return tf.concat(
            [tf.one_hot(x[:, i], n, dtype=tf.float32) for i, n in enumerate(nvec)],
            axis=-1,
        )
    else:
        raise ValueError("Unsupported space for `one_hot`: {}".format(space))


@PublicAPI
def reduce_mean_ignore_inf(x: TensorType, axis: Optional[int] = None) -> TensorType:
    """Same as tf.reduce_mean() but ignores -inf values.

    Args:
        x: The input tensor to reduce mean over.
        axis: The axis over which to reduce. None for all axes.

    Returns:
        The mean reduced inputs, ignoring inf values.
    """
    # NOTE(review): Despite the docstring, the mask excludes exactly
    # `tf.float32.min` (the finite FLOAT_MIN sentinel used for masked logits),
    # not literal -inf values — confirm intended semantics with callers.
    mask = tf.not_equal(x, tf.float32.min)
    x_zeroed = tf.where(mask, x, tf.zeros_like(x))
    return tf.math.reduce_sum(x_zeroed, axis) / tf.math.reduce_sum(
        tf.cast(mask, tf.float32), axis
    )


@PublicAPI
def scope_vars(
    scope: Union[str, "tf1.VariableScope"], trainable_only: bool = False
) -> List["tf.Variable"]:
    """Get variables inside a given scope.

    Args:
        scope: Scope in which the variables reside.
        trainable_only: Whether or not to return only the variables that were
            marked as trainable.

    Returns:
        The list of variables in the given `scope`.
    """
    return tf1.get_collection(
        tf1.GraphKeys.TRAINABLE_VARIABLES
        if trainable_only
        else tf1.GraphKeys.VARIABLES,
        scope=scope if isinstance(scope, str) else scope.name,
    )


@PublicAPI
def symlog(x: "tf.Tensor") -> "tf.Tensor":
    """The symlog function as described in [1]:

    [1] Mastering Diverse Domains through World Models - 2023
    D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
    https://arxiv.org/pdf/2301.04104v1.pdf
    """
    # Sign-preserving log compression; +1 keeps it finite/zero at x=0.
    return tf.math.sign(x) * tf.math.log(tf.math.abs(x) + 1)


@PublicAPI
def inverse_symlog(y: "tf.Tensor") -> "tf.Tensor":
    """Inverse of the `symlog` function as desribed in [1]:

    [1] Mastering Diverse Domains through World Models - 2023
    D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
    https://arxiv.org/pdf/2301.04104v1.pdf
    """
    # To get to symlog inverse, we solve the symlog equation for x:
    # y = sign(x) * log(|x| + 1)
    # <=> y / sign(x) = log(|x| + 1)
    # <=> y  = log( x + 1) V x >= 0
    #     -y = log(-x + 1) V x <  0
    # <=> exp(y)  =  x + 1  V x >= 0
    #     exp(-y) = -x + 1  V x <  0
    # <=> exp(y)  - 1 =  x   V x >= 0
    #     exp(-y) - 1 = -x   V x <  0
    # <=> exp(y)   - 1 = x   V x >= 0 (if x >= 0, then y must also be >= 0)
    #     -exp(-y) - 1 = x   V x <  0 (if x < 0, then y must also be < 0)
    # <=> sign(y) * (exp(|y|) - 1) = x
    return tf.math.sign(y) * (tf.math.exp(tf.math.abs(y)) - 1)


@PublicAPI
def two_hot(
    value: "tf.Tensor",
    num_buckets: int = 255,
    lower_bound: float = -20.0,
    upper_bound: float = 20.0,
    dtype=None,
):
    """Returns a two-hot vector of dim=num_buckets with two entries that are non-zero.

    See [1] for more details:
    [1] Mastering Diverse Domains through World Models - 2023
    D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
    https://arxiv.org/pdf/2301.04104v1.pdf

    Entries in the vector represent equally sized buckets within some fixed range
    (`lower_bound` to `upper_bound`).
    Those entries not 0.0 at positions k and k+1 encode the actual `value` and sum
    up to 1.0. They are the weights multiplied by the buckets values at k and k+1 for
    retrieving `value`.
+ + Example: + num_buckets=11 + lower_bound=-5 + upper_bound=5 + value=2.5 + -> [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0] + -> [-5 -4 -3 -2 -1 0 1 2 3 4 5] (0.5*2 + 0.5*3=2.5) + + Example: + num_buckets=5 + lower_bound=-1 + upper_bound=1 + value=0.1 + -> [0.0, 0.0, 0.8, 0.2, 0.0] + -> [-1 -0.5 0 0.5 1] (0.2*0.5 + 0.8*0=0.1) + + Args: + value: The input tensor of shape (B,) to be two-hot encoded. + num_buckets: The number of buckets to two-hot encode into. + lower_bound: The lower bound value used for the encoding. If input values are + lower than this boundary, they will be encoded as `lower_bound`. + upper_bound: The upper bound value used for the encoding. If input values are + higher than this boundary, they will be encoded as `upper_bound`. + + Returns: + The two-hot encoded tensor of shape (B, num_buckets). + """ + # First make sure, values are clipped. + value = tf.clip_by_value(value, lower_bound, upper_bound) + # Tensor of batch indices: [0, B=batch size). + batch_indices = tf.cast( + tf.range(0, tf.shape(value)[0]), + dtype=dtype or tf.float32, + ) + # Calculate the step deltas (how much space between each bucket's central value?). + bucket_delta = (upper_bound - lower_bound) / (num_buckets - 1) + # Compute the float indices (might be non-int numbers: sitting between two buckets). + idx = (-lower_bound + value) / bucket_delta + # k + k = tf.math.floor(idx) + # k+1 + kp1 = tf.math.ceil(idx) + # In case k == kp1 (idx is exactly on the bucket boundary), move kp1 up by 1.0. + # Otherwise, this would result in a NaN in the returned two-hot tensor. + kp1 = tf.where(tf.equal(k, kp1), kp1 + 1.0, kp1) + # Iff `kp1` is one beyond our last index (because incoming value is larger than + # `upper_bound`), move it to one before k (kp1's weight is going to be 0.0 anyways, + # so it doesn't matter where it points to; we are just avoiding an index error + # with this). 
+ kp1 = tf.where(tf.equal(kp1, num_buckets), kp1 - 2.0, kp1) + # The actual values found at k and k+1 inside the set of buckets. + values_k = lower_bound + k * bucket_delta + values_kp1 = lower_bound + kp1 * bucket_delta + # Compute the two-hot weights (adding up to 1.0) to use at index k and k+1. + weights_k = (value - values_kp1) / (values_k - values_kp1) + weights_kp1 = 1.0 - weights_k + # Compile a tensor of full paths (indices from batch index to feature index) to + # use for the scatter_nd op. + indices_k = tf.stack([batch_indices, k], -1) + indices_kp1 = tf.stack([batch_indices, kp1], -1) + indices = tf.concat([indices_k, indices_kp1], 0) + # The actual values (weights adding up to 1.0) to place at the computed indices. + updates = tf.concat([weights_k, weights_kp1], 0) + # Call the actual scatter update op, returning a zero-filled tensor, only changed + # at the given indices. + return tf.scatter_nd( + tf.cast(indices, tf.int32), + updates, + shape=(tf.shape(value)[0], num_buckets), + ) + + +@PublicAPI +def update_target_network( + main_net: NetworkType, + target_net: NetworkType, + tau: float, +) -> None: + """Updates a keras.Model target network using Polyak averaging. + + new_target_net_weight = ( + tau * main_net_weight + (1.0 - tau) * current_target_net_weight + ) + + Args: + main_net: The keras.Model to update from. + target_net: The target network to update. + tau: The tau value to use in the Polyak averaging formula. + """ + for old_var, current_var in zip(target_net.variables, main_net.variables): + updated_var = tau * current_var + (1.0 - tau) * old_var + old_var.assign(updated_var) + + +@PublicAPI +def zero_logps_from_actions(actions: TensorStructType) -> TensorType: + """Helper function useful for returning dummy logp's (0) for some actions. + + Args: + actions: The input actions. This can be any struct + of complex action components or a simple tensor of different + dimensions, e.g. [B], [B, 2], or {"a": [B, 4, 5], "b": [B]}. 
+ + Returns: + A 1D tensor of 0.0 (dummy logp's) matching the batch + dim of `actions` (shape=[B]). + """ + # Need to flatten `actions` in case we have a complex action space. + # Take the 0th component to extract the batch dim. + action_component = tree.flatten(actions)[0] + logp_ = tf.zeros_like(action_component, dtype=tf.float32) + # Logp's should be single values (but with the same batch dim as + # `deterministic_actions` or `stochastic_actions`). In case + # actions are just [B], zeros_like works just fine here, but if + # actions are [B, ...], we have to reduce logp back to just [B]. + while len(logp_.shape) > 1: + logp_ = logp_[:, 0] + return logp_ + + +@DeveloperAPI +def warn_if_infinite_kl_divergence( + policy: Type["TFPolicy"], mean_kl: TensorType +) -> None: + def print_warning(): + logger.warning( + "KL divergence is non-finite, this will likely destabilize your model and" + " the training process. Action(s) in a specific state have near-zero" + " probability. This can happen naturally in deterministic environments" + " where the optimal policy has zero mass for a specific action. To fix this" + " issue, consider setting the coefficient for the KL loss term to zero or" + " increasing policy entropy." + ) + return tf.constant(0.0) + + if policy.loss_initialized(): + tf.cond( + tf.math.is_inf(mean_kl), + false_fn=lambda: tf.constant(0.0), + true_fn=lambda: print_warning(), + ) diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/threading.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/threading.py new file mode 100644 index 0000000000000000000000000000000000000000..a9a4461dadbf69afa24ed8e9007fe326640dc7de --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/threading.py @@ -0,0 +1,34 @@ +from typing import Callable + +from ray.rllib.utils.annotations import OldAPIStack + + +@OldAPIStack +def with_lock(func: Callable) -> Callable: + """Use as decorator (@withlock) around object methods that need locking. 
+ + Note: The object must have a self._lock = threading.Lock() property. + Locking thus works on the object level (no two locked methods of the same + object can be called asynchronously). + + Args: + func: The function to decorate/wrap. + + Returns: + The wrapped (object-level locked) function. + """ + + def wrapper(self, *a, **k): + try: + with self._lock: + return func(self, *a, **k) + except AttributeError as e: + if "has no attribute '_lock'" in e.args[0]: + raise AttributeError( + "Object {} must have a `self._lock` property (assigned " + "to a threading.RLock() object in its " + "constructor)!".format(self) + ) + raise e + + return wrapper diff --git a/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/torch_utils.py b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/torch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0d360d4d1488c48eb0dfa52a2a682b45e8c5e376 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/rllib/utils/torch_utils.py @@ -0,0 +1,745 @@ +import logging +import os +import warnings +from typing import Dict, List, Optional, TYPE_CHECKING, Union + +import gymnasium as gym +from gymnasium.spaces import Discrete, MultiDiscrete +import numpy as np +from packaging import version +import tree # pip install dm_tree + +from ray.rllib.models.repeated_values import RepeatedValues +from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.numpy import SMALL_NUMBER +from ray.rllib.utils.typing import ( + LocalOptimizer, + NetworkType, + SpaceStruct, + TensorStructType, + TensorType, +) + +if TYPE_CHECKING: + from ray.rllib.core.learner.learner import ParamDict, ParamList + from ray.rllib.policy.torch_policy import TorchPolicy + from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 + +logger = logging.getLogger(__name__) +torch, nn = try_import_torch() + +# Limit values suitable for use as close to a -inf logit. 
# These are useful since -inf / inf cause NaNs during backprop.
FLOAT_MIN = -3.4e38
FLOAT_MAX = 3.4e38

if torch:
    TORCH_COMPILE_REQUIRED_VERSION = version.parse("2.0.0")
else:
    TORCH_COMPILE_REQUIRED_VERSION = ValueError(
        "torch is not installed. " "TORCH_COMPILE_REQUIRED_VERSION is " "not defined."
    )


# TODO (sven): Deprecate this function once we have moved completely to the Learner API.
#  Replaced with `clip_gradients()`.
@PublicAPI
def apply_grad_clipping(
    policy: "TorchPolicy", optimizer: LocalOptimizer, loss: TensorType
) -> Dict[str, TensorType]:
    """Applies gradient clipping to already computed grads inside `optimizer`.

    Note: This function does NOT perform an analogous operation as
    tf.clip_by_global_norm. It merely clips by norm (per gradient tensor) and
    then computes the global norm across all given tensors (but without clipping
    by that global norm).

    Args:
        policy: The TorchPolicy, which calculated `loss`.
        optimizer: A local torch optimizer object.
        loss: The torch loss tensor.

    Returns:
        An info dict containing the "grad_norm" key and the resulting clipped
        gradients.
    """
    grad_gnorm = 0
    if policy.config["grad_clip"] is not None:
        clip_value = policy.config["grad_clip"]
    else:
        clip_value = np.inf

    num_none_grads = 0
    for param_group in optimizer.param_groups:
        # Make sure we only pass params with grad != None into torch
        # clip_grad_norm_. Would fail otherwise.
        params = list(filter(lambda p: p.grad is not None, param_group["params"]))
        if params:
            # PyTorch clips gradients inplace and returns the norm before clipping
            # We therefore need to compute grad_gnorm further down (fixes #4965)
            global_norm = nn.utils.clip_grad_norm_(params, clip_value)

            if isinstance(global_norm, torch.Tensor):
                global_norm = global_norm.cpu().numpy()

            grad_gnorm += min(global_norm, clip_value)
        else:
            num_none_grads += 1

    # Note (Kourosh): grads could indeed be zero. This method should still return
    # grad_gnorm in that case.
    if num_none_grads == len(optimizer.param_groups):
        # No grads available
        return {}
    return {"grad_gnorm": grad_gnorm}


@PublicAPI
def clip_gradients(
    gradients_dict: "ParamDict",
    *,
    grad_clip: Optional[float] = None,
    grad_clip_by: str = "value",
) -> TensorType:
    """Performs gradient clipping on a grad-dict based on a clip value and clip mode.

    Changes the provided gradient dict in place.

    Args:
        gradients_dict: The gradients dict, mapping str to gradient tensors.
        grad_clip: The value to clip with. The way gradients are clipped is defined
            by the `grad_clip_by` arg (see below).
        grad_clip_by: One of 'value', 'norm', or 'global_norm'.

    Returns:
        If `grad_clip_by`="global_norm" and `grad_clip` is not None, returns the global
        norm of all tensors, otherwise returns None.
    """
    # No clipping, return.
    if grad_clip is None:
        return

    # Clip by value (each gradient individually).
    if grad_clip_by == "value":
        for k, v in gradients_dict.copy().items():
            gradients_dict[k] = (
                None if v is None else torch.clip(v, -grad_clip, grad_clip)
            )

    # Clip by L2-norm (per gradient tensor).
    elif grad_clip_by == "norm":
        for k, v in gradients_dict.copy().items():
            if v is not None:
                # Compute the L2-norm of the gradient tensor.
                norm = v.norm(2).nan_to_num(neginf=-10e8, posinf=10e8)
                # Clip all the gradients.
                if norm > grad_clip:
                    v.mul_(grad_clip / norm)

    # Clip by global L2-norm (across all gradient tensors).
    else:
        assert (
            grad_clip_by == "global_norm"
        ), f"`grad_clip_by` ({grad_clip_by}) must be one of [value|norm|global_norm]!"
        gradients_list = list(gradients_dict.values())
        total_norm = compute_global_norm(gradients_list)
        # We do want the coefficient to be in between 0.0 and 1.0, therefore
        # if the global_norm is smaller than the clip value, we use the clip value
        # as normalization constant.
        device = gradients_list[0].device
        clip_coef = grad_clip / torch.maximum(
            torch.tensor(grad_clip).to(device), total_norm + 1e-6
        )
        # Note: multiplying by the clamped coef is redundant when the coef is clamped to
        # 1, but doing so avoids a `if clip_coef < 1:` conditional which can require a
        # CPU <=> device synchronization when the gradients do not reside in CPU memory.
        clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
        for g in gradients_list:
            if g is not None:
                g.detach().mul_(clip_coef_clamped.to(g.device))
        return total_norm


@PublicAPI
def compute_global_norm(gradients_list: "ParamList") -> TensorType:
    """Computes the global norm for a gradients dict.

    Args:
        gradients_list: The gradients list containing parameters.

    Returns:
        Returns the global norm of all tensors in `gradients_list`.

    Raises:
        RuntimeError: If the computed total norm is non-finite even after
            nan/inf replacement.
    """
    # Define the norm type to be L2.
    norm_type = 2.0
    # If we have no grads, return zero.
    if len(gradients_list) == 0:
        return torch.tensor(0.0)
    device = gradients_list[0].device

    # Compute the global norm.
    total_norm = torch.norm(
        torch.stack(
            [
                torch.norm(g.detach(), norm_type)
                # Note, we want to avoid overflow in the norm computation, this does
                # not affect the gradients themselves as we clamp by multiplying and
                # not by overriding tensor values.
                .nan_to_num(neginf=-10e8, posinf=10e8).to(device)
                for g in gradients_list
                if g is not None
            ]
        ),
        norm_type,
    ).nan_to_num(neginf=-10e8, posinf=10e8)
    if torch.logical_or(total_norm.isnan(), total_norm.isinf()):
        raise RuntimeError(
            f"The total norm of order {norm_type} for gradients from "
            "`parameters` is non-finite, so it cannot be clipped. "
        )
    # Return the global norm.
    return total_norm


@PublicAPI
def concat_multi_gpu_td_errors(
    policy: Union["TorchPolicy", "TorchPolicyV2"]
) -> Dict[str, TensorType]:
    """Concatenates multi-GPU (per-tower) TD error tensors given TorchPolicy.

    TD-errors are extracted from the TorchPolicy via its tower_stats property.

    Args:
        policy: The TorchPolicy to extract the TD-error values from.

    Returns:
        A dict mapping strings "td_error" and "mean_td_error" to the
        corresponding concatenated and mean-reduced values.
    """
    td_error = torch.cat(
        [
            # Towers missing a "td_error" entry contribute a zero placeholder.
            t.tower_stats.get("td_error", torch.tensor([0.0])).to(policy.device)
            for t in policy.model_gpu_towers
        ],
        dim=0,
    )
    # Also cache the concatenated result on the policy for later access.
    policy.td_error = td_error
    return {
        "td_error": td_error,
        "mean_td_error": torch.mean(td_error),
    }


@PublicAPI
def convert_to_torch_tensor(
    x: TensorStructType,
    device: Optional[str] = None,
    pin_memory: bool = False,
):
    """Converts any struct to torch.Tensors.

    Args:
        x: Any (possibly nested) struct, the values in which will be
            converted and returned as a new struct with all leaves converted
            to torch tensors.
        device: The device to create the tensor on.
        pin_memory: If True, will call the `pin_memory()` method on the created tensors.

    Returns:
        Any: A new struct with the same structure as `x`, but with all
        values converted to torch Tensor types. This does not convert possibly
        nested elements that are None because torch has no representation for that.
    """

    def mapping(item):
        if item is None:
            # Torch has no representation for `None`, so we return None
            return item

        # Special handling of "Repeated" values.
        if isinstance(item, RepeatedValues):
            return RepeatedValues(
                tree.map_structure(mapping, item.values), item.lengths, item.max_len
            )

        # Already torch tensor -> make sure it's on right device.
        if torch.is_tensor(item):
            tensor = item
        # Numpy arrays.
        elif isinstance(item, np.ndarray):
            # Object type (e.g. info dicts in train batch): leave as-is.
            # str type (e.g. agent_id in train batch): leave as-is.
            if item.dtype == object or item.dtype.type is np.str_:
                return item
            # Non-writable numpy-arrays will cause PyTorch warning.
            elif item.flags.writeable is False:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    tensor = torch.from_numpy(item)
            # Already numpy: Wrap as torch tensor.
            else:
                tensor = torch.from_numpy(item)
        # Everything else: Convert to numpy, then wrap as torch tensor.
        else:
            tensor = torch.from_numpy(np.asarray(item))

        # Floatify all float64 tensors (but leave float16 as-is).
        if tensor.is_floating_point() and str(tensor.dtype) != "torch.float16":
            tensor = tensor.float()

        # Pin the tensor's memory (for faster transfer to GPU later).
        # Bug fix: `Tensor.pin_memory()` is NOT in-place; it returns a new,
        # pinned tensor. The previous code discarded that return value, so
        # pinning silently never happened.
        if pin_memory and torch.cuda.is_available():
            tensor = tensor.pin_memory()

        return tensor if device is None else tensor.to(device)

    return tree.map_structure(mapping, x)


@PublicAPI
def copy_torch_tensors(x: TensorStructType, device: Optional[str] = None):
    """Creates a copy of `x` and makes deep copies torch.Tensors in x.

    Also moves the copied tensors to the specified device (if not None).

    Note if an object in x is not a torch.Tensor, it will be shallow-copied.

    Args:
        x: Any (possibly nested) struct possibly containing torch.Tensors.
        device: The device to move the tensors to.

    Returns:
        Any: A new struct with the same structure as `x`, but with all
        torch.Tensors deep-copied and moved to the specified device.
    """

    def mapping(item):
        if isinstance(item, torch.Tensor):
            # `detach()` drops the autograd graph; `clone()` (or `.to()`) makes
            # the storage independent of the original tensor.
            return (
                torch.clone(item.detach())
                if device is None
                else item.detach().to(device)
            )
        else:
            return item

    return tree.map_structure(mapping, x)


@PublicAPI
def explained_variance(y: TensorType, pred: TensorType) -> TensorType:
    """Computes the explained variance for a pair of labels and predictions.

    The formula used is:
    max(-1.0, 1.0 - (std(y - pred)^2 / std(y)^2))

    Args:
        y: The labels.
        pred: The predictions.

    Returns:
        The explained variance given a pair of labels and predictions.
    """
    y_var = torch.var(y, dim=[0])
    diff_var = torch.var(y - pred, dim=[0])
    # Clamp from below at -1.0 (worst possible explained variance).
    min_ = torch.tensor([-1.0]).to(pred.device)
    return torch.max(min_, 1 - (diff_var / (y_var + SMALL_NUMBER)))[0]


@PublicAPI
def flatten_inputs_to_1d_tensor(
    inputs: TensorStructType,
    spaces_struct: Optional[SpaceStruct] = None,
    time_axis: bool = False,
) -> TensorType:
    """Flattens arbitrary input structs according to the given spaces struct.

    Returns a single 1D tensor resulting from the different input
    components' values.

    Thereby:
    - Boxes (any shape) get flattened to (B, [T]?, -1). Note that image boxes
    are not treated differently from other types of Boxes and get
    flattened as well.
    - Discrete (int) values are one-hot'd, e.g. a batch of [1, 0, 3] (B=3 with
    Discrete(4) space) results in [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]].
    - MultiDiscrete values are multi-one-hot'd, e.g. a batch of
    [[0, 2], [1, 4]] (B=2 with MultiDiscrete([2, 5]) space) results in
    [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]].

    Args:
        inputs: The inputs to be flattened.
        spaces_struct: The structure of the spaces that behind the input
        time_axis: Whether all inputs have a time-axis (after the batch axis).
            If True, will keep not only the batch axis (0th), but the time axis
            (1st) as-is and flatten everything from the 2nd axis up.

    Returns:
        A single 1D tensor resulting from concatenating all
        flattened/one-hot'd input components. Depending on the time_axis flag,
        the shape is (B, n) or (B, T, n).

    .. testcode::

        from gymnasium.spaces import Discrete, Box
        from ray.rllib.utils.torch_utils import flatten_inputs_to_1d_tensor
        import torch
        struct = {
            "a": np.array([1, 3]),
            "b": (
                np.array([[1.0, 2.0], [4.0, 5.0]]),
                np.array(
                    [[[8.0], [7.0]], [[5.0], [4.0]]]
                ),
            ),
            "c": {
                "cb": np.array([1.0, 2.0]),
            },
        }
        struct_torch = tree.map_structure(lambda s: torch.from_numpy(s), struct)
        spaces = dict(
            {
                "a": gym.spaces.Discrete(4),
                "b": (gym.spaces.Box(-1.0, 10.0, (2,)), gym.spaces.Box(-1.0, 1.0, (2,
                1))),
                "c": dict(
                    {
                        "cb": gym.spaces.Box(-1.0, 1.0, ()),
                    }
                ),
            }
        )
        print(flatten_inputs_to_1d_tensor(struct_torch, spaces_struct=spaces))

    .. testoutput::

        tensor([[0., 1., 0., 0., 1., 2., 8., 7., 1.],
                [0., 0., 0., 1., 4., 5., 5., 4., 2.]])

    """

    flat_inputs = tree.flatten(inputs)
    flat_spaces = (
        tree.flatten(spaces_struct)
        if spaces_struct is not None
        else [None] * len(flat_inputs)
    )

    B = None
    T = None
    out = []
    for input_, space in zip(flat_inputs, flat_spaces):
        # Store batch and (if applicable) time dimension.
        if B is None:
            B = input_.shape[0]
            if time_axis:
                T = input_.shape[1]

        # One-hot encoding.
        if isinstance(space, Discrete):
            if time_axis:
                input_ = torch.reshape(input_, [B * T])
            out.append(one_hot(input_, space).float())
        # Multi one-hot encoding.
        elif isinstance(space, MultiDiscrete):
            if time_axis:
                input_ = torch.reshape(input_, [B * T, -1])
            out.append(one_hot(input_, space).float())
        # Box: Flatten.
        else:
            if time_axis:
                input_ = torch.reshape(input_, [B * T, -1])
            else:
                input_ = torch.reshape(input_, [B, -1])
            out.append(input_.float())

    merged = torch.cat(out, dim=-1)
    # Restore the time-dimension, if applicable.
    if time_axis:
        merged = torch.reshape(merged, [B, T, -1])

    return merged


@PublicAPI
def global_norm(tensors: List[TensorType]) -> TensorType:
    """Returns the global L2 norm over a list of tensors.

    output = sqrt(SUM(t ** 2 for t in tensors)),
    where SUM reduces over all tensors and over all elements in tensors.

    Args:
        tensors: The list of tensors to calculate the global norm over.

    Returns:
        The global L2 norm over the given tensor list.
    """
    # List of single tensors' L2 norms: SQRT(SUM(xi^2)) over all xi in tensor.
    single_l2s = [torch.pow(torch.sum(torch.pow(t, 2.0)), 0.5) for t in tensors]
    # Compute global norm from all single tensors' L2 norms.
    return torch.pow(sum(torch.pow(l2, 2.0) for l2 in single_l2s), 0.5)


@PublicAPI
def huber_loss(x: TensorType, delta: float = 1.0) -> TensorType:
    """Computes the huber loss for a given term and delta parameter.

    Reference: https://en.wikipedia.org/wiki/Huber_loss
    Note that the factor of 0.5 is implicitly included in the calculation.

    Formula:
        L = 0.5 * x^2 for small abs x (delta threshold)
        L = delta * (abs(x) - 0.5*delta) for larger abs x (delta threshold)

    Args:
        x: The input term, e.g. a TD error.
        delta: The delta parameter in the above formula.

    Returns:
        The Huber loss resulting from `x` and `delta`.
    """
    return torch.where(
        torch.abs(x) < delta,
        torch.pow(x, 2.0) * 0.5,
        delta * (torch.abs(x) - 0.5 * delta),
    )


@PublicAPI
def l2_loss(x: TensorType) -> TensorType:
    """Computes half the L2 norm over a tensor's values without the sqrt.

    output = 0.5 * sum(x ** 2)

    Args:
        x: The input tensor.

    Returns:
        0.5 times the L2 norm over the given tensor's values (w/o sqrt).
    """
    return 0.5 * torch.sum(torch.pow(x, 2.0))


@PublicAPI
def minimize_and_clip(
    optimizer: "torch.optim.Optimizer", clip_val: float = 10.0
) -> None:
    """Clips grads found in `optimizer.param_groups` to given value in place.

    Ensures the norm of the gradients for each variable is clipped to
    `clip_val`.

    Args:
        optimizer: The torch.optim.Optimizer to get the variables from.
        clip_val: The per-parameter norm clip value (clipping is applied to
            each parameter's gradient tensor individually, NOT to the global
            norm across all parameters).
    """
    # Loop through optimizer's variables and norm per variable.
    for param_group in optimizer.param_groups:
        for p in param_group["params"]:
            if p.grad is not None:
                # Bug fix: `clip_grad_norm_` expects the *parameters* (it then
                # reads each parameter's `.grad`). Passing `p.grad` made the
                # call inspect `p.grad.grad` (always None), turning the whole
                # function into a silent no-op.
                torch.nn.utils.clip_grad_norm_(p, clip_val)


@PublicAPI
def one_hot(x: TensorType, space: gym.Space) -> TensorType:
    """Returns a one-hot tensor, given an int tensor and a space.

    Handles the MultiDiscrete case as well.

    Args:
        x: The input tensor.
        space: The space to use for generating the one-hot tensor.

    Returns:
        The resulting one-hot tensor.

    Raises:
        ValueError: If the given space is not a discrete one.

    .. testcode::

        import torch
        import gymnasium as gym
        from ray.rllib.utils.torch_utils import one_hot
        x = torch.IntTensor([0, 3])  # batch-dim=2
        # Discrete space with 4 (one-hot) slots per batch item.
        s = gym.spaces.Discrete(4)
        print(one_hot(x, s))
        x = torch.IntTensor([[0, 1, 2, 3]])  # batch-dim=1
        # MultiDiscrete space with 5 + 4 + 4 + 7 = 20 (one-hot) slots
        # per batch item.
        s = gym.spaces.MultiDiscrete([5, 4, 4, 7])
        print(one_hot(x, s))

    .. testoutput::

        tensor([[1, 0, 0, 0],
                [0, 0, 0, 1]])
        tensor([[1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]])
    """
    if isinstance(space, Discrete):
        return nn.functional.one_hot(x.long(), space.n)
    elif isinstance(space, MultiDiscrete):
        # Nested (2D) nvec -> flatten both the nvec and the input's last dims.
        if isinstance(space.nvec[0], np.ndarray):
            nvec = np.ravel(space.nvec)
            x = x.reshape(x.shape[0], -1)
        else:
            nvec = space.nvec
        return torch.cat(
            [nn.functional.one_hot(x[:, i].long(), n) for i, n in enumerate(nvec)],
            dim=-1,
        )
    else:
        raise ValueError("Unsupported space for `one_hot`: {}".format(space))


@PublicAPI
def reduce_mean_ignore_inf(x: TensorType, axis: Optional[int] = None) -> TensorType:
    """Same as torch.mean() but ignores -inf values.

    Args:
        x: The input tensor to reduce mean over.
        axis: The axis over which to reduce. None for all axes.

    Returns:
        The mean reduced inputs, ignoring inf values.
    """
    # Zero out the -inf entries, then divide by the count of finite entries
    # only, so -inf positions do not contribute to the mean.
    mask = torch.ne(x, float("-inf"))
    x_zeroed = torch.where(mask, x, torch.zeros_like(x))
    return torch.sum(x_zeroed, axis) / torch.sum(mask.float(), axis)


@PublicAPI
def sequence_mask(
    lengths: TensorType,
    maxlen: Optional[int] = None,
    dtype=None,
    time_major: bool = False,
) -> TensorType:
    """Offers same behavior as tf.sequence_mask for torch.

    Thanks to Dimitris Papatheodorou
    (https://discuss.pytorch.org/t/pytorch-equivalent-for-tf-sequence-mask/
    39036).

    Args:
        lengths: The tensor of individual lengths to mask by.
        maxlen: The maximum length to use for the time axis. If None, use
            the max of `lengths`.
        dtype: The torch dtype to use for the resulting mask. Defaults to
            torch.bool.
        time_major: Whether to return the mask as [B, T] (False; default) or
            as [T, B] (True).

    Returns:
        The sequence mask resulting from the given input and parameters.
    """
    # If maxlen not given, use the longest lengths in the `lengths` tensor.
    if maxlen is None:
        maxlen = lengths.max()

    mask = torch.ones(tuple(lengths.shape) + (int(maxlen),))

    # Cumulative count along the time axis; positions whose running index
    # exceeds the sequence length become False.
    mask = ~(mask.to(lengths.device).cumsum(dim=1).t() > lengths)
    # Time major transformation.
    if not time_major:
        mask = mask.t()

    # By default, set the mask to be boolean.
    # Bug fix: `Tensor.type()` is NOT in-place; the previous code discarded
    # its return value, so a user-supplied `dtype` was silently ignored.
    mask = mask.type(dtype or torch.bool)

    return mask


@PublicAPI
def update_target_network(
    main_net: NetworkType,
    target_net: NetworkType,
    tau: float,
) -> None:
    """Updates a torch.nn.Module target network using Polyak averaging.

    new_target_net_weight = (
        tau * main_net_weight + (1.0 - tau) * current_target_net_weight
    )

    Args:
        main_net: The nn.Module to update from.
        target_net: The target network to update.
        tau: The tau value to use in the Polyak averaging formula.
    """
    # Get the current parameters from the Q network.
    state_dict = main_net.state_dict()
    # Use here Polyak averaging.
    new_state_dict = {
        k: tau * state_dict[k] + (1 - tau) * v
        for k, v in target_net.state_dict().items()
    }
    # Apply the new parameters to the target Q network.
    target_net.load_state_dict(new_state_dict)


@DeveloperAPI
def warn_if_infinite_kl_divergence(
    policy: "TorchPolicy",
    kl_divergence: TensorType,
) -> None:
    """Logs a warning if the given KL divergence is infinite.

    Args:
        policy: The TorchPolicy whose loss produced `kl_divergence`. The check
            only runs once the policy's loss has been initialized.
        kl_divergence: The (mean) KL divergence tensor to check.
    """
    if policy.loss_initialized() and kl_divergence.isinf():
        logger.warning(
            "KL divergence is non-finite, this will likely destabilize your model and"
            " the training process. Action(s) in a specific state have near-zero"
            " probability. This can happen naturally in deterministic environments"
            " where the optimal policy has zero mass for a specific action. To fix this"
            " issue, consider setting the coefficient for the KL loss term to zero or"
            " increasing policy entropy."
        )


@PublicAPI
def set_torch_seed(seed: Optional[int] = None) -> None:
    """Sets the torch random seed to the given value.

    Args:
        seed: The seed to use or None for no seeding.
    """
    if seed is not None and torch:
        torch.manual_seed(seed)
        # See https://github.com/pytorch/pytorch/issues/47672.
        cuda_version = torch.version.cuda
        # NOTE(review): `float("10.2")`-style comparison is fragile for
        # versions like "10.10" but matches upstream behavior — kept as-is.
        if cuda_version is not None and float(torch.version.cuda) >= 10.2:
            os.environ["CUBLAS_WORKSPACE_CONFIG"] = "4096:8"
        else:
            # Not all Operations support this.
            torch.use_deterministic_algorithms(True)
        # This is only for Convolution no problem.
        torch.backends.cudnn.deterministic = True


@PublicAPI
def softmax_cross_entropy_with_logits(
    logits: TensorType,
    labels: TensorType,
) -> TensorType:
    """Same behavior as tf.nn.softmax_cross_entropy_with_logits.

    Args:
        logits: The input predictions (pre-softmax logits).
        labels: The labels corresponding to `logits`.

    Returns:
        The resulting softmax cross-entropy given predictions and labels.
    """
    return torch.sum(-labels * nn.functional.log_softmax(logits, -1), -1)


def _dynamo_is_available():
    # This only works if torch._dynamo is available
    try:
        # TODO(Artur): Remove this once torch._dynamo is available on CI
        import torch._dynamo as dynamo  # noqa: F401

        return True
    except ImportError:
        return False